// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if 0  // defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(512*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1400*MB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(700*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
// The young generation reservation will be 4 * reserved_semispace_size_ so
// that the young generation can be aligned to its size.
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
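      // e.g. with 2 GB of committable virtual address space on x64, the
      // 512 MB default code range is capped at 2 GB / 8 = 256 MB.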
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


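// During a compacting collection, map pointers are encoded and freed regions
// are tagged in place: a region whose first word is kSingleFreeEncoding is a
// single free word (kIntSize bytes), while kMultiFreeEncoding means the
// region's total size is stored in the following word. Anything else is a
// live object whose size is derived from its (encoded) map.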
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set.  The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif  // DEBUG

#if defined(DEBUG)
  ReportStatisticsBeforeGC();
#endif  // DEBUG

  LiveObjectList::GCPrologue();
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG)
  ReportStatisticsAfterGC();
#endif  // DEBUG
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC.  Therefore if we collect aggressively and a weak handle
  // callback has been invoked, we rerun major GC to release objects which
  // become garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
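    // e.g. a 1 MB large-object request alongside five 512 KB paged-space
    // requests asks the large object space whether the old generation can
    // still grow by 2 MB + 2.5 MB; if not, a large-object-space GC is
    // forced below and the whole reservation loop runs again.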
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif


void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;
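  // e.g. 512 KB of survivors out of a 2 MB new space at the start of the
  // last GC gives a survival rate of 25 (percent).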

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}


bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;
    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
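    // e.g. with 60 MB promoted after this collection, promotion of another
    // 20 MB (Max(2 MB, 60 MB / 3)) or allocation of another 30 MB
    // (Max(8 MB, 60 MB / 2)) in old space justifies the next mark-compact.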

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  gc_post_processing_depth_++;
  { DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  bool is_compacting = mark_compact_collector_.IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  is_safe_to_read_maps_ = false;
  mark_compact_collector_.CollectGarbage();
  is_safe_to_read_maps_ = true;

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(HEAP->code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(HEAP->old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead.  So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having
  // an invalid watermark.  This guarantees that dirty regions iteration will
  // use a correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
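  //
  // Schematically, during the scavenge (addresses increase to the right):
  //
  //   to space: | swept copied objects | unswept copied objects | free ... |
  //           bottom              front^          allocation top^
  //
  // while the queue of promoted-object addresses grows downward from
  // ToSpaceHigh(), as initialized below.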
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge object reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
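  // Scavenging the weak-but-independent roots above may have copied more
  // objects, so drain the copy and promotion queues once more.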
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


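// Walks the weak list of optimized functions hanging off a context: entries
// the retainer drops are unlinked, survivors are rechained through their
// next_function_link fields, and the new list head is returned.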
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


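// Prunes the weak list of global contexts in the same way, and additionally
// processes each surviving context's weak list of optimized functions.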
Ben Murdochf87a2032010-10-22 12:50:53 +01001150void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1151 Object* head = undefined_value();
1152 Context* tail = NULL;
1153 Object* candidate = global_contexts_list_;
Steve Block44f0eee2011-05-26 01:26:41 +01001154 while (candidate != undefined_value()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01001155 // Check whether to keep the candidate in the list.
1156 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1157 Object* retain = retainer->RetainAs(candidate);
1158 if (retain != NULL) {
Steve Block44f0eee2011-05-26 01:26:41 +01001159 if (head == undefined_value()) {
Ben Murdochf87a2032010-10-22 12:50:53 +01001160 // First element in the list.
1161 head = candidate_context;
1162 } else {
1163 // Subsequent elements in the list.
1164 ASSERT(tail != NULL);
Steve Block44f0eee2011-05-26 01:26:41 +01001165 tail->set_unchecked(this,
1166 Context::NEXT_CONTEXT_LINK,
Ben Murdochf87a2032010-10-22 12:50:53 +01001167 candidate_context,
1168 UPDATE_WRITE_BARRIER);
1169 }
1170 // Retained context is new tail.
1171 tail = candidate_context;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001172
1173 // Process the weak list of optimized functions for the context.
1174 Object* function_list_head =
1175 ProcessFunctionWeakReferences(
Steve Block44f0eee2011-05-26 01:26:41 +01001176 this,
Ben Murdochb0fe1622011-05-05 13:52:32 +01001177 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1178 retainer);
Steve Block44f0eee2011-05-26 01:26:41 +01001179 candidate_context->set_unchecked(this,
1180 Context::OPTIMIZED_FUNCTIONS_LIST,
Ben Murdochb0fe1622011-05-05 13:52:32 +01001181 function_list_head,
1182 UPDATE_WRITE_BARRIER);
Ben Murdochf87a2032010-10-22 12:50:53 +01001183 }
1184 // Move to next element in the list.
1185 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1186 }
1187
1188 // Terminate the list if there is one or more elements.
1189 if (tail != NULL) {
Steve Block44f0eee2011-05-26 01:26:41 +01001190 tail->set_unchecked(this,
1191 Context::NEXT_CONTEXT_LINK,
Ben Murdochf87a2032010-10-22 12:50:53 +01001192 Heap::undefined_value(),
1193 UPDATE_WRITE_BARRIER);
1194 }
1195
1196 // Update the head of the list of contexts.
Steve Block44f0eee2011-05-26 01:26:41 +01001197 global_contexts_list_ = head;
Ben Murdochf87a2032010-10-22 12:50:53 +01001198}
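

// Illustrative sketch: ProcessWeakReferences and its helper above both walk
// a weakly linked list, splicing out dead elements while tracking the head
// and tail of the survivors. A minimal standalone version of that pattern,
// with a hypothetical WeakNode type and a flag standing in for the retainer
// callback (not V8 API):
namespace {

struct WeakNode {
  WeakNode* next;
  bool alive;  // Stands in for WeakObjectRetainer::RetainAs() != NULL.
};

WeakNode* FilterWeakList(WeakNode* list) {
  WeakNode* head = NULL;
  WeakNode* tail = NULL;
  for (WeakNode* candidate = list;
       candidate != NULL;
       candidate = candidate->next) {
    if (candidate->alive) {
      if (head == NULL) {
        head = candidate;        // First survivor becomes the new head.
      } else {
        tail->next = candidate;  // Splice over the dropped elements.
      }
      tail = candidate;          // Retained node is the new tail.
    }
  }
  if (tail != NULL) tail->next = NULL;  // Terminate the surviving list.
  return head;
}

}  // namespace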


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects. Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // The promoted object might already have been partially visited
      // during dirty regions iteration. Thus we search specifically
      // for pointers into the from semispace instead of looking for
      // pointers to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}
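

// Illustrative sketch: DoScavenge is a Cheney-style scan. The scan pointer
// (new_space_front) chases the allocation pointer (new_space_.top()), and
// copying a reachable child bumps the allocation pointer, so the region
// between the two acts as an implicit work queue. The same shape with
// hypothetical types and a fixed array standing in for to-space (not V8
// API):
namespace {

struct ScavObj {
  ScavObj* child[2];  // Up to two outgoing pointers; NULL when unused.
  ScavObj* forward;   // Forwarding pointer; NULL until the object is copied.
};

ScavObj* scav_to_space[1024];  // Bump-allocated to-space.
int scav_top = 0;              // Allocation pointer.

ScavObj* EvacuateObj(ScavObj* obj) {
  if (obj->forward == NULL) {
    ScavObj* copy = new ScavObj(*obj);  // "Bump allocation" into to-space.
    copy->forward = NULL;
    obj->forward = copy;
    scav_to_space[scav_top++] = copy;
  }
  return obj->forward;
}

void ScavengeFrom(ScavObj* root) {
  int front = scav_top;      // Everything copied from here on is unprocessed.
  EvacuateObj(root);
  while (front < scav_top) {  // Same spirit as new_space_front < top().
    ScavObj* scan = scav_to_space[front++];
    for (int i = 0; i < 2; ++i) {
      if (scan->child[i] != NULL) {
        scan->child[i] = EvacuateObj(scan->child[i]);  // May advance scav_top.
      }
    }
  }
}

}  // namespace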


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


typedef void (*ScavengingCallback)(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object);


static Atomic32 scavenging_visitors_table_mode_;
static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;


INLINE(static void DoScavengeObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* obj));


void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
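

// Illustrative sketch: DoScavengeObject resolves the per-type evacuation
// routine with one table load keyed off the object's map (its visitor id),
// followed by one indirect call, avoiding a switch over instance types. The
// same shape with hypothetical ids and callbacks (not V8's
// VisitorDispatchTable API):
namespace {

enum SketchVisitorId { kSketchVisitString, kSketchVisitArray, kSketchIdCount };

typedef void (*SketchCallback)(void* object);

struct SketchDispatchTable {
  SketchCallback callbacks_[kSketchIdCount];
  void Register(SketchVisitorId id, SketchCallback callback) {
    callbacks_[id] = callback;
  }
  // One indexed load plus an indirect call.
  void Dispatch(SketchVisitorId id, void* object) { callbacks_[id](object); }
};

}  // namespace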


template<LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);

    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSlicedString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SlicedString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);

    table_.Register(kVisitJSFunction,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<JSFunction::kSize>);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object. Returns the target object.
  INLINE(static HeapObject* MigrateObject(Heap* heap,
                                          HeapObject* source,
                                          HeapObject* target,
                                          int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
      Isolate* isolate = heap->isolate();
      if (isolate->logger()->is_logging() ||
          CpuProfiler::is_profiling(isolate)) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
    }

    return target;
  }


  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    ASSERT((size_restriction != SMALL) ||
           (object_size <= Page::kMaxHeapObjectSize));
    ASSERT(object->Size() == object_size);

    Heap* heap = map->heap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (object_size > Page::kMaxHeapObjectSize)) {
        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
        } else {
          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);
        *slot = MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          heap->promotion_queue()->insert(target, object_size);
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    Object* result =
        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
    return;
  }


  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                 slot,
                                                 object,
                                                 object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
                                              slot,
                                              object,
                                              object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    if (ConsString::cast(object)->unchecked_second() ==
        map->heap()->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!map->heap()->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};
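

// Illustrative sketch: MigrateObject above is the heart of a copying
// collector. It copies the payload, then overwrites the source's map word
// with a forwarding pointer so later visits of other slots referencing the
// same object find the copy instead of copying twice. The same idea with a
// hypothetical layout (not V8's tagged MapWord encoding):
namespace {

struct GcSketchObject {
  GcSketchObject* forwarding;  // Stands in for the map word; NULL until moved.
  char payload[32];
};

GcSketchObject* MigrateSketch(GcSketchObject* source, GcSketchObject* target) {
  // Copy the content of source to target.
  for (unsigned i = 0; i < sizeof(source->payload); ++i) {
    target->payload[i] = source->payload[i];
  }
  target->forwarding = NULL;   // The copy itself has not been moved.
  source->forwarding = target; // Leave a forwarding pointer in the source.
  return target;
}

}  // namespace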


template<LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<logging_and_profiling_mode>::table_;


static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
  scavenging_visitors_table_.CopyFrom(
      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
}


void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
    // Table was already updated by some isolate.
    return;
  }

  if (isolate()->logger()->is_logging() ||
      CpuProfiler::is_profiling(isolate()) ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_profiling())) {
    // If one of the isolates is doing a scavenge at this moment, it might
    // see this table in an inconsistent state when some of the callbacks
    // point to ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
    // However, this does not lead to any bugs because such an isolate does
    // not have profiling enabled, and any isolate with profiling enabled is
    // guaranteed to see the table in a consistent state.
    scavenging_visitors_table_.CopyFrom(
        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());

    // We use Release_Store to prevent reordering of this write before writes
    // to the table.
    Release_Store(&scavenging_visitors_table_mode_,
                  LOGGING_AND_PROFILING_ENABLED);
  }
}
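

// The Release_Store above is the classic publication pattern: finish all
// writes to the table first, then flip the mode flag with release
// semantics, so any thread that observes the new flag value also observes
// the completed table. A sketch of the same pattern in portable C++11
// atomics, shown as a comment because this file predates C++11; the reader
// side pairs an acquire load with the release store:
//
//   static int table[256];                        // Plain payload writes.
//   static std::atomic<bool> published(false);
//
//   void Publish() {
//     for (int i = 0; i < 256; ++i) table[i] = i;        // 1. Fill table.
//     published.store(true, std::memory_order_release);  // 2. Publish.
//   }
//
//   bool TryRead(int* out) {
//     if (!published.load(std::memory_order_acquire)) return false;
//     *out = table[42];  // Sees all writes made before the release store.
//     return true;
//   }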


void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(HEAP->InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  DoScavengeObject(map, p, object);
}


MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}
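

// The scoped '{ MaybeObject* maybe = ...; if (!maybe->ToObject(&out))
// return maybe; }' blocks used throughout this file are the pre-exception
// failure-propagation idiom: an allocation yields either an object or a
// retry/failure token that must be bubbled up verbatim. A stripped-down
// standalone analogue with a hypothetical Result type (not V8's
// MaybeObject):
namespace {

struct SketchResult {
  void* value;   // Meaningful only when failed == false.
  bool failed;   // True when the caller must propagate the failure upward.
};

SketchResult SketchFailure() { SketchResult r = { NULL, true }; return r; }
SketchResult SketchSuccess(void* v) { SketchResult r = { v, false }; return r; }

SketchResult SketchAllocateBytes(unsigned n) {
  static char arena[256];   // Toy bump allocator standing in for a space.
  static unsigned used = 0;
  if (used + n > sizeof(arena)) return SketchFailure();
  void* p = arena + used;
  used += n;
  return SketchSuccess(p);
}

// Mirrors the file's idiom: each allocation is unwrapped in its own scope
// and any failure token is returned to the caller unchanged.
SketchResult SketchAllocatePair() {
  void* first;
  { SketchResult maybe = SketchAllocateBytes(16);
    if (maybe.failed) return maybe;  // Caller may trigger a GC and retry.
    first = maybe.value;
  }
  void* second;
  { SketchResult maybe = SketchAllocateBytes(32);
    if (maybe.failed) return maybe;
    second = maybe.value;
  }
  (void) second;           // A real API would package both allocations.
  return SketchSuccess(first);
}

}  // namespace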


MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->init_instance_descriptors();
  map->set_code_cache(empty_fixed_array());
  map->set_prototype_transitions(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  map->set_elements_kind(JSObject::FAST_ELEMENTS);

  // If the map object is aligned fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}
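

// The '- kHeapObjectTag' in the memset above is pointer untagging: V8 heap
// references carry a low tag bit (kHeapObjectTag == 1), so the raw address
// of a byte inside an object is (tagged pointer + offset - tag). Worked
// example with a hypothetical object at raw address 0x1000:
//
//   raw start of object:  0x1000
//   tagged reference:     0x1001                     (raw | kHeapObjectTag)
//   byte at offset 0x58:  0x1001 + 0x58 - 1 = 0x1058 (raw field address)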


MaybeObject* Heap::AllocateCodeCache() {
  Object* result;
  { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  CodeCache* code_cache = CodeCache::cast(result);
  code_cache->set_default_cache(empty_fixed_array());
  code_cache->set_normal_type_cache(undefined_value());
  return code_cache;
}


MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};
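

// The tables above use the X-macro pattern: a LIST macro enumerates the
// tuples once, and each table re-expands that list with a local ELEMENT
// macro, keeping enum and data tables in lockstep. Minimal standalone
// sketch with a hypothetical color list:
#define SKETCH_COLOR_LIST(V) V(Red, 0xFF0000) V(Green, 0x00FF00)

enum SketchColor {
#define SKETCH_COLOR_ENUM(name, rgb) kSketch##name,
  SKETCH_COLOR_LIST(SKETCH_COLOR_ENUM)
#undef SKETCH_COLOR_ENUM
  kSketchColorCount
};

static const int kSketchColorValues[] = {
#define SKETCH_COLOR_VALUE(name, rgb) rgb,
  SKETCH_COLOR_LIST(SKETCH_COLOR_VALUE)
#undef SKETCH_COLOR_VALUE
};
#undef SKETCH_COLOR_LIST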


bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->init_instance_descriptors();
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_prototype_transitions(empty_fixed_array());

  fixed_array_map()->init_instance_descriptors();
  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_prototype_transitions(empty_fixed_array());

  oddball_map()->init_instance_descriptors();
  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->set_prototype_transitions(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_serialized_scope_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_foreign_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_strict_arguments_elements_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
                                         JSGlobalPropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_function_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_with_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_block_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Map* global_context_map = Map::cast(obj);
  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
  set_global_context_map(global_context_map);

  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                         SharedFunctionInfo::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_shared_function_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
                                         JSMessageObject::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_message_object_map(Map::cast(obj));

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}
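

// The first allocation in CreateInitialMaps ties a self-describing knot:
// the meta map is the map of all maps, itself included, which is why it
// must be patched by hand with set_map(new_meta_map) before Map::cast is
// usable. Minimal sketch of the same idea with a hypothetical descriptor
// type (not V8's Map):
namespace {

struct SketchTypeDescriptor {
  const SketchTypeDescriptor* type;  // Every object names its type...
  const char* name;
};

SketchTypeDescriptor* MakeMetaType() {
  SketchTypeDescriptor* meta = new SketchTypeDescriptor();
  // ...and the type of "type descriptor" is the descriptor itself.
  meta->type = meta;
  meta->name = "SketchTypeDescriptor";
  return meta;
}

}  // namespace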


MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use the general version if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_neander_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}


void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope;
  // gcc-4.4 has problems generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}


bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(obj);
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(obj);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the initial symbol table.
  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Assign the print strings for oddballs after creating the symbol table.
  Object* symbol;
  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
    if (!maybe_symbol->ToObject(&symbol)) return false;
  }
  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value())->set_to_number(nan_value());

  // Allocate the null_value.
  { MaybeObject* maybe_obj =
        Oddball::cast(null_value())->Initialize("null",
                                                Smi::FromInt(0),
                                                Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(obj);

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  // Allocate the empty string.
  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    { MaybeObject* maybe_obj =
          LookupAsciiSymbol(constant_symbol_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj =
        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_symbol_ = String::cast(obj);

  // Allocate the foreign for __proto__.
  { MaybeObject* maybe_obj =
        AllocateForeign((Address) &Accessors::ObjectPrototype);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_prototype_accessors(Foreign::cast(obj));

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(NumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(NumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(StringDictionary::cast(obj));

  if (InitializeNumberStringCache()->IsFailure()) return false;

  // Allocate the cache for single character ASCII strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate the cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in FACTORY->NewScript.
  set_last_script_id(undefined_value());

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}


MaybeObject* Heap::InitializeNumberStringCache() {
  // Compute the size of the number string cache based on the max heap size.
  // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
  // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
  Object* obj;
  MaybeObject* maybe_obj =
      AllocateFixedArray(number_string_cache_size * 2, TENURED);
  if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
  return maybe_obj;
}


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(this, i);
  }
}


static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}
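

// double_get_hash above folds all 64 payload bits of the double into 32 by
// xoring the low and high halves, so both halves influence the cache index.
// Standalone sketch of the fold (assumes a 64-bit IEEE double and a visible
// uint64_t typedef, as V8's own headers provide):
namespace {

union SketchDoubleBits {
  double value;
  uint64_t bits;
};

inline int SketchFoldDouble(double d) {
  SketchDoubleBits rep;
  rep.value = d;
  // Example: 1.0 has bits 0x3FF0000000000000, so the fold yields
  // 0x00000000 ^ 0x3FF00000 == 0x3FF00000.
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}

}  // namespace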


Object* Heap::GetNumberStringCache(Object* number) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
  } else {
    hash = double_get_hash(number->Number()) & mask;
  }
  Object* key = number_string_cache()->get(hash * 2);
  if (key == number) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  } else if (key->IsHeapNumber() &&
             number->IsHeapNumber() &&
             key->Number() == number->Number()) {
    return String::cast(number_string_cache()->get(hash * 2 + 1));
  }
  return undefined_value();
}


void Heap::SetNumberStringCache(Object* number, String* string) {
  int hash;
  int mask = (number_string_cache()->length() >> 1) - 1;
  if (number->IsSmi()) {
    hash = smi_get_hash(Smi::cast(number)) & mask;
    number_string_cache()->set(hash * 2, Smi::cast(number));
  } else {
    hash = double_get_hash(number->Number()) & mask;
    number_string_cache()->set(hash * 2, number);
  }
  number_string_cache()->set(hash * 2 + 1, string);
}
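

// The number-string cache above is a direct-mapped cache stored in a flat
// array of (key, value) pairs: slot = hash & (capacity - 1), and a
// colliding insert simply overwrites the previous pair. Standalone sketch
// with ints for keys and C strings for values (hypothetical sizes, not V8
// API):
namespace {

const int kSketchCacheEntries = 64;  // Power of two, so the mask trick works.
int sketch_cache_keys[kSketchCacheEntries];
const char* sketch_cache_values[kSketchCacheEntries];

void SketchCachePut(int key, const char* value) {
  int slot = key & (kSketchCacheEntries - 1);  // One candidate slot.
  sketch_cache_keys[slot] = key;               // Collisions evict silently.
  sketch_cache_values[slot] = value;
}

const char* SketchCacheGet(int key) {
  int slot = key & (kSketchCacheEntries - 1);
  return (sketch_cache_values[slot] != NULL && sketch_cache_keys[slot] == key)
             ? sketch_cache_values[slot]
             : NULL;  // Miss: the caller recomputes and re-inserts.
}

}  // namespace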


MaybeObject* Heap::NumberToString(Object* number,
                                  bool check_number_string_cache) {
  isolate_->counters()->number_to_string_runtime()->Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }

  Object* js_string;
  MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}
2347
2348
Steve Block3ce2e202009-11-05 08:53:23 +00002349Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2350 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2351}
2352
2353
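// Maps an ExternalArrayType to the root-list index of the corresponding Map.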
2354Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2355 ExternalArrayType array_type) {
2356 switch (array_type) {
2357 case kExternalByteArray:
2358 return kExternalByteArrayMapRootIndex;
2359 case kExternalUnsignedByteArray:
2360 return kExternalUnsignedByteArrayMapRootIndex;
2361 case kExternalShortArray:
2362 return kExternalShortArrayMapRootIndex;
2363 case kExternalUnsignedShortArray:
2364 return kExternalUnsignedShortArrayMapRootIndex;
2365 case kExternalIntArray:
2366 return kExternalIntArrayMapRootIndex;
2367 case kExternalUnsignedIntArray:
2368 return kExternalUnsignedIntArrayMapRootIndex;
2369 case kExternalFloatArray:
2370 return kExternalFloatArrayMapRootIndex;
Ben Murdoch257744e2011-11-30 15:57:28 +00002371 case kExternalDoubleArray:
2372 return kExternalDoubleArrayMapRootIndex;
Steve Block44f0eee2011-05-26 01:26:41 +01002373 case kExternalPixelArray:
2374 return kExternalPixelArrayMapRootIndex;
Steve Block3ce2e202009-11-05 08:53:23 +00002375 default:
2376 UNREACHABLE();
2377 return kUndefinedValueRootIndex;
2378 }
2379}
2380
2381
John Reck59135872010-11-02 12:39:01 -07002382MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002383 // We need to distinguish the minus zero value and this cannot be
2384 // done after conversion to int. Doing this by comparing bit
2385 // patterns is faster than using fpclassify() et al.
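  // Note that a plain (value == 0.0) comparison would not work here, since
  // IEEE-754 equality treats -0.0 and +0.0 as equal.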
2386 static const DoubleRepresentation minus_zero(-0.0);
2387
2388 DoubleRepresentation rep(value);
2389 if (rep.bits == minus_zero.bits) {
2390 return AllocateHeapNumber(-0.0, pretenure);
2391 }
2392
2393 int int_value = FastD2I(value);
2394 if (value == int_value && Smi::IsValid(int_value)) {
2395 return Smi::FromInt(int_value);
2396 }
2397
2398 // Materialize the value in the heap.
2399 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002400}
2401
2402
Ben Murdoch257744e2011-11-30 15:57:28 +00002403MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
2404 // Statically ensure that it is safe to allocate foreigns in paged spaces.
2405 STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002406 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002407 Object* result;
Ben Murdoch257744e2011-11-30 15:57:28 +00002408 { MaybeObject* maybe_result = Allocate(foreign_map(), space);
John Reck59135872010-11-02 12:39:01 -07002409 if (!maybe_result->ToObject(&result)) return maybe_result;
2410 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002411
Ben Murdoch257744e2011-11-30 15:57:28 +00002412 Foreign::cast(result)->set_address(address);
Steve Blocka7e24c12009-10-30 11:49:00 +00002413 return result;
2414}
2415
2416
John Reck59135872010-11-02 12:39:01 -07002417MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002418 SharedFunctionInfo* share;
2419 MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2420 if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
Steve Blocka7e24c12009-10-30 11:49:00 +00002421
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002422 // Set pointer fields.
Steve Blocka7e24c12009-10-30 11:49:00 +00002423 share->set_name(name);
Steve Block44f0eee2011-05-26 01:26:41 +01002424 Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
Steve Blocka7e24c12009-10-30 11:49:00 +00002425 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002426 share->set_scope_info(SerializedScopeInfo::Empty());
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002427 Code* construct_stub =
2428 isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
Steve Blocka7e24c12009-10-30 11:49:00 +00002429 share->set_construct_stub(construct_stub);
Steve Blocka7e24c12009-10-30 11:49:00 +00002430 share->set_instance_class_name(Object_symbol());
2431 share->set_function_data(undefined_value());
2432 share->set_script(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002433 share->set_debug_info(undefined_value());
2434 share->set_inferred_name(empty_string());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002435 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002436 share->set_this_property_assignments(undefined_value());
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002437 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
2438
2439 // Set integer fields (smi or int, depending on the architecture).
2440 share->set_length(0);
2441 share->set_formal_parameter_count(0);
2442 share->set_expected_nof_properties(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002443 share->set_num_literals(0);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002444 share->set_start_position_and_type(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002445 share->set_end_position(0);
2446 share->set_function_token_position(0);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002447 // All compiler hints default to false or 0.
2448 share->set_compiler_hints(0);
2449 share->set_this_property_assignments_count(0);
2450 share->set_opt_count(0);
2451
2452 return share;
Steve Blocka7e24c12009-10-30 11:49:00 +00002453}
2454
2455
Steve Block1e0659c2011-05-24 12:43:12 +01002456MaybeObject* Heap::AllocateJSMessageObject(String* type,
2457 JSArray* arguments,
2458 int start_position,
2459 int end_position,
2460 Object* script,
2461 Object* stack_trace,
2462 Object* stack_frames) {
2463 Object* result;
2464 { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
2465 if (!maybe_result->ToObject(&result)) return maybe_result;
2466 }
2467 JSMessageObject* message = JSMessageObject::cast(result);
2468 message->set_properties(Heap::empty_fixed_array());
2469 message->set_elements(Heap::empty_fixed_array());
2470 message->set_type(type);
2471 message->set_arguments(arguments);
2472 message->set_start_position(start_position);
2473 message->set_end_position(end_position);
2474 message->set_script(script);
2475 message->set_stack_trace(stack_trace);
2476 message->set_stack_frames(stack_frames);
2477 return result;
2478}
2479
2480
2481
Steve Blockd0582a62009-12-15 09:54:21 +00002482// Returns true for a character in a range. Both limits are inclusive.
2483static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
 2484 // This makes use of the unsigned wraparound.
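  // For example, Between('a', '0', '9') computes 'a' - '0' = 49 > 9 and thus
  // fails; for character < from the subtraction wraps around to a very large
  // unsigned value, so the single comparison covers both bounds.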
2485 return character - from <= to - from;
2486}
2487
2488
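// Returns a two-character string built from |c1| and |c2|, reusing a symbol
// from the symbol table when one already exists. The (c1 | c2) test below is
// a valid "both characters are ASCII" check only because
// String::kMaxAsciiCharCodeU + 1 is a power of two, i.e. the limit has all
// of its low bits set, as the accompanying ASSERT verifies.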
John Reck59135872010-11-02 12:39:01 -07002489MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
Steve Block44f0eee2011-05-26 01:26:41 +01002490 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07002491 uint32_t c1,
2492 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002493 String* symbol;
2494 // Numeric strings have a different hash algorithm not known by
2495 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2496 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
Steve Block44f0eee2011-05-26 01:26:41 +01002497 heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
Steve Blockd0582a62009-12-15 09:54:21 +00002498 return symbol;
2499 // Now we know the length is 2, we might as well make use of that fact
2500 // when building the new string.
2501 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2502 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002503 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002504 { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
John Reck59135872010-11-02 12:39:01 -07002505 if (!maybe_result->ToObject(&result)) return maybe_result;
2506 }
Steve Blockd0582a62009-12-15 09:54:21 +00002507 char* dest = SeqAsciiString::cast(result)->GetChars();
2508 dest[0] = c1;
2509 dest[1] = c2;
2510 return result;
2511 } else {
John Reck59135872010-11-02 12:39:01 -07002512 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002513 { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
John Reck59135872010-11-02 12:39:01 -07002514 if (!maybe_result->ToObject(&result)) return maybe_result;
2515 }
Steve Blockd0582a62009-12-15 09:54:21 +00002516 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2517 dest[0] = c1;
2518 dest[1] = c2;
2519 return result;
2520 }
2521}
2522
2523
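// Concatenates two strings. Short results are eagerly flattened into a
// sequential string; longer results are represented as a ConsString whose
// flattening is deferred until the characters are actually needed.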
John Reck59135872010-11-02 12:39:01 -07002524MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002525 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002526 if (first_length == 0) {
2527 return second;
2528 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002529
2530 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002531 if (second_length == 0) {
2532 return first;
2533 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002534
2535 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002536
2537 // Optimization for 2-byte strings often used as keys in a decompression
2538 // dictionary. Check whether we already have the string in the symbol
2539 // table to prevent creation of many unneccesary strings.
2540 if (length == 2) {
2541 unsigned c1 = first->Get(0);
2542 unsigned c2 = second->Get(0);
Steve Block44f0eee2011-05-26 01:26:41 +01002543 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blockd0582a62009-12-15 09:54:21 +00002544 }
2545
Steve Block6ded16b2010-05-10 14:33:55 +01002546 bool first_is_ascii = first->IsAsciiRepresentation();
2547 bool second_is_ascii = second->IsAsciiRepresentation();
2548 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002549
2550 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002551 // of the new cons string is too large.
2552 if (length > String::kMaxLength || length < 0) {
Steve Block44f0eee2011-05-26 01:26:41 +01002553 isolate()->context()->mark_out_of_memory();
Steve Blocka7e24c12009-10-30 11:49:00 +00002554 return Failure::OutOfMemoryException();
2555 }
2556
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002557 bool is_ascii_data_in_two_byte_string = false;
2558 if (!is_ascii) {
2559 // At least one of the strings uses two-byte representation so we
2560 // can't use the fast case code for short ascii strings below, but
2561 // we can try to save memory if all chars actually fit in ascii.
2562 is_ascii_data_in_two_byte_string =
2563 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2564 if (is_ascii_data_in_two_byte_string) {
Steve Block44f0eee2011-05-26 01:26:41 +01002565 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002566 }
2567 }
2568
Steve Blocka7e24c12009-10-30 11:49:00 +00002569 // If the resulting string is small make a flat string.
2570 if (length < String::kMinNonFlatLength) {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002571 // Note that neither of the two inputs can be a slice because:
2572 STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
Steve Blocka7e24c12009-10-30 11:49:00 +00002573 ASSERT(first->IsFlat());
2574 ASSERT(second->IsFlat());
2575 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002576 Object* result;
2577 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2578 if (!maybe_result->ToObject(&result)) return maybe_result;
2579 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002580 // Copy the characters into the new object.
2581 char* dest = SeqAsciiString::cast(result)->GetChars();
2582 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002583 const char* src;
2584 if (first->IsExternalString()) {
2585 src = ExternalAsciiString::cast(first)->resource()->data();
2586 } else {
2587 src = SeqAsciiString::cast(first)->GetChars();
2588 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002589 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2590 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002591 if (second->IsExternalString()) {
2592 src = ExternalAsciiString::cast(second)->resource()->data();
2593 } else {
2594 src = SeqAsciiString::cast(second)->GetChars();
2595 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002596 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2597 return result;
2598 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002599 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002600 Object* result;
2601 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2602 if (!maybe_result->ToObject(&result)) return maybe_result;
2603 }
Steve Block6ded16b2010-05-10 14:33:55 +01002604 // Copy the characters into the new object.
2605 char* dest = SeqAsciiString::cast(result)->GetChars();
2606 String::WriteToFlat(first, dest, 0, first_length);
2607 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block44f0eee2011-05-26 01:26:41 +01002608 isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002609 return result;
2610 }
2611
John Reck59135872010-11-02 12:39:01 -07002612 Object* result;
2613 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2614 if (!maybe_result->ToObject(&result)) return maybe_result;
2615 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002616 // Copy the characters into the new object.
2617 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2618 String::WriteToFlat(first, dest, 0, first_length);
2619 String::WriteToFlat(second, dest + first_length, 0, second_length);
2620 return result;
2621 }
2622 }
2623
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002624 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2625 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002626
John Reck59135872010-11-02 12:39:01 -07002627 Object* result;
2628 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2629 if (!maybe_result->ToObject(&result)) return maybe_result;
2630 }
Leon Clarke4515c472010-02-03 11:58:03 +00002631
2632 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002633 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002634 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002635 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002636 cons_string->set_hash_field(String::kEmptyHashField);
2637 cons_string->set_first(first, mode);
2638 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002639 return result;
2640}
2641
2642
John Reck59135872010-11-02 12:39:01 -07002643MaybeObject* Heap::AllocateSubString(String* buffer,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002644 int start,
2645 int end,
2646 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002647 int length = end - start;
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002648 if (length == 0) {
2649 return empty_string();
2650 } else if (length == 1) {
Steve Block44f0eee2011-05-26 01:26:41 +01002651 return LookupSingleCharacterStringFromCode(buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002652 } else if (length == 2) {
2653 // Optimization for 2-byte strings often used as keys in a decompression
2654 // dictionary. Check whether we already have the string in the symbol
 2655 // table to prevent creation of many unnecessary strings.
2656 unsigned c1 = buffer->Get(start);
2657 unsigned c2 = buffer->Get(start + 1);
Steve Block44f0eee2011-05-26 01:26:41 +01002658 return MakeOrFindTwoCharacterString(this, c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002659 }
2660
2661 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002662 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002663
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002664 // TODO(1626): For now slicing external strings is not supported. However,
 2665 // a flat cons string can have an external string as its first part in some cases.
2666 // Therefore we have to single out this case as well.
2667 if (!FLAG_string_slices ||
2668 (buffer->IsConsString() &&
2669 (!buffer->IsFlat() ||
2670 !ConsString::cast(buffer)->first()->IsSeqString())) ||
2671 buffer->IsExternalString() ||
2672 length < SlicedString::kMinLength ||
2673 pretenure == TENURED) {
2674 Object* result;
2675 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2676 ? AllocateRawAsciiString(length, pretenure)
2677 : AllocateRawTwoByteString(length, pretenure);
2678 if (!maybe_result->ToObject(&result)) return maybe_result;
2679 }
2680 String* string_result = String::cast(result);
2681 // Copy the characters into the new object.
2682 if (buffer->IsAsciiRepresentation()) {
2683 ASSERT(string_result->IsAsciiRepresentation());
2684 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2685 String::WriteToFlat(buffer, dest, start, end);
2686 } else {
2687 ASSERT(string_result->IsTwoByteRepresentation());
2688 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2689 String::WriteToFlat(buffer, dest, start, end);
2690 }
2691 return result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002692 }
Steve Blockd0582a62009-12-15 09:54:21 +00002693
Ben Murdoch69a99ed2011-11-30 16:03:39 +00002694 ASSERT(buffer->IsFlat());
2695 ASSERT(!buffer->IsExternalString());
 2696#ifdef DEBUG
2697 buffer->StringVerify();
2698#endif
2699
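  // Allocate a SlicedString that shares the parent's flat, sequential
  // character storage instead of copying the characters.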
2700 Object* result;
2701 { Map* map = buffer->IsAsciiRepresentation()
2702 ? sliced_ascii_string_map()
2703 : sliced_string_map();
2704 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2705 if (!maybe_result->ToObject(&result)) return maybe_result;
2706 }
2707
2708 AssertNoAllocation no_gc;
2709 SlicedString* sliced_string = SlicedString::cast(result);
2710 sliced_string->set_length(length);
2711 sliced_string->set_hash_field(String::kEmptyHashField);
2712 if (buffer->IsConsString()) {
2713 ConsString* cons = ConsString::cast(buffer);
2714 ASSERT(cons->second()->length() == 0);
2715 sliced_string->set_parent(cons->first());
2716 sliced_string->set_offset(start);
2717 } else if (buffer->IsSlicedString()) {
2718 // Prevent nesting sliced strings.
2719 SlicedString* parent_slice = SlicedString::cast(buffer);
2720 sliced_string->set_parent(parent_slice->parent());
2721 sliced_string->set_offset(start + parent_slice->offset());
2722 } else {
2723 sliced_string->set_parent(buffer);
2724 sliced_string->set_offset(start);
2725 }
2726 ASSERT(sliced_string->parent()->IsSeqString());
Steve Blocka7e24c12009-10-30 11:49:00 +00002727 return result;
2728}
2729
2730
John Reck59135872010-11-02 12:39:01 -07002731MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002732 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002733 size_t length = resource->length();
2734 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002735 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002736 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002737 }
2738
Steve Blockd0582a62009-12-15 09:54:21 +00002739 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002740 Object* result;
2741 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2742 if (!maybe_result->ToObject(&result)) return maybe_result;
2743 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002744
2745 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002746 external_string->set_length(static_cast<int>(length));
2747 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002748 external_string->set_resource(resource);
2749
2750 return result;
2751}
2752
2753
John Reck59135872010-11-02 12:39:01 -07002754MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002755 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002756 size_t length = resource->length();
2757 if (length > static_cast<size_t>(String::kMaxLength)) {
Steve Block44f0eee2011-05-26 01:26:41 +01002758 isolate()->context()->mark_out_of_memory();
Steve Blockd0582a62009-12-15 09:54:21 +00002759 return Failure::OutOfMemoryException();
2760 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002761
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002762 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002763 // ASCII characters. If yes, we use a different string map.
2764 static const size_t kAsciiCheckLengthLimit = 32;
2765 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2766 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002767 Map* map = is_ascii ?
Steve Block44f0eee2011-05-26 01:26:41 +01002768 external_string_with_ascii_data_map() : external_string_map();
John Reck59135872010-11-02 12:39:01 -07002769 Object* result;
2770 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2771 if (!maybe_result->ToObject(&result)) return maybe_result;
2772 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002773
2774 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002775 external_string->set_length(static_cast<int>(length));
2776 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002777 external_string->set_resource(resource);
2778
2779 return result;
2780}
2781
2782
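// Returns a one-character string for the given code unit. ASCII codes are
// served from (and lazily added to) the single-character string cache; other
// code units allocate a fresh two-byte string.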
John Reck59135872010-11-02 12:39:01 -07002783MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002784 if (code <= String::kMaxAsciiCharCode) {
Steve Block44f0eee2011-05-26 01:26:41 +01002785 Object* value = single_character_string_cache()->get(code);
2786 if (value != undefined_value()) return value;
Steve Blocka7e24c12009-10-30 11:49:00 +00002787
2788 char buffer[1];
2789 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002790 Object* result;
2791 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002792
John Reck59135872010-11-02 12:39:01 -07002793 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002794 single_character_string_cache()->set(code, result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002795 return result;
2796 }
2797
John Reck59135872010-11-02 12:39:01 -07002798 Object* result;
Steve Block44f0eee2011-05-26 01:26:41 +01002799 { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
John Reck59135872010-11-02 12:39:01 -07002800 if (!maybe_result->ToObject(&result)) return maybe_result;
2801 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002802 String* answer = String::cast(result);
2803 answer->Set(0, code);
2804 return answer;
2805}
2806
2807
John Reck59135872010-11-02 12:39:01 -07002808MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002809 if (length < 0 || length > ByteArray::kMaxLength) {
2810 return Failure::OutOfMemoryException();
2811 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002812 if (pretenure == NOT_TENURED) {
2813 return AllocateByteArray(length);
2814 }
2815 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002816 Object* result;
2817 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2818 ? old_data_space_->AllocateRaw(size)
2819 : lo_space_->AllocateRaw(size);
2820 if (!maybe_result->ToObject(&result)) return maybe_result;
2821 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002822
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002823 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2824 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002825 return result;
2826}
2827
2828
John Reck59135872010-11-02 12:39:01 -07002829MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002830 if (length < 0 || length > ByteArray::kMaxLength) {
2831 return Failure::OutOfMemoryException();
2832 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002833 int size = ByteArray::SizeFor(length);
2834 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002835 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002836 Object* result;
2837 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2838 if (!maybe_result->ToObject(&result)) return maybe_result;
2839 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002840
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002841 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2842 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002843 return result;
2844}
2845
2846
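// Overwrites [addr, addr + size) with a filler object so the heap stays
// iterable. One- and two-pointer holes use dedicated filler maps; larger
// holes are disguised as ByteArrays.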
2847void Heap::CreateFillerObjectAt(Address addr, int size) {
2848 if (size == 0) return;
2849 HeapObject* filler = HeapObject::FromAddress(addr);
2850 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002851 filler->set_map(one_pointer_filler_map());
2852 } else if (size == 2 * kPointerSize) {
2853 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002854 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002855 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002856 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2857 }
2858}
2859
2860
John Reck59135872010-11-02 12:39:01 -07002861MaybeObject* Heap::AllocateExternalArray(int length,
2862 ExternalArrayType array_type,
2863 void* external_pointer,
2864 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002865 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002866 Object* result;
2867 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2868 space,
2869 OLD_DATA_SPACE);
2870 if (!maybe_result->ToObject(&result)) return maybe_result;
2871 }
Steve Block3ce2e202009-11-05 08:53:23 +00002872
2873 reinterpret_cast<ExternalArray*>(result)->set_map(
2874 MapForExternalArrayType(array_type));
2875 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2876 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2877 external_pointer);
2878
2879 return result;
2880}
2881
2882
John Reck59135872010-11-02 12:39:01 -07002883MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2884 Code::Flags flags,
Steve Block44f0eee2011-05-26 01:26:41 +01002885 Handle<Object> self_reference,
2886 bool immovable) {
Leon Clarkeac952652010-07-15 11:15:24 +01002887 // Allocate ByteArray before the Code object, so that we do not risk
 2888 // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002889 Object* reloc_info;
2890 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2891 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2892 }
Leon Clarkeac952652010-07-15 11:15:24 +01002893
Steve Block44f0eee2011-05-26 01:26:41 +01002894 // Compute size.
Leon Clarkeac952652010-07-15 11:15:24 +01002895 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002896 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002897 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002898 MaybeObject* maybe_result;
Steve Block44f0eee2011-05-26 01:26:41 +01002899 // Large code objects and code objects which should stay at a fixed address
2900 // are allocated in large object space.
2901 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
John Reck59135872010-11-02 12:39:01 -07002902 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002903 } else {
John Reck59135872010-11-02 12:39:01 -07002904 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002905 }
2906
John Reck59135872010-11-02 12:39:01 -07002907 Object* result;
2908 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002909
 2910 // Initialize the object.
2911 HeapObject::cast(result)->set_map(code_map());
2912 Code* code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002913 ASSERT(!isolate_->code_range()->exists() ||
2914 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002915 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002916 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002917 code->set_flags(flags);
Ben Murdochb8e0da22011-05-16 14:20:40 +01002918 if (code->is_call_stub() || code->is_keyed_call_stub()) {
2919 code->set_check_type(RECEIVER_MAP_CHECK);
2920 }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002921 code->set_deoptimization_data(empty_fixed_array());
Ben Murdoch257744e2011-11-30 15:57:28 +00002922 code->set_next_code_flushing_candidate(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002923 // Allow self references to created code object by patching the handle to
2924 // point to the newly allocated Code object.
2925 if (!self_reference.is_null()) {
2926 *(self_reference.location()) = code;
2927 }
2928 // Migrate generated code.
2929 // The generated code can contain Object** values (typically from handles)
2930 // that are dereferenced during the copy to point directly to the actual heap
2931 // objects. These pointers can include references to the code object itself,
2932 // through the self_reference parameter.
2933 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002934
2935#ifdef DEBUG
2936 code->Verify();
2937#endif
2938 return code;
2939}
2940
2941
John Reck59135872010-11-02 12:39:01 -07002942MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002943 // Allocate an object the same size as the code object.
2944 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002945 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002946 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002947 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002948 } else {
John Reck59135872010-11-02 12:39:01 -07002949 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002950 }
2951
John Reck59135872010-11-02 12:39:01 -07002952 Object* result;
2953 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002954
2955 // Copy code object.
2956 Address old_addr = code->address();
2957 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002958 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002959 // Relocate the copy.
2960 Code* new_code = Code::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002961 ASSERT(!isolate_->code_range()->exists() ||
2962 isolate_->code_range()->contains(code->address()));
Steve Blocka7e24c12009-10-30 11:49:00 +00002963 new_code->Relocate(new_addr - old_addr);
2964 return new_code;
2965}
2966
2967
John Reck59135872010-11-02 12:39:01 -07002968MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002969 // Allocate ByteArray before the Code object, so that we do not risk
 2970 // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002971 Object* reloc_info_array;
2972 { MaybeObject* maybe_reloc_info_array =
2973 AllocateByteArray(reloc_info.length(), TENURED);
2974 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2975 return maybe_reloc_info_array;
2976 }
2977 }
Leon Clarkeac952652010-07-15 11:15:24 +01002978
2979 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002980
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002981 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002982
2983 Address old_addr = code->address();
2984
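  // Everything up to the start of the relocation info, i.e. the header and
  // the instructions, is copied verbatim below; the relocation info itself
  // is replaced with the patched version afterwards.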
2985 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002986 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002987
John Reck59135872010-11-02 12:39:01 -07002988 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002989 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002990 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002991 } else {
John Reck59135872010-11-02 12:39:01 -07002992 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002993 }
2994
John Reck59135872010-11-02 12:39:01 -07002995 Object* result;
2996 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002997
2998 // Copy code object.
2999 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3000
3001 // Copy header and instructions.
3002 memcpy(new_addr, old_addr, relocation_offset);
3003
Steve Block6ded16b2010-05-10 14:33:55 +01003004 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01003005 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01003006
Leon Clarkeac952652010-07-15 11:15:24 +01003007 // Copy the patched relocation info into the new code object.
3008 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01003009
3010 // Relocate the copy.
Steve Block44f0eee2011-05-26 01:26:41 +01003011 ASSERT(!isolate_->code_range()->exists() ||
3012 isolate_->code_range()->contains(code->address()));
Steve Block6ded16b2010-05-10 14:33:55 +01003013 new_code->Relocate(new_addr - old_addr);
3014
3015#ifdef DEBUG
3016 code->Verify();
3017#endif
3018 return new_code;
3019}
3020
3021
John Reck59135872010-11-02 12:39:01 -07003022MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003023 ASSERT(gc_state_ == NOT_IN_GC);
3024 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00003025 // If allocation failures are disallowed, we may allocate in a different
3026 // space when new space is full and the object is not a large object.
3027 AllocationSpace retry_space =
3028 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07003029 Object* result;
3030 { MaybeObject* maybe_result =
3031 AllocateRaw(map->instance_size(), space, retry_space);
3032 if (!maybe_result->ToObject(&result)) return maybe_result;
3033 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003034 HeapObject::cast(result)->set_map(map);
3035 return result;
3036}
3037
3038
John Reck59135872010-11-02 12:39:01 -07003039MaybeObject* Heap::InitializeFunction(JSFunction* function,
3040 SharedFunctionInfo* shared,
3041 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003042 ASSERT(!prototype->IsMap());
3043 function->initialize_properties();
3044 function->initialize_elements();
3045 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01003046 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00003047 function->set_prototype_or_initial_map(prototype);
3048 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00003049 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01003050 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00003051 return function;
3052}
3053
3054
John Reck59135872010-11-02 12:39:01 -07003055MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003056 // Allocate the prototype. Make sure to use the object function
3057 // from the function's context, since the function can be from a
3058 // different context.
3059 JSFunction* object_function =
3060 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07003061 Object* prototype;
3062 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
3063 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3064 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003065 // When creating the prototype for the function we must set its
3066 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07003067 Object* result;
3068 { MaybeObject* maybe_result =
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003069 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3070 constructor_symbol(), function, DONT_ENUM);
John Reck59135872010-11-02 12:39:01 -07003071 if (!maybe_result->ToObject(&result)) return maybe_result;
3072 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003073 return prototype;
3074}
3075
3076
John Reck59135872010-11-02 12:39:01 -07003077MaybeObject* Heap::AllocateFunction(Map* function_map,
3078 SharedFunctionInfo* shared,
3079 Object* prototype,
3080 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003081 AllocationSpace space =
3082 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07003083 Object* result;
3084 { MaybeObject* maybe_result = Allocate(function_map, space);
3085 if (!maybe_result->ToObject(&result)) return maybe_result;
3086 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003087 return InitializeFunction(JSFunction::cast(result), shared, prototype);
3088}
3089
3090
John Reck59135872010-11-02 12:39:01 -07003091MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003092 // To get fast allocation and map sharing for arguments objects we
3093 // allocate them based on an arguments boilerplate.
3094
Steve Block44f0eee2011-05-26 01:26:41 +01003095 JSObject* boilerplate;
3096 int arguments_object_size;
3097 bool strict_mode_callee = callee->IsJSFunction() &&
3098 JSFunction::cast(callee)->shared()->strict_mode();
3099 if (strict_mode_callee) {
3100 boilerplate =
3101 isolate()->context()->global_context()->
3102 strict_mode_arguments_boilerplate();
3103 arguments_object_size = kArgumentsObjectSizeStrict;
3104 } else {
3105 boilerplate =
3106 isolate()->context()->global_context()->arguments_boilerplate();
3107 arguments_object_size = kArgumentsObjectSize;
3108 }
3109
Steve Blocka7e24c12009-10-30 11:49:00 +00003110 // This calls Copy directly rather than using Heap::AllocateRaw so we
3111 // duplicate the check here.
3112 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
3113
Leon Clarkee46be812010-01-19 14:06:41 +00003114 // Check that the size of the boilerplate matches our
3115 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3116 // on the size being a known constant.
Steve Block44f0eee2011-05-26 01:26:41 +01003117 ASSERT(arguments_object_size == boilerplate->map()->instance_size());
Leon Clarkee46be812010-01-19 14:06:41 +00003118
3119 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07003120 Object* result;
3121 { MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01003122 AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
John Reck59135872010-11-02 12:39:01 -07003123 if (!maybe_result->ToObject(&result)) return maybe_result;
3124 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003125
3126 // Copy the content. The arguments boilerplate doesn't have any
3127 // fields that point to new space so it's safe to skip the write
3128 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003129 CopyBlock(HeapObject::cast(result)->address(),
3130 boilerplate->address(),
Steve Block44f0eee2011-05-26 01:26:41 +01003131 JSObject::kHeaderSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003132
Steve Block44f0eee2011-05-26 01:26:41 +01003133 // Set the length property.
3134 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
Steve Blocka7e24c12009-10-30 11:49:00 +00003135 Smi::FromInt(length),
3136 SKIP_WRITE_BARRIER);
Steve Block44f0eee2011-05-26 01:26:41 +01003137 // Set the callee property for non-strict mode arguments object only.
3138 if (!strict_mode_callee) {
3139 JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
3140 callee);
3141 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003142
 3143 // Check the state of the object.
3144 ASSERT(JSObject::cast(result)->HasFastProperties());
3145 ASSERT(JSObject::cast(result)->HasFastElements());
3146
3147 return result;
3148}
3149
3150
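// Returns true if the descriptor array contains the same key more than once.
// Only adjacent entries are compared, so the array must already be sorted.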
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003151static bool HasDuplicates(DescriptorArray* descriptors) {
3152 int count = descriptors->number_of_descriptors();
3153 if (count > 1) {
3154 String* prev_key = descriptors->GetKey(0);
3155 for (int i = 1; i != count; i++) {
3156 String* current_key = descriptors->GetKey(i);
3157 if (prev_key == current_key) return true;
3158 prev_key = current_key;
3159 }
3160 }
3161 return false;
3162}
3163
3164
John Reck59135872010-11-02 12:39:01 -07003165MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003166 ASSERT(!fun->has_initial_map());
3167
3168 // First create a new map with the size and number of in-object properties
3169 // suggested by the function.
3170 int instance_size = fun->shared()->CalculateInstanceSize();
3171 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07003172 Object* map_obj;
Steve Block44f0eee2011-05-26 01:26:41 +01003173 { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
John Reck59135872010-11-02 12:39:01 -07003174 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
3175 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003176
3177 // Fetch or allocate prototype.
3178 Object* prototype;
3179 if (fun->has_instance_prototype()) {
3180 prototype = fun->instance_prototype();
3181 } else {
John Reck59135872010-11-02 12:39:01 -07003182 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
3183 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3184 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003185 }
3186 Map* map = Map::cast(map_obj);
3187 map->set_inobject_properties(in_object_properties);
3188 map->set_unused_property_fields(in_object_properties);
3189 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01003190 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003191
Andrei Popescu402d9372010-02-26 13:31:12 +00003192 // If the function has only simple this property assignments add
3193 // field descriptors for these to the initial map as the object
3194 // cannot be constructed without having these properties. Guard by
3195 // the inline_new flag so we only change the map if we generate a
3196 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00003197 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00003198 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003199 int count = fun->shared()->this_property_assignments_count();
3200 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003201 // Inline constructor can only handle inobject properties.
3202 fun->shared()->ForbidInlineConstructor();
3203 } else {
John Reck59135872010-11-02 12:39:01 -07003204 Object* descriptors_obj;
3205 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
3206 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3207 return maybe_descriptors_obj;
3208 }
3209 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003210 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3211 for (int i = 0; i < count; i++) {
3212 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3213 ASSERT(name->IsSymbol());
3214 FieldDescriptor field(name, i, NONE);
3215 field.SetEnumerationIndex(i);
3216 descriptors->Set(i, &field);
3217 }
3218 descriptors->SetNextEnumerationIndex(count);
3219 descriptors->SortUnchecked();
3220
3221 // The descriptors may contain duplicates because the compiler does not
3222 // guarantee the uniqueness of property names (it would have required
3223 // quadratic time). Once the descriptors are sorted we can check for
3224 // duplicates in linear time.
3225 if (HasDuplicates(descriptors)) {
3226 fun->shared()->ForbidInlineConstructor();
3227 } else {
3228 map->set_instance_descriptors(descriptors);
3229 map->set_pre_allocated_property_fields(count);
3230 map->set_unused_property_fields(in_object_properties - count);
3231 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003232 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003233 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003234
3235 fun->shared()->StartInobjectSlackTracking(map);
3236
Steve Blocka7e24c12009-10-30 11:49:00 +00003237 return map;
3238}
3239
3240
3241void Heap::InitializeJSObjectFromMap(JSObject* obj,
3242 FixedArray* properties,
3243 Map* map) {
3244 obj->set_properties(properties);
3245 obj->initialize_elements();
3246 // TODO(1240798): Initialize the object's body using valid initial values
3247 // according to the object's initial map. For example, if the map's
3248 // instance type is JS_ARRAY_TYPE, the length field should be initialized
 3249 // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
 3250 // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
 3251 // verification code has to cope with (temporarily) invalid objects. See,
 3252 // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003253 Object* filler;
3254 // We cannot always fill with one_pointer_filler_map because objects
3255 // created from API functions expect their internal fields to be initialized
3256 // with undefined_value.
3257 if (map->constructor()->IsJSFunction() &&
3258 JSFunction::cast(map->constructor())->shared()->
3259 IsInobjectSlackTrackingInProgress()) {
3260 // We might want to shrink the object later.
3261 ASSERT(obj->GetInternalFieldCount() == 0);
3262 filler = Heap::one_pointer_filler_map();
3263 } else {
3264 filler = Heap::undefined_value();
3265 }
3266 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003267}
3268
3269
John Reck59135872010-11-02 12:39:01 -07003270MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003271 // JSFunctions should be allocated using AllocateFunction to be
3272 // properly initialized.
3273 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3274
Steve Block8defd9f2010-07-08 12:39:36 +01003275 // Both types of global objects should be allocated using
3276 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003277 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3278 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3279
3280 // Allocate the backing storage for the properties.
3281 int prop_size =
3282 map->pre_allocated_property_fields() +
3283 map->unused_property_fields() -
3284 map->inobject_properties();
3285 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003286 Object* properties;
3287 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3288 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3289 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003290
3291 // Allocate the JSObject.
3292 AllocationSpace space =
3293 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3294 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003295 Object* obj;
3296 { MaybeObject* maybe_obj = Allocate(map, space);
3297 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3298 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003299
3300 // Initialize the JSObject.
3301 InitializeJSObjectFromMap(JSObject::cast(obj),
3302 FixedArray::cast(properties),
3303 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003304 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003305 return obj;
3306}
3307
3308
John Reck59135872010-11-02 12:39:01 -07003309MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3310 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003311 // Allocate the initial map if absent.
3312 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003313 Object* initial_map;
3314 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3315 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3316 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003317 constructor->set_initial_map(Map::cast(initial_map));
3318 Map::cast(initial_map)->set_constructor(constructor);
3319 }
 3320 // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003321 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003322 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003323#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003324 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003325 Object* non_failure;
3326 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3327#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003328 return result;
3329}
3330
3331
Ben Murdoch257744e2011-11-30 15:57:28 +00003332MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
3333 // Allocate map.
3334 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3335 // maps. Will probably depend on the identity of the handler object, too.
3336 Map* map;
3337 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
3338 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3339 map->set_prototype(prototype);
Ben Murdoch257744e2011-11-30 15:57:28 +00003340
3341 // Allocate the proxy object.
3342 Object* result;
3343 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3344 if (!maybe_result->ToObject(&result)) return maybe_result;
3345 JSProxy::cast(result)->set_handler(handler);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00003346 JSProxy::cast(result)->set_padding(Smi::FromInt(0));
Ben Murdoch257744e2011-11-30 15:57:28 +00003347 return result;
3348}
3349
3350
John Reck59135872010-11-02 12:39:01 -07003351MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003352 ASSERT(constructor->has_initial_map());
3353 Map* map = constructor->initial_map();
3354
3355 // Make sure no field properties are described in the initial map.
3356 // This guarantees us that normalizing the properties does not
3357 // require us to change property values to JSGlobalPropertyCells.
3358 ASSERT(map->NextFreePropertyIndex() == 0);
3359
3360 // Make sure we don't have a ton of pre-allocated slots in the
3361 // global objects. They will be unused once we normalize the object.
3362 ASSERT(map->unused_property_fields() == 0);
3363 ASSERT(map->inobject_properties() == 0);
3364
 3365 // Initial size of the backing store to avoid resizing the storage during
 3366 // bootstrapping. The size differs between the JS global object and the
 3367 // builtins object.
3368 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3369
3370 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003371 Object* obj;
3372 { MaybeObject* maybe_obj =
3373 StringDictionary::Allocate(
3374 map->NumberOfDescribedProperties() * 2 + initial_size);
3375 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3376 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003377 StringDictionary* dictionary = StringDictionary::cast(obj);
3378
3379 // The global object might be created from an object template with accessors.
3380 // Fill these accessors into the dictionary.
3381 DescriptorArray* descs = map->instance_descriptors();
3382 for (int i = 0; i < descs->number_of_descriptors(); i++) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003383 PropertyDetails details(descs->GetDetails(i));
Steve Blocka7e24c12009-10-30 11:49:00 +00003384 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3385 PropertyDetails d =
3386 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3387 Object* value = descs->GetCallbacksObject(i);
Steve Block44f0eee2011-05-26 01:26:41 +01003388 { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
John Reck59135872010-11-02 12:39:01 -07003389 if (!maybe_value->ToObject(&value)) return maybe_value;
3390 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003391
John Reck59135872010-11-02 12:39:01 -07003392 Object* result;
3393 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3394 if (!maybe_result->ToObject(&result)) return maybe_result;
3395 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003396 dictionary = StringDictionary::cast(result);
3397 }
3398
3399 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003400 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3401 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3402 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003403 JSObject* global = JSObject::cast(obj);
3404 InitializeJSObjectFromMap(global, dictionary, map);
3405
3406 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003407 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3408 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3409 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003410 Map* new_map = Map::cast(obj);
3411
 3412 // Set up the global object as a normalized object.
3413 global->set_map(new_map);
Ben Murdoch257744e2011-11-30 15:57:28 +00003414 global->map()->clear_instance_descriptors();
Steve Blocka7e24c12009-10-30 11:49:00 +00003415 global->set_properties(dictionary);
3416
3417 // Make sure result is a global object with properties in dictionary.
3418 ASSERT(global->IsGlobalObject());
3419 ASSERT(!global->HasFastProperties());
3420 return global;
3421}
3422
3423
MaybeObject* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions.  If functions need to be copied we
  // have to be careful to clear the literals array.
  ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    { MaybeObject* maybe_clone =
          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    RecordWrites(clone_address,
                 JSObject::kHeaderSize,
                 (object_size - JSObject::kHeaderSize) / kPointerSize);
  } else {
    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    ASSERT(InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem;
      if (elements->map() == fixed_cow_array_map()) {
        maybe_elem = FixedArray::cast(elements);
      } else if (source->HasFastDoubleElements()) {
        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
      } else {
        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
      }
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
  }
  // Return the new clone.
  return clone;
}


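// Turns a JSProxy into an ordinary JSObject in place: the receiver keeps its
// address but gets a fresh map, which is only safe because a proxy has the
// same instance size as a fresh empty JSObject (asserted below).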
MaybeObject* Heap::ReinitializeJSProxyAsJSObject(JSProxy* object) {
  // Allocate fresh map.
  // TODO(rossberg): Once we optimize proxies, cache these maps.
  Map* map;
  MaybeObject* maybe_map_obj =
      AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;

  // Check that the receiver has the same size as a fresh object.
  ASSERT(map->instance_size() == object->map()->instance_size());

  map->set_prototype(object->map()->prototype());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(map);

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(JSObject::cast(object),
                            FixedArray::cast(properties), map);
  return object;
}


MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                             JSGlobalProxy* object) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size and type as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());
  ASSERT(map->instance_type() == object->map()->instance_type());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}


MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
                                           PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRawAsciiString(string.length(), pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the characters into the new object.
  SeqAsciiString* string_result = SeqAsciiString::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->SeqAsciiStringSet(i, string[i]);
  }
  return result;
}


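// Decodes UTF-8 in two passes: the first pass only counts the decoded
// characters so that a two-byte string of exactly the right length can be
// allocated; the second pass decodes again and fills it in.  Characters
// outside the Basic Multilingual Plane are replaced with
// unibrow::Utf8::kBadChar.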
MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                              PretenureFlag pretenure) {
  // V8 only supports characters in the Basic Multilingual Plane.
  const uc32 kMaxSupportedChar = 0xFFFF;
  // Count the number of characters in the UTF-8 string.
  Access<UnicodeCache::Utf8Decoder>
      decoder(isolate_->unicode_cache()->utf8_decoder());
  decoder->Reset(string.start(), string.length());
  int chars = 0;
  while (decoder->has_more()) {
    decoder->GetNext();
    chars++;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Convert and copy the characters into the new object.
  String* string_result = String::cast(result);
  decoder->Reset(string.start(), string.length());
  for (int i = 0; i < chars; i++) {
    uc32 r = decoder->GetNext();
    if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
    string_result->Set(i, r);
  }
  return result;
}


MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                             PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  MaybeObject* maybe_result;
  if (String::IsAscii(string.start(), string.length())) {
    maybe_result = AllocateRawAsciiString(string.length(), pretenure);
  } else {  // It's not an ASCII string.
    maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
  }
  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy the characters into the new object, which may be either ASCII or
  // UTF-16.
  String* string_result = String::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->Set(i, string[i]);
  }
  return result;
}


Map* Heap::SymbolMapForString(String* string) {
  // If the string is in new space it cannot be used as a symbol.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding symbol map for strings.
  Map* map = string->map();
  if (map == ascii_string_map()) {
    return ascii_symbol_map();
  }
  if (map == string_map()) {
    return symbol_map();
  }
  if (map == cons_string_map()) {
    return cons_symbol_map();
  }
  if (map == cons_ascii_string_map()) {
    return cons_ascii_symbol_map();
  }
  if (map == external_string_map()) {
    return external_symbol_map();
  }
  if (map == external_ascii_string_map()) {
    return external_ascii_symbol_map();
  }
  if (map == external_string_with_ascii_data_map()) {
    return external_symbol_with_ascii_data_map();
  }

  // No match found.
  return NULL;
}


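// Symbols are always allocated in old data space or, when oversized, in
// large object space.  This is consistent with SymbolMapForString() above
// rejecting strings that live in new space.  The character stream is scanned
// once to decide whether the compact ASCII representation suffices, then
// rewound and copied.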
MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
                                          int chars,
                                          uint32_t hash_field) {
  ASSERT(chars >= 0);
  // Ensure that chars matches the number of characters in the buffer.
  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
  // Determine whether the string is ASCII.
  bool is_ascii = true;
  while (buffer->has_more()) {
    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
      is_ascii = false;
      break;
    }
  }
  buffer->Rewind();

  // Compute map and object size.
  int size;
  Map* map;

  if (is_ascii) {
    if (chars > SeqAsciiString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = ascii_symbol_map();
    size = SeqAsciiString::SizeFor(chars);
  } else {
    if (chars > SeqTwoByteString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = symbol_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
        ? lo_space_->AllocateRaw(size)
        : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  // Fill in the characters.
  for (int i = 0; i < chars; i++) {
    answer->Set(i, buffer->GetNext());
  }
  return answer;
}


MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
  if (length < 0 || length > SeqAsciiString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  int size = SeqAsciiString::SizeFor(length);
  ASSERT(size <= SeqAsciiString::kMaxSize);

  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space; the retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(ascii_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateRawTwoByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = SeqTwoByteString::SizeFor(length);
  ASSERT(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space; the retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
  reinterpret_cast<FixedArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  ASSERT(length > 0);
  // Use the general function if we're forced to always allocate.
  if (always_allocate()) return AllocateFixedArray(length, TENURED);
  // Allocate the raw data for a fixed array.
  int size = FixedArray::SizeFor(length);
  return size <= kMaxObjectSizeInNewSpace
      ? new_space_.AllocateRaw(size)
      : lo_space_->AllocateRawFixedArray(size);
}


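// Copies a fixed array, giving the copy the supplied map.  A copy in new
// space is filled with a single CopyBlock and needs no write barrier; an
// old-space copy goes through set() element by element with whatever write
// barrier mode the destination requires.  The map word (the first
// kPointerSize bytes) is skipped because the map is stored explicitly.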
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  if (InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    dst->set_map(map);
    CopyBlock(dst->address() + kPointerSize,
              src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  HeapObject::cast(obj)->set_map(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the contents.
  AssertNoAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}


MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                               Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  HeapObject* dst = HeapObject::cast(obj);
  dst->set_map(map);
  CopyBlock(
      dst->address() + FixedDoubleArray::kLengthOffset,
      src->address() + FixedDoubleArray::kLengthOffset,
      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
  return obj;
}


MaybeObject* Heap::AllocateFixedArray(int length) {
  ASSERT(length >= 0);
  if (length == 0) return empty_fixed_array();
  Object* result;
  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize header.
  FixedArray* array = reinterpret_cast<FixedArray*>(result);
  array->set_map(fixed_array_map());
  array->set_length(length);
  // Initialize body.
  ASSERT(!InNewSpace(undefined_value()));
  MemsetPointer(array->data_start(), undefined_value(), length);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  int size = FixedArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_POINTER_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old pointer space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}


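// Shared helper for the two pre-filled fixed-array allocators below:
// AllocateFixedArray() fills fresh slots with undefined, while
// AllocateFixedArrayWithHoles() fills them with the hole value.  The filler
// must not live in new space (asserted below); presumably this is what makes
// the unbarriered MemsetPointer stores safe, since they can then never
// create unrecorded old-to-new pointers.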
MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
    Heap* heap,
    int length,
    PretenureFlag pretenure,
    Object* filler) {
  ASSERT(length >= 0);
  ASSERT(heap->empty_fixed_array()->IsFixedArray());
  if (length == 0) return heap->empty_fixed_array();

  ASSERT(!heap->InNewSpace(filler));
  Object* result;
  { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap->fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}


MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      undefined_value());
}


MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
                                               PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      the_hole_value());
}


MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
  if (length == 0) return empty_fixed_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
  FixedArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
  int size = FixedDoubleArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedDoubleArray*>(result)->set_map(
      fixed_double_array_map());
  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
    int length,
    PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_double_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
  FixedDoubleArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
                                               PretenureFlag pretenure) {
  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = FixedDoubleArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_DATA_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old data space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}


MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
  ASSERT(result->IsHashTable());
  return result;
}


MaybeObject* Heap::AllocateGlobalContext() {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(global_context_map());
  ASSERT(context->IsGlobalContext());
  ASSERT(result->IsContext());
  return result;
}


MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(function_context_map());
  context->set_closure(function);
  context->set_previous(function->context());
  context->set_extension(NULL);
  context->set_global(function->context()->global());
  return context;
}


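// Contexts are fixed arrays whose map identifies the context kind.  A catch
// context is one slot longer than the minimal context: the extension slot
// holds the name of the catch variable and the extra slot
// (THROWN_OBJECT_INDEX) holds the thrown value.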
MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
                                        Context* previous,
                                        String* name,
                                        Object* thrown_object) {
  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(catch_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(name);
  context->set_global(previous->global());
  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
  return context;
}


MaybeObject* Heap::AllocateWithContext(JSFunction* function,
                                       Context* previous,
                                       JSObject* extension) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(with_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global(previous->global());
  return context;
}


MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                        Context* previous,
                                        SerializedScopeInfo* scope_info) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(block_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(scope_info);
  context->set_global(previous->global());
  return context;
}


MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  SerializedScopeInfo* scope_info =
      reinterpret_cast<SerializedScopeInfo*>(result);
  scope_info->set_map(serialized_scope_info_map());
  return scope_info;
}


MaybeObject* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
    case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Struct::cast(result)->InitializeBody(size);
  return result;
}


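// Called by the embedder when the VM is idle.  Consecutive idle
// notifications escalate the cleanup work: a scavenge after four
// notifications, a mark-sweep (with the compilation cache cleared) after
// seven, and a full compacting collection after eight, after which true is
// returned to signal that further idle notifications are currently useless.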
bool Heap::IdleNotification() {
  static const int kIdlesBeforeScavenge = 4;
  static const int kIdlesBeforeMarkSweep = 7;
  static const int kIdlesBeforeMarkCompact = 8;
  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
  static const unsigned int kGCsBetweenCleanup = 4;

  if (!last_idle_notification_gc_count_init_) {
    last_idle_notification_gc_count_ = gc_count_;
    last_idle_notification_gc_count_init_ = true;
  }

  bool uncommit = true;
  bool finished = false;

  // Reset the number of idle notifications received when a number of
  // GCs have taken place. This allows another round of cleanup based
  // on idle notifications if enough work has been carried out to
  // provoke a number of garbage collections.
  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
    number_idle_notifications_ =
        Min(number_idle_notifications_ + 1, kMaxIdleCount);
  } else {
    number_idle_notifications_ = 0;
    last_idle_notification_gc_count_ = gc_count_;
  }

  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
    if (contexts_disposed_ > 0) {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
    } else {
      CollectGarbage(NEW_SPACE);
    }
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
    // Before doing the mark-sweep collections we clear the
    // compilation cache to avoid hanging on to source code and
    // generated code for cached functions.
    isolate_->compilation_cache()->Clear();

    CollectAllGarbage(false);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
    CollectAllGarbage(true);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
    number_idle_notifications_ = 0;
    finished = true;
  } else if (contexts_disposed_ > 0) {
    if (FLAG_expose_gc) {
      contexts_disposed_ = 0;
    } else {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
      last_idle_notification_gc_count_ = gc_count_;
    }
    // If this is the first idle notification, we reset the
    // notification count to avoid letting idle notifications for
    // context disposal garbage collections start a potentially too
    // aggressive idle GC cycle.
    if (number_idle_notifications_ <= 1) {
      number_idle_notifications_ = 0;
      uncommit = false;
    }
  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
    // If we have received more than kIdlesBeforeMarkCompact idle
    // notifications we do not perform any cleanup because we don't
    // expect to gain much by doing so.
    finished = true;
  }

  // Make sure that we have no pending context disposals and
  // conditionally uncommit from space.
  ASSERT(contexts_disposed_ == 0);
  if (uncommit) UncommitFromSpace();
  return finished;
}


#ifdef DEBUG

void Heap::Print() {
  if (!HasBeenSetup()) return;
  isolate()->PrintStack();
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next())
    space->Print();
}


void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space.  If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}


// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("mark-compact GC : %d\n", mc_count_);
  PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_promotion_limit_);
  PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  isolate_->memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Cell space : ");
  cell_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}

#endif  // DEBUG

bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}


bool Heap::Contains(Address addr) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetup() &&
      (new_space_.ToSpaceContains(addr) ||
       old_pointer_space_->Contains(addr) ||
       old_data_space_->Contains(addr) ||
       code_space_->Contains(addr) ||
       map_space_->Contains(addr) ||
       cell_space_->Contains(addr) ||
       lo_space_->SlowContains(addr));
}


bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}


bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetup()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_.ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case CELL_SPACE:
      return cell_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  return false;
}


#ifdef DEBUG
static void DummyScavengePointer(HeapObject** p) {
}


static void VerifyPointersUnderWatermark(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    Address start = page->ObjectAreaStart();
    Address end = page->AllocationWatermark();

    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
                              start,
                              end,
                              visit_dirty_region,
                              &DummyScavengePointer);
  }
}


static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        HEAP->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}


void Heap::Verify() {
  ASSERT(HasBeenSetup());

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  new_space_.Verify();

  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
  old_pointer_space_->Verify(&dirty_regions_visitor);
  map_space_->Verify(&dirty_regions_visitor);

  VerifyPointersUnderWatermark(old_pointer_space_,
                               &IteratePointersInDirtyRegion);
  VerifyPointersUnderWatermark(map_space_,
                               &IteratePointersInDirtyMapsRegion);
  VerifyPointersUnderWatermark(lo_space_);

  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);

  VerifyPointersVisitor no_dirty_regions_visitor;
  old_data_space_->Verify(&no_dirty_regions_visitor);
  code_space_->Verify(&no_dirty_regions_visitor);
  cell_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();
}
#endif  // DEBUG


MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupAsciiSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
                                     int from,
                                     int length) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSubStringAsciiSymbol(string,
                                                   from,
                                                   length,
                                                   &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupTwoByteSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupSymbol(String* string) {
  if (string->IsSymbol()) return string;
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupString(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
  if (string->IsSymbol()) {
    *symbol = string;
    return true;
  }
  return symbol_table()->LookupSymbolIfExists(string, symbol);
}


#ifdef DEBUG
void Heap::ZapFromSpace() {
  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
  for (Address a = new_space_.FromSpaceLow();
       a < new_space_.FromSpaceHigh();
       a += kPointerSize) {
    Memory::Address_at(a) = kFromSpaceZapValue;
  }
}
#endif  // DEBUG


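// The old-to-new write barrier is region based: each page is divided into
// fixed-size regions, with one dirty bit per region in the page's 32-bit
// region mark word, and only dirty regions are scanned for pointers into new
// space.  This visitor scans one such range word by word and reports whether
// it still contains new-space pointers afterwards, i.e. whether the dirty
// bit has to stay set.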
bool Heap::IteratePointersInDirtyRegion(Heap* heap,
                                        Address start,
                                        Address end,
                                        ObjectSlotCallback copy_object_func) {
  Address slot_address = start;
  bool pointers_to_new_space_found = false;

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap->InNewSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      copy_object_func(reinterpret_cast<HeapObject**>(slot));
      if (heap->InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        pointers_to_new_space_found = true;
      }
    }
    slot_address += kPointerSize;
  }
  return pointers_to_new_space_found;
}


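// Map space pages contain nothing but fixed-size Map objects, so a dirty
// region can be translated into a sequence of maps by rounding to Map::kSize
// boundaries.  Only the pointer fields of each map (between
// Map::kPointerFieldsBeginOffset and Map::kPointerFieldsEndOffset) can hold
// heap pointers, so only those slices are visited.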
// Compute the start address of the first map following the given address.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->ObjectAreaStart();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute the end address of the first map preceding the given address.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


static bool IteratePointersInDirtyMaps(Address start,
                                       Address end,
                                       ObjectSlotCallback copy_object_func) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  bool pointers_to_new_space_found = false;

  Heap* heap = HEAP;
  while (map_address < end) {
    ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    if (Heap::IteratePointersInDirtyRegion(heap,
                                           pointer_fields_start,
                                           pointer_fields_end,
                                           copy_object_func)) {
      pointers_to_new_space_found = true;
    }

    map_address += Map::kSize;
  }

  return pointers_to_new_space_found;
}


bool Heap::IteratePointersInDirtyMapsRegion(
    Heap* heap,
    Address start,
    Address end,
    ObjectSlotCallback copy_object_func) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  bool contains_pointers_to_new_space = false;

  if (map_aligned_start != start) {
    Address prev_map = map_aligned_start - Map::kSize;
    ASSERT(Memory::Object_at(prev_map)->IsMap());

    Address pointer_fields_start =
        Max(start, prev_map + Map::kPointerFieldsBeginOffset);

    Address pointer_fields_end =
        Min(prev_map + Map::kPointerFieldsEndOffset, end);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(heap,
                                     pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  contains_pointers_to_new_space =
      IteratePointersInDirtyMaps(map_aligned_start,
                                 map_aligned_end,
                                 copy_object_func)
      || contains_pointers_to_new_space;

  if (map_aligned_end != end) {
    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());

    Address pointer_fields_start =
        map_aligned_end + Map::kPointerFieldsBeginOffset;

    Address pointer_fields_end =
        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(heap,
                                     pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  return contains_pointers_to_new_space;
}


void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;
  Page* page = Page::FromAddress(start);

  uint32_t marks = page->GetRegionMarks();

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (InFromSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      callback(reinterpret_cast<HeapObject**>(slot));
      if (InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        marks |= page->GetRegionMaskForAddress(slot_address);
      }
    }
    slot_address += kPointerSize;
  }

  page->SetRegionMarks(marks);
}


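// Walks the dirty marks for [area_start, area_end): bit N of marks
// corresponds to the Nth region overlapping the area.  The first and last
// regions may overlap the area only partially, which is why they are handled
// separately from the fully covered regions in the middle.  Returns the new
// mark word, in which a bit stays set only if the visitor found surviving
// new-space pointers in that region.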
4653uint32_t Heap::IterateDirtyRegions(
4654 uint32_t marks,
4655 Address area_start,
4656 Address area_end,
4657 DirtyRegionCallback visit_dirty_region,
4658 ObjectSlotCallback copy_object_func) {
4659 uint32_t newmarks = 0;
4660 uint32_t mask = 1;
4661
4662 if (area_start >= area_end) {
4663 return newmarks;
4664 }
4665
4666 Address region_start = area_start;
4667
4668 // area_start does not necessarily coincide with start of the first region.
4669 // Thus to calculate the beginning of the next region we have to align
4670 // area_start by Page::kRegionSize.
4671 Address second_region =
4672 reinterpret_cast<Address>(
4673 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4674 ~Page::kRegionAlignmentMask);
4675
4676 // Next region might be beyond area_end.
4677 Address region_end = Min(second_region, area_end);
4678
4679 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004680 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004681 newmarks |= mask;
4682 }
4683 }
4684 mask <<= 1;
4685
4686 // Iterate subsequent regions which fully lay inside [area_start, area_end[.
4687 region_start = region_end;
4688 region_end = region_start + Page::kRegionSize;
4689
4690 while (region_end <= area_end) {
4691 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004692 if (visit_dirty_region(this,
4693 region_start,
4694 region_end,
4695 copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004696 newmarks |= mask;
4697 }
4698 }
4699
4700 region_start = region_end;
4701 region_end = region_start + Page::kRegionSize;
4702
4703 mask <<= 1;
4704 }
4705
4706 if (region_start != area_end) {
4707 // A small piece of area left uniterated because area_end does not coincide
4708 // with region end. Check whether region covering last part of area is
4709 // dirty.
4710 if (marks & mask) {
Steve Block44f0eee2011-05-26 01:26:41 +01004711 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004712 newmarks |= mask;
4713 }
4714 }
4715 }
4716
4717 return newmarks;
4718}
4719
4720
4721
void Heap::IterateDirtyRegions(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func,
    ExpectedPageWatermarkState expected_page_watermark_state) {

  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    uint32_t marks = page->GetRegionMarks();

    if (marks != Page::kAllRegionsCleanMarks) {
      Address start = page->ObjectAreaStart();

      // Do not try to visit pointers beyond the page allocation watermark.
      // The page can contain garbage pointers there.
      Address end;

      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
          page->IsWatermarkValid()) {
        end = page->AllocationWatermark();
      } else {
        end = page->CachedAllocationWatermark();
      }

      ASSERT(space == old_pointer_space_ ||
             (space == map_space_ &&
              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));

      page->SetRegionMarks(IterateDirtyRegions(marks,
                                               start,
                                               end,
                                               visit_dirty_region,
                                               copy_object_func));
    }

    // Mark the page watermark as invalid to maintain the watermark validity
    // invariant. See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
    page->InvalidateWatermark(true);
  }
}


void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}


void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
  v->Synchronize("symbol_table");
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.Iterate(v);
  }
  v->Synchronize("external_string_table");
}


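// Visits the strong root list; the Synchronize() tags let the serializer and
// deserializer verify, in debug builds, that they agree on the order in which
// roots are visited.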
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize("strong_root_list");

  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
  v->Synchronize("symbol");

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize("bootstrapper");
  isolate_->Iterate(v);
  v->Synchronize("top");
  Relocatable::Iterate(v);
  v->Synchronize("relocatable");

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
#endif
  v->Synchronize("debug");
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize("compilationcache");

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  v->Synchronize("handlescope");

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize("builtins");

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize("globalhandles");

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize("threadmanager");

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to. During
  // serialization this does nothing, since the partial snapshot cache is
  // empty. However, the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However, at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}


// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         int max_old_gen_size,
                         int max_executable_size) {
  if (HasBeenSetup()) return false;

  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space. We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
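  // Heuristic: allow about ten semispaces' worth of externally allocated
  // memory to accumulate before the external-allocation limit is hit.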
  external_allocation_limit_ = 10 * max_semispace_size_;

  // The old generation is paged.
  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);

  configured_ = true;
  return true;
}


bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
                       FLAG_max_old_space_size * MB,
                       FLAG_max_executable_size * MB);
}


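// Fills *stats with a snapshot of the current heap layout, e.g. for crash or
// out-of-memory reporting. With take_snapshot set, it additionally walks the
// heap and tallies object counts and sizes per instance type.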
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
  *stats->start_marker = HeapStats::kStartMarker;
  *stats->end_marker = HeapStats::kEndMarker;
  *stats->new_space_size = new_space_.SizeAsInt();
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
  *stats->old_pointer_space_size = old_pointer_space_->Size();
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
  *stats->old_data_space_size = old_data_space_->Size();
  *stats->old_data_space_capacity = old_data_space_->Capacity();
  *stats->code_space_size = code_space_->Size();
  *stats->code_space_capacity = code_space_->Capacity();
  *stats->map_space_size = map_space_->Size();
  *stats->map_space_capacity = map_space_->Capacity();
  *stats->cell_space_size = cell_space_->Size();
  *stats->cell_space_capacity = cell_space_->Capacity();
  *stats->lo_space_size = lo_space_->Size();
  isolate_->global_handles()->RecordStats(stats);
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
  *stats->memory_allocator_capacity =
      isolate()->memory_allocator()->Size() +
      isolate()->memory_allocator()->Available();
  *stats->os_error = OS::GetLastError();
  if (take_snapshot) {
    HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      InstanceType type = obj->map()->instance_type();
      ASSERT(0 <= type && type <= LAST_TYPE);
      stats->objects_per_type[type]++;
      stats->size_per_type[type] += obj->Size();
    }
  }
}


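// Sum of all old-generation space sizes; new space is intentionally excluded.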
intptr_t Heap::PromotedSpaceSize() {
  return old_pointer_space_->Size()
      + old_data_space_->Size()
      + code_space_->Size()
      + map_space_->Size()
      + cell_space_->Size()
      + lo_space_->Size();
}


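// External memory retained since the last global GC. This is presumably
// folded into the promoted-size figures used for old-generation GC limits.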
int Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}

#ifdef DEBUG

// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;


class HeapDebugUtils {
 public:
  explicit HeapDebugUtils(Heap* heap)
    : search_for_any_global_(false),
      search_target_(NULL),
      found_target_(false),
      object_stack_(20),
      heap_(heap) {
  }

  class MarkObjectVisitor : public ObjectVisitor {
   public:
    explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Mark all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

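  // An object is marked as visited by adding kMarkTag to its map word, so the
  // map word of a visited object no longer looks like a HeapObject pointer.
  // UnmarkObjectRecursively recovers the map by subtracting the tag again.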
  void MarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (!map->IsHeapObject()) return;  // visited before

    if (found_target_) return;  // stop if target found
    object_stack_.Add(obj);
    if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
        (!search_for_any_global_ && (obj == search_target_))) {
      found_target_ = true;
      return;
    }

    // not visited yet
    Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

    Address map_addr = map_p->address();

    obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

    MarkObjectRecursively(&map);

    MarkObjectVisitor mark_visitor(this);

    obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                     &mark_visitor);

    if (!found_target_)  // don't pop if found the target
      object_stack_.RemoveLast();
  }


  class UnmarkObjectVisitor : public ObjectVisitor {
   public:
    explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Unmark all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->UnmarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };


  void UnmarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (map->IsHeapObject()) return;  // unmarked already

    Address map_addr = reinterpret_cast<Address>(map);

    map_addr -= kMarkTag;

    ASSERT_TAG_ALIGNED(map_addr);

    HeapObject* map_p = HeapObject::FromAddress(map_addr);

    obj->set_map(reinterpret_cast<Map*>(map_p));

    UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

    UnmarkObjectVisitor unmark_visitor(this);

    obj->IterateBody(Map::cast(map_p)->instance_type(),
                     obj->SizeFromMap(Map::cast(map_p)),
                     &unmark_visitor);
  }


  void MarkRootObjectRecursively(Object** root) {
    if (search_for_any_global_) {
      ASSERT(search_target_ == NULL);
    } else {
      ASSERT(search_target_->IsHeapObject());
    }
    found_target_ = false;
    object_stack_.Clear();

    MarkObjectRecursively(root);
    UnmarkObjectRecursively(root);

    if (found_target_) {
      PrintF("=====================================\n");
      PrintF("====        Path to object       ====\n");
      PrintF("=====================================\n\n");

      ASSERT(!object_stack_.is_empty());
      for (int i = 0; i < object_stack_.length(); i++) {
        if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
        Object* obj = object_stack_[i];
        obj->Print();
      }
      PrintF("=====================================\n");
    }
  }

  // Helper class for visiting HeapObjects recursively.
  class MarkRootVisitor: public ObjectVisitor {
   public:
    explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Visit all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkRootObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  bool search_for_any_global_;
  Object* search_target_;
  bool found_target_;
  List<Object*> object_stack_;
  Heap* heap_;

  friend class Heap;
};

#endif

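// Sets up all heap spaces and, when create_heap_objects is true, the initial
// maps and objects. Returns false on any failure; the caller is expected to
// check the result and call Heap::TearDown() to release allocated memory.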
bool Heap::Setup(bool create_heap_objects) {
#ifdef DEBUG
  debug_utils_ = new HeapDebugUtils(this);
#endif

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g., through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set, or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

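  // One-time, process-wide initialization of GC visitor tables, guarded by a
  // mutex because multiple isolates may run Setup() concurrently.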
  gc_initializer_mutex->Lock();
  static bool initialized_gc = false;
  if (!initialized_gc) {
    initialized_gc = true;
    InitializeScavengingVisitorsTables();
    NewSpaceScavenger::Initialize();
    MarkCompactCollector::Initialize();
  }
  gc_initializer_mutex->Unlock();

  MarkMapPointersAsEncoded(false);

  // Set up the memory allocator and reserve a chunk of memory for new
  // space. The chunk is double the size of the requested reserved
  // new space size to ensure that we can find a pair of semispaces that
  // are contiguous and aligned to their size.
  if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
    return false;
  void* chunk =
      isolate_->memory_allocator()->ReserveInitialChunk(
          4 * reserved_semispace_size_);
  if (chunk == NULL) return false;

  // Align the pair of semispaces to their size, which must be a power
  // of 2.
  Address new_space_start =
      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(NULL, 0)) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(NULL, 0)) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->Setup(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(NULL, 0)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, FLAG_use_big_map_space
      ? max_old_generation_size_
      : MapSpace::kMaxMapPageIndex * Page::kPageSize,
      FLAG_max_map_space_pages,
      MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->Setup(NULL, 0)) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->Setup(NULL, 0)) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects.
    if (!CreateInitialObjects()) return false;

    global_contexts_list_ = undefined_value();
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  return true;
}


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}


void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("mark_compact_count=%d ", mc_count_);
    PrintF("max_gc_pause=%d ", get_max_gc_pause());
    PrintF("min_in_mutator=%d ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  isolate_->memory_allocator()->TearDown();

#ifdef DEBUG
  delete debug_utils_;
  debug_utils_ = NULL;
#endif
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->Shrink();
}


void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif



Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return HEAP->new_space();
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    case LO_SPACE:
      return HEAP->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator()
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return an iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


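// Filters used by HeapIterator to skip free-list nodes or unreachable objects
// while iterating. Both implementations temporarily set mark bits (outside of
// any GC) and clear them again as filtered objects are encountered.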
class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class FreeListNodesFilter : public HeapObjectsFilter {
 public:
  FreeListNodesFilter() {
    MarkFreeListNodes();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  void MarkFreeListNodes() {
    Heap* heap = HEAP;
    heap->old_pointer_space()->MarkFreeListNodes();
    heap->old_data_space()->MarkFreeListNodes();
    MarkCodeSpaceFreeListNodes(heap);
    heap->map_space()->MarkFreeListNodes();
    heap->cell_space()->MarkFreeListNodes();
  }

  void MarkCodeSpaceFreeListNodes(Heap* heap) {
    // For code space, using FreeListNode::IsFreeListNode is OK.
    HeapObjectIterator iter(heap->code_space());
    for (HeapObject* obj = iter.next_object();
         obj != NULL;
         obj = iter.next_object()) {
      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
    }
  }

  AssertNoAllocation no_alloc;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkUnreachableObjects();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  class UnmarkingVisitor : public ObjectVisitor {
   public:
    UnmarkingVisitor() : list_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        if (obj->IsMarked()) {
          obj->ClearMark();
          list_.Add(obj);
        }
      }
    }

    bool can_process() { return !list_.is_empty(); }

    void ProcessNext() {
      HeapObject* obj = list_.RemoveLast();
      obj->Iterate(this);
    }

   private:
    List<HeapObject*> list_;
  };

  void MarkUnreachableObjects() {
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      obj->SetMark();
    }
    UnmarkingVisitor visitor;
    HEAP->IterateRoots(&visitor, VISIT_ALL);
    while (visitor.can_process())
      visitor.ProcessNext();
  }

  AssertNoAllocation no_alloc;
};


HeapIterator::HeapIterator()
    : filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
    : filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


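// In filtering mode, object sizes come from
// MarkCompactCollector::SizeOfMarkedObject, since the filters above flag
// objects via their mark bits.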
void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
  switch (filtering_) {
    case kFilterFreeListNodes:
      filter_ = new FreeListNodesFilter;
      break;
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}



#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  object_stack_.Clear();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_global_context = obj->IsGlobalContext();

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Contexts properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
#ifdef OBJECT_PRINT
      obj->Print();
#else
      obj->ShortPrint();
#endif
    }
    PrintF("=====================================\n");
  }
}
#endif  // DEBUG || LIVE_OBJECT_LIST


#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif


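// Sums the bytes wasted or sitting on free lists across the old spaces; this
// is the "holes_size" figure printed in --trace-gc-nvp output.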
static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}


GCTracer::GCTracer(Heap* heap)
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0),
      heap_(heap) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
  previous_marked_count_ =
      heap_->mark_compact_collector_.previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = heap_->SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ =
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;

  if (heap_->last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Printf ONE line iff flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("%s",
               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
                                                           : "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


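// Drops entries that the GC has overwritten with null and migrates strings
// that were promoted out of new space onto the old-space list.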
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


} }  // namespace v8::internal