// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;


static Mutex* gc_initializer_mutex = OS::CreateMutex();


Heap::Heap()
    : isolate_(NULL),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
      reserved_semispace_size_(2*MB),
      max_semispace_size_(2*MB),
      initial_semispace_size_(128*KB),
      max_old_generation_size_(192*MB),
      max_executable_size_(max_old_generation_size_),
      code_range_size_(0),
#elif defined(V8_TARGET_ARCH_X64)
      reserved_semispace_size_(16*MB),
      max_semispace_size_(16*MB),
      initial_semispace_size_(1*MB),
      max_old_generation_size_(1400*MB),
      max_executable_size_(256*MB),
      code_range_size_(512*MB),
#else
      reserved_semispace_size_(8*MB),
      max_semispace_size_(8*MB),
      initial_semispace_size_(512*KB),
      max_old_generation_size_(700*MB),
      max_executable_size_(128*MB),
      code_range_size_(0),
#endif
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      mc_count_(0),
      ms_count_(0),
      gc_count_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_allowed_(true),
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
      debug_utils_(NULL),
#endif  // DEBUG
      old_gen_promotion_limit_(kMinimumPromotionLimit),
      old_gen_allocation_limit_(kMinimumAllocationLimit),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      hidden_symbol_(NULL),
      global_gc_prologue_callback_(NULL),
      global_gc_epilogue_callback_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      configured_(false),
      is_safe_to_read_maps_(true) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  global_contexts_list_ = NULL;
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
}


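// A minimal sketch of the build-time override mentioned in the constructor
// above (illustrative only; the value 4194304 is an arbitrary example, not a
// recommended setting):
//
//   # Pin the semispace size to 4 MB when compiling, e.g. for snapshot builds:
//   g++ -DV8_MAX_SEMISPACE_SIZE=4194304 ... heap.cc
//
// With that define in place, the constructor overwrites the platform-specific
// defaults for both reserved_semispace_size_ and max_semispace_size_, so the
// snapshot build and the runtime build agree on the young generation layout.
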
intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


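// A hedged usage sketch of the accessors above (the helper name and output
// format are hypothetical, not part of this file): given a heap that has been
// set up, they compose into simple diagnostics.
//
//   static void PrintHeapUtilization(Heap* heap) {
//     if (!heap->HasBeenSetup()) return;
//     // Committed memory is what was requested from the OS; capacity is what
//     // the spaces can hand out to allocations before growing.
//     PrintF("capacity: %" V8_PTR_PREFIX "d, committed: %" V8_PTR_PREFIX "d"
//            ", available: %" V8_PTR_PREFIX "d\n",
//            heap->Capacity(), heap->CommittedMemory(), heap->Available());
//   }
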
int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!HEAP->InNewSpace(object));  // Code only works for old objects.
  ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is a global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    return MARK_COMPACTOR;
  }

  // Has allocation in OLD or LO space failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    return MARK_COMPACTOR;
  }

  // Default.
  return SCAVENGER;
}


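// A compact restatement of the selection policy above, for quick reference
// (no additional logic beyond what the function already does):
//
//   space != NEW_SPACE, or --gc-global          -> MARK_COMPACTOR
//   old-gen promotion limit reached             -> MARK_COMPACTOR
//   old/large-object allocation already failed  -> MARK_COMPACTOR
//   MaxAvailable() <= new_space_.Size()         -> MARK_COMPACTOR
//   otherwise                                   -> SCAVENGER
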
// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // --log-gc is set.  The following logic is used to avoid double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         isolate_->memory_allocator()->Size(),
         isolate_->memory_allocator()->Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


void Heap::GarbageCollectionPrologue() {
  isolate_->transcendental_cache()->Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif  // DEBUG

#if defined(DEBUG)
  ReportStatisticsBeforeGC();
#endif  // DEBUG

  LiveObjectList::GCPrologue();
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
  LiveObjectList::GCEpilogue();
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  isolate_->counters()->alive_after_last_gc()->Set(
      static_cast<int>(SizeOfObjects()));

  isolate_->counters()->symbol_table_capacity()->Set(
      symbol_table()->Capacity());
  isolate_->counters()->number_of_symbols()->Set(
      symbol_table()->NumberOfElements());
#if defined(DEBUG)
  ReportStatisticsAfterGC();
#endif  // DEBUG
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->AfterGarbageCollection();
#endif  // ENABLE_DEBUGGER_SUPPORT
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector_.SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  mark_compact_collector_.SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  mark_compact_collector()->SetForceCompaction(true);

  // A major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until the next
  // major GC.  Therefore, if we collect aggressively and a weak handle
  // callback has been invoked, we rerun the major GC to release objects
  // which become garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callback invocations.
  // Therefore we stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  mark_compact_collector()->SetForceCompaction(false);
}


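// A hedged usage sketch of the two entry points above (illustrative only;
// how calling code obtains the Heap* depends on its setup):
//
//   heap->CollectAllGarbage(true);       // one full GC, compaction forced
//   heap->CollectAllAvailableGarbage();  // up to 7 full GCs, rerun as long as
//                                        // weak callbacks keep freeing more
//
// The aggressive variant is the one to use when memory must be returned as
// completely as possible; the single-cycle variant is cheaper.
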
bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(isolate_, GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer(this);
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? isolate_->counters()->gc_scavenger()
        : isolate_->counters()->gc_compactor();
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer(this);
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  HEAP->symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


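// A worked example of the large-object slack computation above (numbers are
// illustrative only): with large_object_size = 1 MB requested and 512 KB
// requested in each of the five paged spaces, the reservation tested against
// the large object space is
//
//   1 MB * 2 + 5 * 512 KB = 4.5 MB
//
// i.e. the doubled large-object request plus everything the other spaces may
// take out of the same old-generation growth budget.
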
void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

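// A worked example of the trend computation above (illustrative numbers): if
// the new space held 1000000 bytes at GC start and 300000 bytes of young
// objects survived, then
//
//   survival_rate = 300000 * 100 / 1000000 = 30 (percent)
//
// With a previous survival_rate_ of 40, survival_rate_diff is +10; if that
// exceeds kYoungSurvivalRateAllowedDeviation the trend becomes DECREASING
// (survival dropped), a symmetric negative diff yields INCREASING, and
// anything within the deviation band counts as STABLE.
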
bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(isolate_, CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);
    sweep_generation_++;
    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that the mutator is either building or
      // modifying a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone the subsequent mark-sweep collection and thus trade memory
      // space for mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  isolate_->counters()->objs_since_last_young()->Set(0);

  gc_post_processing_depth_++;
  { DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
  }
  gc_post_processing_depth_--;

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


806 gc_state_ = MARK_COMPACT;
Steve Block44f0eee2011-05-26 01:26:41 +0100807 LOG(isolate_, ResourceEvent("markcompact", "begin"));
Steve Blocka7e24c12009-10-30 11:49:00 +0000808
Steve Block44f0eee2011-05-26 01:26:41 +0100809 mark_compact_collector_.Prepare(tracer);
Steve Blocka7e24c12009-10-30 11:49:00 +0000810
Steve Block44f0eee2011-05-26 01:26:41 +0100811 bool is_compacting = mark_compact_collector_.IsCompacting();
Steve Blocka7e24c12009-10-30 11:49:00 +0000812
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100813 if (is_compacting) {
814 mc_count_++;
815 } else {
816 ms_count_++;
817 }
818 tracer->set_full_gc_count(mc_count_ + ms_count_);
819
Steve Blocka7e24c12009-10-30 11:49:00 +0000820 MarkCompactPrologue(is_compacting);
821
Steve Block44f0eee2011-05-26 01:26:41 +0100822 is_safe_to_read_maps_ = false;
823 mark_compact_collector_.CollectGarbage();
824 is_safe_to_read_maps_ = true;
Steve Blocka7e24c12009-10-30 11:49:00 +0000825
Steve Block44f0eee2011-05-26 01:26:41 +0100826 LOG(isolate_, ResourceEvent("markcompact", "end"));
Steve Blocka7e24c12009-10-30 11:49:00 +0000827
828 gc_state_ = NOT_IN_GC;
829
830 Shrink();
831
Steve Block44f0eee2011-05-26 01:26:41 +0100832 isolate_->counters()->objs_since_last_full()->Set(0);
Steve Block6ded16b2010-05-10 14:33:55 +0100833
834 contexts_disposed_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000835}
836
837
838void Heap::MarkCompactPrologue(bool is_compacting) {
839 // At any old GC clear the keyed lookup cache to enable collection of unused
840 // maps.
Steve Block44f0eee2011-05-26 01:26:41 +0100841 isolate_->keyed_lookup_cache()->Clear();
842 isolate_->context_slot_cache()->Clear();
843 isolate_->descriptor_lookup_cache()->Clear();
Ben Murdoch589d6972011-11-30 16:04:58 +0000844 StringSplitCache::Clear(string_split_cache());
Steve Blocka7e24c12009-10-30 11:49:00 +0000845
Steve Block44f0eee2011-05-26 01:26:41 +0100846 isolate_->compilation_cache()->MarkCompactPrologue();
Steve Blocka7e24c12009-10-30 11:49:00 +0000847
Kristian Monsen25f61362010-05-21 11:50:48 +0100848 CompletelyClearInstanceofCache();
849
Leon Clarkee46be812010-01-19 14:06:41 +0000850 if (is_compacting) FlushNumberStringCache();
Ben Murdoch3fb3ca82011-12-02 17:19:32 +0000851 if (FLAG_cleanup_code_caches_at_gc) {
852 polymorphic_code_cache()->set_cache(undefined_value());
853 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000854
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100855 ClearNormalizedMapCaches();
Steve Blocka7e24c12009-10-30 11:49:00 +0000856}
857
858
859Object* Heap::FindCodeObject(Address a) {
John Reck59135872010-11-02 12:39:01 -0700860 Object* obj = NULL; // Initialization to please compiler.
861 { MaybeObject* maybe_obj = code_space_->FindObject(a);
862 if (!maybe_obj->ToObject(&obj)) {
863 obj = lo_space_->FindObject(a)->ToObjectUnchecked();
864 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000865 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000866 return obj;
867}
868
869
870// Helper class for copying HeapObjects
871class ScavengeVisitor: public ObjectVisitor {
872 public:
Steve Block44f0eee2011-05-26 01:26:41 +0100873 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
Steve Blocka7e24c12009-10-30 11:49:00 +0000874
875 void VisitPointer(Object** p) { ScavengePointer(p); }
876
877 void VisitPointers(Object** start, Object** end) {
878 // Copy all HeapObject pointers in [start, end)
879 for (Object** p = start; p < end; p++) ScavengePointer(p);
880 }
881
882 private:
883 void ScavengePointer(Object** p) {
884 Object* object = *p;
Steve Block44f0eee2011-05-26 01:26:41 +0100885 if (!heap_->InNewSpace(object)) return;
Steve Blocka7e24c12009-10-30 11:49:00 +0000886 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
887 reinterpret_cast<HeapObject*>(object));
888 }
Steve Block44f0eee2011-05-26 01:26:41 +0100889
890 Heap* heap_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000891};
892
893
Steve Blocka7e24c12009-10-30 11:49:00 +0000894#ifdef DEBUG
895// Visitor class to verify pointers in code or data space do not point into
896// new space.
897class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
898 public:
899 void VisitPointers(Object** start, Object**end) {
900 for (Object** current = start; current < end; current++) {
901 if ((*current)->IsHeapObject()) {
Steve Block44f0eee2011-05-26 01:26:41 +0100902 ASSERT(!HEAP->InNewSpace(HeapObject::cast(*current)));
Steve Blocka7e24c12009-10-30 11:49:00 +0000903 }
904 }
905 }
906};
907
908
909static void VerifyNonPointerSpacePointers() {
910 // Verify that there are no pointers to new space in spaces where we
911 // do not expect them.
912 VerifyNonPointerSpacePointersVisitor v;
Steve Block44f0eee2011-05-26 01:26:41 +0100913 HeapObjectIterator code_it(HEAP->code_space());
Leon Clarked91b9f72010-01-27 17:25:45 +0000914 for (HeapObject* object = code_it.next();
915 object != NULL; object = code_it.next())
Steve Blocka7e24c12009-10-30 11:49:00 +0000916 object->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +0000917
Steve Block44f0eee2011-05-26 01:26:41 +0100918 HeapObjectIterator data_it(HEAP->old_data_space());
Leon Clarked91b9f72010-01-27 17:25:45 +0000919 for (HeapObject* object = data_it.next();
920 object != NULL; object = data_it.next())
921 object->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +0000922}
923#endif
924
925
Steve Block6ded16b2010-05-10 14:33:55 +0100926void Heap::CheckNewSpaceExpansionCriteria() {
927 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
928 survived_since_last_expansion_ > new_space_.Capacity()) {
929 // Grow the size of new space if there is room to grow and enough
930 // data has survived scavenge since the last expansion.
931 new_space_.Grow();
932 survived_since_last_expansion_ = 0;
933 }
934}
935
936
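// A worked example of the expansion criteria above (illustrative numbers):
// with a current new-space capacity of 1 MB, a maximum capacity of 8 MB, and
// 1.5 MB of data having survived scavenges since the last expansion, both
// conditions hold and the new space grows.  The survival counter then
// restarts from zero, so the next growth step again requires a full
// capacity's worth of survivors.
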
static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  SwitchScavengingVisitorsTableIfProfilingWasEnabled();

  Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update the allocation watermark of the top page during linear
  // allocation, to avoid overhead.  So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark.  This guarantees that dirty regions iteration will use
  // a correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm.
  LOG(isolate_, ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue_.Initialize(new_space_.ToSpaceHigh());

  is_safe_to_read_maps_ = false;
  ScavengeVisitor scavenge_visitor(this);
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &Heap::IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge objects reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
      &IsUnscavengedHeapObject);
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
      &scavenge_visitor);
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  LiveObjectList::UpdateReferencesForScavengeGC();
  isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();

  ASSERT(new_space_front == new_space_.top());

  is_safe_to_read_maps_ = true;

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(isolate_, ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
                                                                Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // An unreachable external string can be finalized.
    heap->FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}


void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  external_string_table_.Verify();

  if (external_string_table_.new_space_strings_.is_empty()) return;

  Object** start = &external_string_table_.new_space_strings_[0];
  Object** end = start + external_string_table_.new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(InFromSpace(*p));
    String* target = updater_func(this, p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      external_string_table_.AddOldString(target);
    }
  }

  ASSERT(last <= end);
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
}


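// The loop above is the classic in-place filtering idiom: 'p' scans every
// entry while 'last' trails behind, so surviving new-space strings are packed
// to the front and the table is shrunk once at the end.  A minimal standalone
// sketch of the same idiom (hypothetical types, not V8 code):
//
//   int last = 0;
//   for (int i = 0; i < n; i++) {
//     Entry e = entries[i];
//     if (!ShouldKeep(e)) continue;  // dropped, or moved to another list
//     entries[last++] = e;           // kept, packed to the front
//   }
//   n = last;  // one shrink instead of many individual removals
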
static Object* ProcessFunctionWeakReferences(Heap* heap,
                                             Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = heap->undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (candidate != heap->undefined_value()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == heap->undefined_value()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is the new tail.
      tail = candidate_function;
    }
    // Move to the next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(heap->undefined_value());
  }

  return head;
}


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (candidate != undefined_value()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head == undefined_value()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(this,
                            Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is the new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              this,
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(this,
                                       Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to the next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there are one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(this,
                        Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};

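// DoScavenge implements the breadth-first copying pass of a Cheney-style
// scavenge: the region between new_space_front and new_space_.top() acts as
// the work queue of copied-but-unscanned objects, and promoted objects are
// drained from promotion_queue_ so that their pointers into the from
// semispace get updated as well. The loop repeats until no new objects are
// produced.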
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue_.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue_.remove(&target, &size);

      // A promoted object might already have been partially visited
      // during dirty-region iteration. Thus we search specifically for
      // pointers into the from semispace instead of looking for pointers
      // into new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}

enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


typedef void (*ScavengingCallback)(Map* map,
                                   HeapObject** slot,
                                   HeapObject* object);


static Atomic32 scavenging_visitors_table_mode_;
static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;


INLINE(static void DoScavengeObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* obj));


void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}

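// ScavengingVisitor is compiled in two flavors, selected by the
// LoggingAndProfiling template parameter, so that the common case pays no
// per-object cost for logger/profiler bookkeeping. Each instantiation owns
// a dispatch table mapping visitor ids to evacuation callbacks; the active
// table is copied into scavenging_visitors_table_ at startup and swapped
// when profiling is turned on (see
// SwitchScavengingVisitorsTableIfProfilingWasEnabled below).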
template<LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);

    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSlicedString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SlicedString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        Visit);

    table_.Register(kVisitJSFunction,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<JSFunction::kSize>);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object. Returns the target object.
  INLINE(static HeapObject* MigrateObject(Heap* heap,
                                          HeapObject* source,
                                          HeapObject* target,
                                          int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
      Isolate* isolate = heap->isolate();
      if (isolate->logger()->is_logging() ||
          CpuProfiler::is_profiling(isolate)) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
    }

    return target;
  }

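  // EvacuateObject is the generic copying step: objects past the promotion
  // threshold are moved to old space (large ones to the large-object space,
  // data-only ones to old data space, the rest to old pointer space and onto
  // the promotion queue for later pointer scanning); if old-space allocation
  // fails, the object is copied within new space instead. The template
  // parameters let callers that statically know the object kind and size
  // skip the corresponding runtime checks.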
  template<ObjectContents object_contents, SizeRestriction size_restriction>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    ASSERT((size_restriction != SMALL) ||
           (object_size <= Page::kMaxHeapObjectSize));
    ASSERT(object->Size() == object_size);

    Heap* heap = map->heap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if ((size_restriction != SMALL) &&
          (object_size > Page::kMaxHeapObjectSize)) {
        maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size);
      } else {
        if (object_contents == DATA_OBJECT) {
          maybe_result = heap->old_data_space()->AllocateRaw(object_size);
        } else {
          maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
        }
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);
        *slot = MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          heap->promotion_queue()->insert(target, object_size);
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    Object* result =
        heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
    *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
    return;
  }

  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
                                                 slot,
                                                 object,
                                                 object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map,
                                              slot,
                                              object,
                                              object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqAsciiString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqAsciiString::cast(object)->
        SeqAsciiStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

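  // A shortcut candidate is a cons string whose second part is the empty
  // string. Instead of evacuating such a degenerate cons cell, the scavenger
  // forwards references directly to its first part, which both saves a copy
  // and flattens the string for free.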
  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    if (ConsString::cast(object)->unchecked_second() ==
        map->heap()->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!map->heap()->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};

template<LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<logging_and_profiling_mode>::table_;


static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize();
  scavenging_visitors_table_.CopyFrom(
      ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable());
  scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED;
}

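// The shared dispatch table starts out in the non-logging mode and is
// switched at most once, to the logging mode, the first time any isolate
// enables the logger, the CPU profiler, or the heap profiler. The mode word
// acts as a sticky flag, so isolates that race past the check below simply
// find the table already updated.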
void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() {
  if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) {
    // Table was already updated by some isolate.
    return;
  }

  if (isolate()->logger()->is_logging() ||
      CpuProfiler::is_profiling(isolate()) ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_profiling())) {
    // If one of the isolates is doing a scavenge at this moment it might
    // see this table in an inconsistent state when some of the callbacks
    // point to ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
    // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>. However, this
    // does not lead to any bugs, as such an isolate does not have profiling
    // enabled, and any isolate with profiling enabled is guaranteed to see
    // the table in the consistent state.
    scavenging_visitors_table_.CopyFrom(
        ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());

    // We use Release_Store to prevent reordering of this write before writes
    // to the table.
    Release_Store(&scavenging_visitors_table_mode_,
                  LOGGING_AND_PROFILING_ENABLED);
  }
}

void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  ASSERT(HEAP->InFromSpace(object));
  MapWord first_word = object->map_word();
  ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  DoScavengeObject(map, p, object);
}

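// During bootstrapping the meta map and a few other maps must be created
// before the empty fixed array and the empty descriptor array exist, so
// AllocatePartialMap below fills in only the fields needed to allocate with
// the map; the remaining fields are patched up later in CreateInitialMaps.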
MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  return result;
}

MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawMap();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value());
  map->set_constructor(null_value());
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->init_instance_descriptors();
  map->set_code_cache(empty_fixed_array());
  map->set_prototype_transitions(empty_fixed_array());
  map->set_unused_property_fields(0);
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  map->set_elements_kind(FAST_ELEMENTS);

  // If the map object is aligned fill the padding area with Smi 0 objects.
  if (Map::kPadStart < Map::kSize) {
    memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
           0,
           Map::kSize - Map::kPadStart);
  }
  return map;
}

MaybeObject* Heap::AllocateCodeCache() {
  Object* result;
  { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  CodeCache* code_cache = CodeCache::cast(result);
  code_cache->set_default_cache(empty_fixed_array());
  code_cache->set_normal_type_cache(undefined_value());
  return code_cache;
}


MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}

const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
  {contents, k##name##RootIndex},
  SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
#undef CONSTANT_SYMBOL_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};

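// CreateInitialMaps builds the root maps in dependency order: first the
// partial meta map, fixed array map, and oddball map, then the empty fixed
// array and empty descriptor array, after which the partial maps can be
// completed and every other root map allocated normally via AllocateMap.
// Returning false at any step signals an allocation failure during
// bootstrapping.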
bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->init_instance_descriptors();
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_prototype_transitions(empty_fixed_array());

  fixed_array_map()->init_instance_descriptors();
  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_prototype_transitions(empty_fixed_array());

  oddball_map()->init_instance_descriptors();
  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->set_prototype_transitions(empty_fixed_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_serialized_scope_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_foreign_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_strict_arguments_elements_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
                                         JSGlobalPropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_function_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_with_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_block_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Map* global_context_map = Map::cast(obj);
  global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
  set_global_context_map(global_context_map);

  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
                                         SharedFunctionInfo::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_shared_function_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
                                         JSMessageObject::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_message_object_map(Map::cast(obj));

  ASSERT(!InNewSpace(empty_fixed_array()));
  return true;
}

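// Two entry points follow: the general AllocateHeapNumber honors the
// pretenure flag and may allocate directly in old data space, while the
// single-argument overload is a fast path that allocates in new space,
// falling back to the general version when always_allocate() is in effect.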
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate heap numbers in paged
  // spaces.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;

  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}


MaybeObject* Heap::AllocateHeapNumber(double value) {
  // Use the general version if we're forced to always allocate.
  if (always_allocate()) return AllocateHeapNumber(value, TENURED);

  // This version of AllocateHeapNumber is optimized for
  // allocation in new space.
  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  Object* result;
  { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(heap_number_map());
  HeapNumber::cast(result)->set_value(value);
  return result;
}

MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
  Object* result;
  { MaybeObject* maybe_result = AllocateRawCell();
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(global_property_cell_map());
  JSGlobalPropertyCell::cast(result)->set_value(value);
  return result;
}

MaybeObject* Heap::CreateOddball(const char* to_string,
                                 Object* to_number,
                                 byte kind) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return Oddball::cast(result)->Initialize(to_string, to_number, kind);
}


bool Heap::CreateApiObjects() {
  Object* obj;

  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_neander_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  Object* elements;
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
    if (!maybe_elements->ToObject(&elements)) return false;
  }
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
  set_message_listeners(JSObject::cast(obj));

  return true;
}

void Heap::CreateJSEntryStub() {
  JSEntryStub stub;
  set_js_entry_code(*stub.GetCode());
}


void Heap::CreateJSConstructEntryStub() {
  JSConstructEntryStub stub;
  set_js_construct_entry_code(*stub.GetCode());
}


void Heap::CreateFixedStubs() {
  // Here we create roots for fixed stubs. They are needed at GC
  // for cooking and uncooking (check out frames.cc).
  // This eliminates the need for doing a dictionary lookup in the
  // stub cache for these stubs.
  HandleScope scope;
  // gcc-4.4 has a problem generating correct code for the following snippet:
  // { JSEntryStub stub;
  //   js_entry_code_ = *stub.GetCode();
  // }
  // { JSConstructEntryStub stub;
  //   js_construct_entry_code_ = *stub.GetCode();
  // }
  // To work around the problem, make separate functions without inlining.
  Heap::CreateJSEntryStub();
  Heap::CreateJSConstructEntryStub();
}

bool Heap::CreateInitialObjects() {
  Object* obj;

  // The -0 value must be set before NumberFromDouble works.
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_minus_zero_value(obj);
  ASSERT(signbit(minus_zero_value()->Number()) != 0);

  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_nan_value(obj);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(obj);
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the initial symbol table.
  { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Don't use set_symbol_table() due to asserts.
  roots_[kSymbolTableRootIndex] = obj;

  // Assign the print strings for oddballs after creating the symbol table.
  Object* symbol;
  { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
    if (!maybe_symbol->ToObject(&symbol)) return false;
  }
  Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
  Oddball::cast(undefined_value())->set_to_number(nan_value());

  // Allocate the null_value.
  { MaybeObject* maybe_obj =
        Oddball::cast(null_value())->Initialize("null",
                                                Smi::FromInt(0),
                                                Oddball::kNull);
    if (!maybe_obj->ToObject(&obj)) return false;
  }

  { MaybeObject* maybe_obj = CreateOddball("true",
                                           Smi::FromInt(1),
                                           Oddball::kTrue);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_true_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("false",
                                           Smi::FromInt(0),
                                           Oddball::kFalse);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_false_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("hole",
                                           Smi::FromInt(-1),
                                           Oddball::kTheHole);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_the_hole_value(obj);

  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
                                           Smi::FromInt(-4),
                                           Oddball::kArgumentMarker);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_arguments_marker(obj);

  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
                                           Smi::FromInt(-2),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_no_interceptor_result_sentinel(obj);

  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
                                           Smi::FromInt(-3),
                                           Oddball::kOther);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_termination_exception(obj);

  // Allocate the empty string.
  { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_string(String::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
    { MaybeObject* maybe_obj =
          LookupAsciiSymbol(constant_symbol_table[i].contents);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[constant_symbol_table[i].index] = String::cast(obj);
  }

  // Allocate the hidden symbol which is used to identify the hidden properties
  // in JSObjects. The hash code has a special value so that it will not match
  // the empty string when searching for the property. It cannot be part of the
  // loop above because it needs to be allocated manually with the special
  // hash code in place. The hash code for the hidden_symbol is zero to ensure
  // that it will always be at the first entry in property descriptors.
  { MaybeObject* maybe_obj =
        AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  hidden_symbol_ = String::cast(obj);

  // Allocate the foreign for __proto__.
  { MaybeObject* maybe_obj =
        AllocateForeign((Address) &Accessors::ObjectPrototype);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_prototype_accessors(Foreign::cast(obj));

  // Allocate the code_stubs dictionary. The initial size is set to avoid
  // expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_stubs(NumberDictionary::cast(obj));

  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
  // is set to avoid expanding the dictionary during bootstrapping.
  { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_monomorphic_cache(NumberDictionary::cast(obj));

  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));

  set_instanceof_cache_function(Smi::FromInt(0));
  set_instanceof_cache_map(Smi::FromInt(0));
  set_instanceof_cache_answer(Smi::FromInt(0));

  CreateFixedStubs();

  // Allocate the dictionary of intrinsic function names.
  { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
                                                                       obj);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_intrinsic_function_names(StringDictionary::cast(obj));

  if (InitializeNumberStringCache()->IsFailure()) return false;

  // Allocate cache for single character ASCII strings.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_single_character_string_cache(FixedArray::cast(obj));

  // Allocate cache for string split.
  { MaybeObject* maybe_obj =
        AllocateFixedArray(StringSplitCache::kStringSplitCacheSize, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_string_split_cache(FixedArray::cast(obj));

  // Allocate cache for external strings pointing to native source code.
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_natives_source_cache(FixedArray::cast(obj));

  // Handling of script id generation is in FACTORY->NewScript.
  set_last_script_id(undefined_value());

  // Initialize keyed lookup cache.
  isolate_->keyed_lookup_cache()->Clear();

  // Initialize context slot cache.
  isolate_->context_slot_cache()->Clear();

  // Initialize descriptor cache.
  isolate_->descriptor_lookup_cache()->Clear();

  // Initialize compilation cache.
  isolate_->compilation_cache()->Clear();

  return true;
}

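// StringSplitCache memoizes the result arrays of String.prototype.split for
// symbol (interned) subject/pattern pairs. It is organized as a flat
// FixedArray of (string, pattern, array) triples indexed by the subject's
// hash, with one overflow slot per hash bucket: Lookup probes the primary
// entry and then the immediately following one.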
Object* StringSplitCache::Lookup(
    FixedArray* cache, String* string, String* pattern) {
  if (!string->IsSymbol() || !pattern->IsSymbol()) return Smi::FromInt(0);
  uint32_t hash = string->Hash();
  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
                    ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == string &&
      cache->get(index + kPatternOffset) == pattern) {
    return cache->get(index + kArrayOffset);
  }
  index = ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
  if (cache->get(index + kStringOffset) == string &&
      cache->get(index + kPatternOffset) == pattern) {
    return cache->get(index + kArrayOffset);
  }
  return Smi::FromInt(0);
}


void StringSplitCache::Enter(Heap* heap,
                             FixedArray* cache,
                             String* string,
                             String* pattern,
                             FixedArray* array) {
  if (!string->IsSymbol() || !pattern->IsSymbol()) return;
  uint32_t hash = string->Hash();
  uint32_t index = ((hash & (kStringSplitCacheSize - 1)) &
                    ~(kArrayEntriesPerCacheEntry - 1));
  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
    cache->set(index + kStringOffset, string);
    cache->set(index + kPatternOffset, pattern);
    cache->set(index + kArrayOffset, array);
  } else {
    uint32_t index2 =
        ((index + kArrayEntriesPerCacheEntry) & (kStringSplitCacheSize - 1));
    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
      cache->set(index2 + kStringOffset, string);
      cache->set(index2 + kPatternOffset, pattern);
      cache->set(index2 + kArrayOffset, array);
    } else {
      cache->set(index2 + kStringOffset, Smi::FromInt(0));
      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
      cache->set(index + kStringOffset, string);
      cache->set(index + kPatternOffset, pattern);
      cache->set(index + kArrayOffset, array);
    }
  }
  if (array->length() < 100) {  // Limit how many new symbols we want to make.
    for (int i = 0; i < array->length(); i++) {
      String* str = String::cast(array->get(i));
      Object* symbol;
      MaybeObject* maybe_symbol = heap->LookupSymbol(str);
      if (maybe_symbol->ToObject(&symbol)) {
        array->set(i, symbol);
      }
    }
  }
  array->set_map(heap->fixed_cow_array_map());
}


void StringSplitCache::Clear(FixedArray* cache) {
  for (int i = 0; i < kStringSplitCacheSize; i++) {
    cache->set(i, Smi::FromInt(0));
  }
}

MaybeObject* Heap::InitializeNumberStringCache() {
  // Compute the size of the number string cache based on the max heap size.
  // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
  // max_semispace_size_ == 8 MB   => number_string_cache_size = 16KB.
  int number_string_cache_size = max_semispace_size_ / 512;
  number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
  Object* obj;
  MaybeObject* maybe_obj =
      AllocateFixedArray(number_string_cache_size * 2, TENURED);
  if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
  return maybe_obj;
}


void Heap::FlushNumberStringCache() {
  // Flush the number to string cache.
  int len = number_string_cache()->length();
  for (int i = 0; i < len; i++) {
    number_string_cache()->set_undefined(this, i);
  }
}

static inline int double_get_hash(double d) {
  DoubleRepresentation rep(d);
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}


static inline int smi_get_hash(Smi* smi) {
  return smi->value();
}

Steve Blocka7e24c12009-10-30 11:49:00 +00002361Object* Heap::GetNumberStringCache(Object* number) {
2362 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002363 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002364 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002365 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002366 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002367 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002368 }
2369 Object* key = number_string_cache()->get(hash * 2);
2370 if (key == number) {
2371 return String::cast(number_string_cache()->get(hash * 2 + 1));
2372 } else if (key->IsHeapNumber() &&
2373 number->IsHeapNumber() &&
2374 key->Number() == number->Number()) {
2375 return String::cast(number_string_cache()->get(hash * 2 + 1));
2376 }
2377 return undefined_value();
2378}
2379
2380
2381void Heap::SetNumberStringCache(Object* number, String* string) {
2382 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002383 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002384 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002385 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002386 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002387 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002388 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002389 number_string_cache()->set(hash * 2, number);
2390 }
2391 number_string_cache()->set(hash * 2 + 1, string);
2392}
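
// The number string cache is a direct-mapped table over a flat FixedArray:
// for entry i, the key (a Smi or HeapNumber) sits at slot 2 * i and the
// cached string at slot 2 * i + 1, which is why the mask is computed from
// length() >> 1. On a collision SetNumberStringCache() simply overwrites
// the old entry; a stale key is harmless because GetNumberStringCache()
// re-checks the key by value before returning the string.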


MaybeObject* Heap::NumberToString(Object* number,
                                  bool check_number_string_cache) {
  isolate_->counters()->number_to_string_runtime()->Increment();
  if (check_number_string_cache) {
    Object* cached = GetNumberStringCache(number);
    if (cached != undefined_value()) {
      return cached;
    }
  }

  char arr[100];
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
  const char* str;
  if (number->IsSmi()) {
    int num = Smi::cast(number)->value();
    str = IntToCString(num, buffer);
  } else {
    double num = HeapNumber::cast(number)->value();
    str = DoubleToCString(num, buffer);
  }

  Object* js_string;
  MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
  if (maybe_js_string->ToObject(&js_string)) {
    SetNumberStringCache(number, String::cast(js_string));
  }
  return maybe_js_string;
}
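
// A minimal usage sketch for NumberToString() (hypothetical caller; the
// MaybeObject convention means a failed allocation is returned, not thrown):
//
//   Object* str;
//   MaybeObject* maybe = heap->NumberToString(Smi::FromInt(42), true);
//   if (!maybe->ToObject(&str)) return maybe;  // propagate retry-after-GC
//
// The check_number_string_cache flag exists only so callers that have
// already probed the cache can skip the redundant lookup.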


Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
}


Heap::RootListIndex Heap::RootIndexForExternalArrayType(
    ExternalArrayType array_type) {
  switch (array_type) {
    case kExternalByteArray:
      return kExternalByteArrayMapRootIndex;
    case kExternalUnsignedByteArray:
      return kExternalUnsignedByteArrayMapRootIndex;
    case kExternalShortArray:
      return kExternalShortArrayMapRootIndex;
    case kExternalUnsignedShortArray:
      return kExternalUnsignedShortArrayMapRootIndex;
    case kExternalIntArray:
      return kExternalIntArrayMapRootIndex;
    case kExternalUnsignedIntArray:
      return kExternalUnsignedIntArrayMapRootIndex;
    case kExternalFloatArray:
      return kExternalFloatArrayMapRootIndex;
    case kExternalDoubleArray:
      return kExternalDoubleArrayMapRootIndex;
    case kExternalPixelArray:
      return kExternalPixelArrayMapRootIndex;
    default:
      UNREACHABLE();
      return kUndefinedValueRootIndex;
  }
}


MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
  // We need to distinguish the minus zero value and this cannot be
  // done after conversion to int. Doing this by comparing bit
  // patterns is faster than using fpclassify() et al.
  static const DoubleRepresentation minus_zero(-0.0);

  DoubleRepresentation rep(value);
  if (rep.bits == minus_zero.bits) {
    return AllocateHeapNumber(-0.0, pretenure);
  }

  int int_value = FastD2I(value);
  if (value == int_value && Smi::IsValid(int_value)) {
    return Smi::FromInt(int_value);
  }

  // Materialize the value in the heap.
  return AllocateHeapNumber(value, pretenure);
}
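
// The bit-pattern comparison is what catches -0.0: as doubles, -0.0 == 0.0
// and FastD2I(-0.0) == 0, so a numeric comparison would wrongly
// canonicalize minus zero into the Smi zero. In IEEE 754 the two values
// differ only in the sign bit:
//
//   DoubleRepresentation(0.0).bits  == 0x0000000000000000
//   DoubleRepresentation(-0.0).bits == 0x8000000000000000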


MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
  // Statically ensure that it is safe to allocate foreigns in paged spaces.
  STATIC_ASSERT(Foreign::kSize <= Page::kMaxHeapObjectSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(foreign_map(), space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  Foreign::cast(result)->set_address(address);
  return result;
}


MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
  SharedFunctionInfo* share;
  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;

  // Set pointer fields.
  share->set_name(name);
  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
  share->set_code(illegal);
  share->set_scope_info(SerializedScopeInfo::Empty());
  Code* construct_stub =
      isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
  share->set_construct_stub(construct_stub);
  share->set_instance_class_name(Object_symbol());
  share->set_function_data(undefined_value());
  share->set_script(undefined_value());
  share->set_debug_info(undefined_value());
  share->set_inferred_name(empty_string());
  share->set_initial_map(undefined_value());
  share->set_this_property_assignments(undefined_value());
  share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));

  // Set integer fields (smi or int, depending on the architecture).
  share->set_length(0);
  share->set_formal_parameter_count(0);
  share->set_expected_nof_properties(0);
  share->set_num_literals(0);
  share->set_start_position_and_type(0);
  share->set_end_position(0);
  share->set_function_token_position(0);
  // All compiler hints default to false or 0.
  share->set_compiler_hints(0);
  share->set_this_property_assignments_count(0);
  share->set_opt_count(0);

  return share;
}


MaybeObject* Heap::AllocateJSMessageObject(String* type,
                                           JSArray* arguments,
                                           int start_position,
                                           int end_position,
                                           Object* script,
                                           Object* stack_trace,
                                           Object* stack_frames) {
  Object* result;
  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  JSMessageObject* message = JSMessageObject::cast(result);
  message->set_properties(Heap::empty_fixed_array());
  message->set_elements(Heap::empty_fixed_array());
  message->set_type(type);
  message->set_arguments(arguments);
  message->set_start_position(start_position);
  message->set_end_position(end_position);
  message->set_script(script);
  message->set_stack_trace(stack_trace);
  message->set_stack_frames(stack_frames);
  return result;
}


// Returns true for a character in a range. Both limits are inclusive.
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
  // This makes use of the unsigned wraparound.
  return character - from <= to - from;
}
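
// For example, Between('0' - 1, '0', '9') evaluates 47u - 48u, which wraps
// to 0xFFFFFFFF; that is never <= 9u, so characters below the lower limit
// are rejected by the same single comparison that rejects those above it.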


MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
    Heap* heap,
    uint32_t c1,
    uint32_t c2) {
  String* symbol;
  // Numeric strings have a different hash algorithm not known by
  // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
      heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
    return symbol;
  // Now we know the length is 2, we might as well make use of that fact
  // when building the new string.
  } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) {  // We can do this
    ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1));  // because of this.
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    char* dest = SeqAsciiString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  } else {
    Object* result;
    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
    dest[0] = c1;
    dest[1] = c2;
    return result;
  }
}
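
// The (c1 | c2) test above is a two-for-one range check: because
// kMaxAsciiCharCodeU + 1 is a power of two (the ASSERT), a value exceeds
// the limit exactly when it has a bit set above the low seven bits, and
// the OR of two values has such a bit iff at least one operand does. So
//
//   (c1 | c2) <= String::kMaxAsciiCharCodeU
//
// holds iff both characters individually fit in ASCII.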


MaybeObject* Heap::AllocateConsString(String* first, String* second) {
  int first_length = first->length();
  if (first_length == 0) {
    return second;
  }

  int second_length = second->length();
  if (second_length == 0) {
    return first;
  }

  int length = first_length + second_length;

  // Optimization for 2-byte strings often used as keys in a decompression
  // dictionary. Check whether we already have the string in the symbol
  // table to prevent creation of many unnecessary strings.
  if (length == 2) {
    unsigned c1 = first->Get(0);
    unsigned c2 = second->Get(0);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  bool first_is_ascii = first->IsAsciiRepresentation();
  bool second_is_ascii = second->IsAsciiRepresentation();
  bool is_ascii = first_is_ascii && second_is_ascii;

  // Make sure that an out of memory exception is thrown if the length
  // of the new cons string is too large.
  if (length > String::kMaxLength || length < 0) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  bool is_ascii_data_in_two_byte_string = false;
  if (!is_ascii) {
    // At least one of the strings uses two-byte representation so we
    // can't use the fast case code for short ascii strings below, but
    // we can try to save memory if all chars actually fit in ascii.
    is_ascii_data_in_two_byte_string =
        first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
    if (is_ascii_data_in_two_byte_string) {
      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
    }
  }

  // If the resulting string is small make a flat string.
  if (length < String::kMinNonFlatLength) {
    // Note that neither of the two inputs can be a slice because:
    STATIC_ASSERT(String::kMinNonFlatLength <= SlicedString::kMinLength);
    ASSERT(first->IsFlat());
    ASSERT(second->IsFlat());
    if (is_ascii) {
      Object* result;
      { MaybeObject* maybe_result = AllocateRawAsciiString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      char* dest = SeqAsciiString::cast(result)->GetChars();
      // Copy first part.
      const char* src;
      if (first->IsExternalString()) {
        src = ExternalAsciiString::cast(first)->resource()->data();
      } else {
        src = SeqAsciiString::cast(first)->GetChars();
      }
      for (int i = 0; i < first_length; i++) *dest++ = src[i];
      // Copy second part.
      if (second->IsExternalString()) {
        src = ExternalAsciiString::cast(second)->resource()->data();
      } else {
        src = SeqAsciiString::cast(second)->GetChars();
      }
      for (int i = 0; i < second_length; i++) *dest++ = src[i];
      return result;
    } else {
      if (is_ascii_data_in_two_byte_string) {
        Object* result;
        { MaybeObject* maybe_result = AllocateRawAsciiString(length);
          if (!maybe_result->ToObject(&result)) return maybe_result;
        }
        // Copy the characters into the new object.
        char* dest = SeqAsciiString::cast(result)->GetChars();
        String::WriteToFlat(first, dest, 0, first_length);
        String::WriteToFlat(second, dest + first_length, 0, second_length);
        isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
        return result;
      }

      Object* result;
      { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
        if (!maybe_result->ToObject(&result)) return maybe_result;
      }
      // Copy the characters into the new object.
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
      String::WriteToFlat(first, dest, 0, first_length);
      String::WriteToFlat(second, dest + first_length, 0, second_length);
      return result;
    }
  }

  Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
      cons_ascii_string_map() : cons_string_map();

  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  AssertNoAllocation no_gc;
  ConsString* cons_string = ConsString::cast(result);
  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
  cons_string->set_length(length);
  cons_string->set_hash_field(String::kEmptyHashField);
  cons_string->set_first(first, mode);
  cons_string->set_second(second, mode);
  return result;
}
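
// Cons strings make concatenation O(1): instead of copying characters, the
// result is a (first, second) pair that is flattened lazily when the
// characters are actually needed. The String::kMinNonFlatLength cut-off
// above keeps short results flat, so the heap does not fill up with tiny
// cons cells whose bookkeeping would cost more than the copy they avoid.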


MaybeObject* Heap::AllocateSubString(String* buffer,
                                     int start,
                                     int end,
                                     PretenureFlag pretenure) {
  int length = end - start;
  if (length == 0) {
    return empty_string();
  } else if (length == 1) {
    return LookupSingleCharacterStringFromCode(buffer->Get(start));
  } else if (length == 2) {
    // Optimization for 2-byte strings often used as keys in a decompression
    // dictionary. Check whether we already have the string in the symbol
    // table to prevent creation of many unnecessary strings.
    unsigned c1 = buffer->Get(start);
    unsigned c2 = buffer->Get(start + 1);
    return MakeOrFindTwoCharacterString(this, c1, c2);
  }

  // Make an attempt to flatten the buffer to reduce access time.
  buffer = buffer->TryFlattenGetString();

  // TODO(1626): For now slicing external strings is not supported. However,
  // a flat cons string can have an external string as its first part in some
  // cases. Therefore we have to single out this case as well.
  if (!FLAG_string_slices ||
      (buffer->IsConsString() &&
       (!buffer->IsFlat() ||
        !ConsString::cast(buffer)->first()->IsSeqString())) ||
      buffer->IsExternalString() ||
      length < SlicedString::kMinLength ||
      pretenure == TENURED) {
    Object* result;
    { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
          ? AllocateRawAsciiString(length, pretenure)
          : AllocateRawTwoByteString(length, pretenure);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    String* string_result = String::cast(result);
    // Copy the characters into the new object.
    if (buffer->IsAsciiRepresentation()) {
      ASSERT(string_result->IsAsciiRepresentation());
      char* dest = SeqAsciiString::cast(string_result)->GetChars();
      String::WriteToFlat(buffer, dest, start, end);
    } else {
      ASSERT(string_result->IsTwoByteRepresentation());
      uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
      String::WriteToFlat(buffer, dest, start, end);
    }
    return result;
  }

  ASSERT(buffer->IsFlat());
  ASSERT(!buffer->IsExternalString());
#ifdef DEBUG
  buffer->StringVerify();
#endif

  Object* result;
  { Map* map = buffer->IsAsciiRepresentation()
        ? sliced_ascii_string_map()
        : sliced_string_map();
    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  AssertNoAllocation no_gc;
  SlicedString* sliced_string = SlicedString::cast(result);
  sliced_string->set_length(length);
  sliced_string->set_hash_field(String::kEmptyHashField);
  if (buffer->IsConsString()) {
    ConsString* cons = ConsString::cast(buffer);
    ASSERT(cons->second()->length() == 0);
    sliced_string->set_parent(cons->first());
    sliced_string->set_offset(start);
  } else if (buffer->IsSlicedString()) {
    // Prevent nesting sliced strings.
    SlicedString* parent_slice = SlicedString::cast(buffer);
    sliced_string->set_parent(parent_slice->parent());
    sliced_string->set_offset(start + parent_slice->offset());
  } else {
    sliced_string->set_parent(buffer);
    sliced_string->set_offset(start);
  }
  ASSERT(sliced_string->parent()->IsSeqString());
  return result;
}
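
// The "prevent nesting" branch above keeps every slice exactly one level
// deep. A sketch of the invariant for a sliced buffer:
//
//   SlicedString* outer = SlicedString::cast(buffer);
//   sliced_string->parent() == outer->parent();   // parent is never a slice
//   sliced_string->offset() == start + outer->offset();  // offsets compose
//
// so chains of substrings never accumulate indirections, and the final
// ASSERT can insist that the parent is always a sequential string.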


MaybeObject* Heap::AllocateExternalStringFromAscii(
    ExternalAsciiString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  Map* map = external_ascii_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


MaybeObject* Heap::AllocateExternalStringFromTwoByte(
    ExternalTwoByteString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    isolate()->context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  // For small strings we check whether the resource contains only
  // ASCII characters. If yes, we use a different string map.
  static const size_t kAsciiCheckLengthLimit = 32;
  bool is_ascii = length <= kAsciiCheckLengthLimit &&
      String::IsAscii(resource->data(), static_cast<int>(length));
  Map* map = is_ascii ?
      external_string_with_ascii_data_map() : external_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}
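
// The kAsciiCheckLengthLimit cut-off is a trade-off: scanning a short
// resource to pick external_string_with_ascii_data_map is cheap and lets
// later operations treat the data as ASCII, while scanning an arbitrarily
// long external string up front would make this API call O(length) for no
// guaranteed benefit.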


MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = single_character_string_cache()->get(code);
    if (value != undefined_value()) return value;

    char buffer[1];
    buffer[0] = static_cast<char>(code);
    Object* result;
    MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));

    if (!maybe_result->ToObject(&result)) return maybe_result;
    single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}
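
// Single-character strings in the ASCII range are memoized through the
// single_character_string_cache root array and interned as symbols, so
// repeated charAt()-style operations hand back the same object; codes
// above String::kMaxAsciiCharCode get a fresh two-byte string every call.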


MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  Object* result;
  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
        ? old_data_space_->AllocateRaw(size)
        : lo_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


MaybeObject* Heap::AllocateByteArray(int length) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map(two_pointer_filler_map());
  } else {
    filler->set_map(byte_array_map());
    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
  }
}
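
// Fillers keep the heap iterable: a freed or trimmed gap is dressed up as a
// dead object with a valid map so heap walkers can step over it by size.
// One- and two-word gaps use dedicated filler maps; larger gaps masquerade
// as a ByteArray whose length is chosen so that
//
//   ByteArray::SizeFor(ByteArray::LengthFor(size)) == size.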


MaybeObject* Heap::AllocateExternalArray(int length,
                                         ExternalArrayType array_type,
                                         void* external_pointer,
                                         PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
                                            space,
                                            OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ExternalArray*>(result)->set_map(
      MapForExternalArrayType(array_type));
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
      external_pointer);

  return result;
}


MaybeObject* Heap::CreateCode(const CodeDesc& desc,
                              Code::Flags flags,
                              Handle<Object> self_reference,
                              bool immovable) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info;
  { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
    if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
  }

  // Compute size.
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
  MaybeObject* maybe_result;
  // Large code objects and code objects which should stay at a fixed address
  // are allocated in large object space.
  if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Initialize the object.
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_info(ByteArray::cast(reloc_info));
  code->set_flags(flags);
  if (code->is_call_stub() || code->is_keyed_call_stub()) {
    code->set_check_type(RECEIVER_MAP_CHECK);
  }
  code->set_deoptimization_data(empty_fixed_array());
  code->set_next_code_flushing_candidate(undefined_value());
  // Allow self references to created code object by patching the handle to
  // point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}
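
// To spell out the self_reference mechanism: a code stub that needs to
// embed its own address emits the handle's location into the instruction
// stream, CreateCode() stores the finished Code object into that handle
// just before CopyFrom(), and CopyFrom() dereferences such handle locations
// while copying, so the embedded pointer ends up referring to the very
// object that contains it.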


MaybeObject* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  MaybeObject* maybe_result;
  if (obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(new_addr, old_addr, obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving an uninitialized Code object (and breaking the heap).
  Object* reloc_info_array;
  { MaybeObject* maybe_reloc_info_array =
        AllocateByteArray(reloc_info.length(), TENURED);
    if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
      return maybe_reloc_info_array;
    }
  }

  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);

  int new_obj_size = Code::SizeFor(new_body_size);

  Address old_addr = code->address();

  size_t relocation_offset =
      static_cast<size_t>(code->instruction_end() - old_addr);

  MaybeObject* maybe_result;
  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(new_obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

  // Copy header and instructions.
  memcpy(new_addr, old_addr, relocation_offset);

  Code* new_code = Code::cast(result);
  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

  // Copy patched rinfo.
  memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());

  // Relocate the copy.
  ASSERT(!isolate_->code_range()->exists() ||
         isolate_->code_range()->contains(code->address()));
  new_code->Relocate(new_addr - old_addr);

#ifdef DEBUG
  new_code->Verify();
#endif
  return new_code;
}


MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space =
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(map->instance_size(), space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(map);
  return result;
}
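
// The retry_space argument is what the comment above alludes to: when the
// requested space is NEW_SPACE but new space is full and allocation
// failures are disallowed, AllocateRaw() falls back to the old-generation
// space the object would have been promoted to anyway, keyed off the
// instance type via TargetSpaceId().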


MaybeObject* Heap::InitializeFunction(JSFunction* function,
                                      SharedFunctionInfo* shared,
                                      Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_code(shared->code());
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  function->set_literals(empty_fixed_array());
  function->set_next_function_link(undefined_value());
  return function;
}


MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Allocate the prototype. Make sure to use the object function
  // from the function's context, since the function can be from a
  // different context.
  JSFunction* object_function =
      function->context()->global_context()->object_function();
  Object* prototype;
  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
    if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
  }
  // When creating the prototype for the function we must set its
  // constructor to the function.
  Object* result;
  { MaybeObject* maybe_result =
        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
            constructor_symbol(), function, DONT_ENUM);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return prototype;
}


MaybeObject* Heap::AllocateFunction(Map* function_map,
                                    SharedFunctionInfo* shared,
                                    Object* prototype,
                                    PretenureFlag pretenure) {
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(function_map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return InitializeFunction(JSFunction::cast(result), shared, prototype);
}


MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  JSObject* boilerplate;
  int arguments_object_size;
  bool strict_mode_callee = callee->IsJSFunction() &&
      JSFunction::cast(callee)->shared()->strict_mode();
  if (strict_mode_callee) {
    boilerplate =
        isolate()->context()->global_context()->
            strict_mode_arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSizeStrict;
  } else {
    boilerplate =
        isolate()->context()->global_context()->arguments_boilerplate();
    arguments_object_size = kArgumentsObjectSize;
  }

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  // Check that the size of the boilerplate matches our
  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
  // on the size being a known constant.
  ASSERT(arguments_object_size == boilerplate->map()->instance_size());

  // Do the allocation.
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the content. The arguments boilerplate doesn't have any
  // fields that point to new space so it's safe to skip the write
  // barrier here.
  CopyBlock(HeapObject::cast(result)->address(),
            boilerplate->address(),
            JSObject::kHeaderSize);

  // Set the length property.
  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);
  // Set the callee property for non-strict mode arguments object only.
  if (!strict_mode_callee) {
    JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
                                                  callee);
  }

  // Check the state of the object.
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}
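
// The two boilerplates differ in layout, not just in maps: the strict mode
// arguments object carries no callee field (kArgumentsObjectSizeStrict is
// correspondingly smaller), which is why the callee is written only on the
// non-strict path above.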


static bool HasDuplicates(DescriptorArray* descriptors) {
  int count = descriptors->number_of_descriptors();
  if (count > 1) {
    String* prev_key = descriptors->GetKey(0);
    for (int i = 1; i != count; i++) {
      String* current_key = descriptors->GetKey(i);
      if (prev_key == current_key) return true;
      prev_key = current_key;
    }
  }
  return false;
}
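
// Note that this adjacent-pairs scan is only sound on a sorted
// DescriptorArray: the caller below runs SortUnchecked() first, so equal
// keys end up next to each other and one linear pass suffices. The pointer
// comparison is enough because the keys are symbols, which are unique per
// character sequence.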


MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the size and number of in-object properties
  // suggested by the function.
  int instance_size = fun->shared()->CalculateInstanceSize();
  int in_object_properties = fun->shared()->CalculateInObjectProperties();
  Object* map_obj;
  { MaybeObject* maybe_map_obj = AllocateMap(JS_OBJECT_TYPE, instance_size);
    if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
  }

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
      if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
    }
  }
  Map* map = Map::cast(map_obj);
  map->set_inobject_properties(in_object_properties);
  map->set_unused_property_fields(in_object_properties);
  map->set_prototype(prototype);
  ASSERT(map->has_fast_elements());

  // If the function has only simple this property assignments add
  // field descriptors for these to the initial map as the object
  // cannot be constructed without having these properties. Guard by
  // the inline_new flag so we only change the map if we generate a
  // specialized construct stub.
  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
  if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
    int count = fun->shared()->this_property_assignments_count();
    if (count > in_object_properties) {
      // Inline constructor can only handle inobject properties.
      fun->shared()->ForbidInlineConstructor();
    } else {
      Object* descriptors_obj;
      { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
        if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
          return maybe_descriptors_obj;
        }
      }
      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
      for (int i = 0; i < count; i++) {
        String* name = fun->shared()->GetThisPropertyAssignmentName(i);
        ASSERT(name->IsSymbol());
        FieldDescriptor field(name, i, NONE);
        field.SetEnumerationIndex(i);
        descriptors->Set(i, &field);
      }
      descriptors->SetNextEnumerationIndex(count);
      descriptors->SortUnchecked();

      // The descriptors may contain duplicates because the compiler does not
      // guarantee the uniqueness of property names (it would have required
      // quadratic time). Once the descriptors are sorted we can check for
      // duplicates in linear time.
      if (HasDuplicates(descriptors)) {
        fun->shared()->ForbidInlineConstructor();
      } else {
        map->set_instance_descriptors(descriptors);
        map->set_pre_allocated_property_fields(count);
        map->set_unused_property_fields(in_object_properties - count);
      }
    }
  }

  fun->shared()->StartInobjectSlackTracking(map);

  return map;
}


void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map. For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
  // fixed array (e.g. Heap::empty_fixed_array()). Currently, the object
  // verification code has to cope with (temporarily) invalid objects. See,
  // for example, JSArray::JSArrayVerify().
  Object* filler;
  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their internal fields to be initialized
  // with undefined_value.
  if (map->constructor()->IsJSFunction() &&
      JSFunction::cast(map->constructor())->shared()->
          IsInobjectSlackTrackingInProgress()) {
    // We might want to shrink the object later.
    ASSERT(obj->GetInternalFieldCount() == 0);
    filler = Heap::one_pointer_filler_map();
  } else {
    filler = Heap::undefined_value();
  }
  obj->InitializeBody(map->instance_size(), filler);
}
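
// The filler choice above is what makes later shrinking safe: while the
// constructor's inobject slack tracking is in progress, the still-unused
// tail of the object holds one_pointer_filler_map words rather than real
// values, so trimming them off loses nothing. Objects with API-defined
// internal fields are excluded (hence the ASSERT) because those fields
// must start out as undefined_value.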


MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size =
      map->pre_allocated_property_fields() +
      map->unused_property_fields() -
      map->inobject_properties();
  ASSERT(prop_size >= 0);
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
  Object* obj;
  { MaybeObject* maybe_obj = Allocate(map, space);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements());
  return obj;
}


MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
                                    PretenureFlag pretenure) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map.
  MaybeObject* result =
      AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}


MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
  // Allocate map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps. Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);

  // Allocate the proxy object.
  JSProxy* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
  result->set_handler(handler);
  return result;
}


MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
                                           Object* call_trap,
                                           Object* construct_trap,
                                           Object* prototype) {
  // Allocate map.
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
  // maps. Will probably depend on the identity of the handler object, too.
  Map* map;
  MaybeObject* maybe_map_obj =
      AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
  map->set_prototype(prototype);

  // Allocate the proxy object.
  JSFunctionProxy* result;
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
  result->set_handler(handler);
  result->set_call_trap(call_trap);
  result->set_construct_trap(construct_trap);
  return result;
}


MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Make sure no field properties are described in the initial map.
  // This guarantees us that normalizing the properties does not
  // require us to change property values to JSGlobalPropertyCells.
  ASSERT(map->NextFreePropertyIndex() == 0);

  // Make sure we don't have a ton of pre-allocated slots in the
  // global objects. They will be unused once we normalize the object.
  ASSERT(map->unused_property_fields() == 0);
  ASSERT(map->inobject_properties() == 0);

  // Initial size of the backing store to avoid resize of the storage during
  // bootstrapping. The size differs between the JS global object and the
  // builtins object.
  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;

  // Allocate a dictionary object for backing storage.
  Object* obj;
  { MaybeObject* maybe_obj =
        StringDictionary::Allocate(
            map->NumberOfDescribedProperties() * 2 + initial_size);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  StringDictionary* dictionary = StringDictionary::cast(obj);

  // The global object might be created from an object template with accessors.
  // Fill these accessors into the dictionary.
  DescriptorArray* descs = map->instance_descriptors();
  for (int i = 0; i < descs->number_of_descriptors(); i++) {
    PropertyDetails details(descs->GetDetails(i));
    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
    PropertyDetails d =
        PropertyDetails(details.attributes(), CALLBACKS, details.index());
    Object* value = descs->GetCallbacksObject(i);
    { MaybeObject* maybe_value = AllocateJSGlobalPropertyCell(value);
      if (!maybe_value->ToObject(&value)) return maybe_value;
    }

    Object* result;
    { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    dictionary = StringDictionary::cast(result);
  }

  // Allocate the global object and initialize it with the backing store.
  { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  JSObject* global = JSObject::cast(obj);
  InitializeJSObjectFromMap(global, dictionary, map);

  // Create a new map for the global object.
  { MaybeObject* maybe_obj = map->CopyDropDescriptors();
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  Map* new_map = Map::cast(obj);

  // Set up the global object as a normalized object.
  global->set_map(new_map);
  global->map()->clear_instance_descriptors();
  global->set_properties(dictionary);

  // Make sure result is a global object with properties in dictionary.
  ASSERT(global->IsGlobalObject());
  ASSERT(!global->HasFastProperties());
  return global;
}
3523
3524
John Reck59135872010-11-02 12:39:01 -07003525MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003526 // Never used to copy functions. If functions need to be copied we
3527 // have to be careful to clear the literals array.
3528 ASSERT(!source->IsJSFunction());
3529
3530 // Make the clone.
3531 Map* map = source->map();
3532 int object_size = map->instance_size();
3533 Object* clone;
3534
3535 // If we're forced to always allocate, we use the general allocation
3536 // functions which may leave us with an object in old space.
3537 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003538 { MaybeObject* maybe_clone =
3539 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3540 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3541 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003542 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003543 CopyBlock(clone_address,
3544 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003545 object_size);
3546 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003547 RecordWrites(clone_address,
3548 JSObject::kHeaderSize,
3549 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003550 } else {
John Reck59135872010-11-02 12:39:01 -07003551 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3552 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3553 }
Steve Block44f0eee2011-05-26 01:26:41 +01003554 ASSERT(InNewSpace(clone));
Steve Blocka7e24c12009-10-30 11:49:00 +00003555 // Since we know the clone is allocated in new space, we can copy
3556 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003557 CopyBlock(HeapObject::cast(clone)->address(),
3558 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003559 object_size);
3560 }
3561
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003562 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003563 FixedArray* properties = FixedArray::cast(source->properties());
3564 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003565 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003566 Object* elem;
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003567 { MaybeObject* maybe_elem;
3568 if (elements->map() == fixed_cow_array_map()) {
3569 maybe_elem = FixedArray::cast(elements);
3570 } else if (source->HasFastDoubleElements()) {
3571 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3572 } else {
3573 maybe_elem = CopyFixedArray(FixedArray::cast(elements));
3574 }
John Reck59135872010-11-02 12:39:01 -07003575 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3576 }
Ben Murdoch69a99ed2011-11-30 16:03:39 +00003577 JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem));
Steve Blocka7e24c12009-10-30 11:49:00 +00003578 }
3579 // Update properties if necessary.
3580 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003581 Object* prop;
3582 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3583 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3584 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003585 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3586 }
3587 // Return the new clone.
3588 return clone;
3589}
3590
3591
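// Converts an already allocated JSReceiver in place (for example a JSProxy
// that is being turned into a regular object) into an object of the given
// type, padding any leftover space with a filler object.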
MaybeObject* Heap::ReinitializeJSReceiver(
    JSReceiver* object, InstanceType type, int size) {
  ASSERT(type >= FIRST_JS_RECEIVER_TYPE);

  // Allocate fresh map.
  // TODO(rossberg): Once we optimize proxies, cache these maps.
  Map* map;
  MaybeObject* maybe_map_obj = AllocateMap(type, size);
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;

  // Check that the receiver has at least the size of the fresh object.
  int size_difference = object->map()->instance_size() - map->instance_size();
  ASSERT(size_difference >= 0);

  map->set_prototype(object->map()->prototype());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(map);

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(JSObject::cast(object),
                            FixedArray::cast(properties), map);

  // Functions require some minimal initialization.
  if (type == JS_FUNCTION_TYPE) {
    String* name;
    MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>");
    if (!maybe_name->To<String>(&name)) return maybe_name;
    SharedFunctionInfo* shared;
    MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
    if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
    JSFunction* func;
    MaybeObject* maybe_func =
        InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
    if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
    func->set_context(isolate()->context()->global_context());
  }

  // Put in filler if the new object is smaller than the old.
  if (size_difference > 0) {
    CreateFillerObjectAt(
        object->address() + map->instance_size(), size_difference);
  }

  return object;
}


MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                             JSGlobalProxy* object) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size and type as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());
  ASSERT(map->instance_type() == object->map()->instance_type());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}


MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
                                           PretenureFlag pretenure) {
  if (string.length() == 1) {
    return Heap::LookupSingleCharacterStringFromCode(string[0]);
  }
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRawAsciiString(string.length(), pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the characters into the new object.
  SeqAsciiString* string_result = SeqAsciiString::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->SeqAsciiStringSet(i, string[i]);
  }
  return result;
}


MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
                                              PretenureFlag pretenure) {
  // V8 only supports characters in the Basic Multilingual Plane.
  const uc32 kMaxSupportedChar = 0xFFFF;
  // Count the number of characters in the UTF-8 string and check if
  // it is an ASCII string.
  Access<UnicodeCache::Utf8Decoder>
      decoder(isolate_->unicode_cache()->utf8_decoder());
  decoder->Reset(string.start(), string.length());
  int chars = 0;
  while (decoder->has_more()) {
    decoder->GetNext();
    chars++;
  }

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Convert and copy the characters into the new object.
  String* string_result = String::cast(result);
  decoder->Reset(string.start(), string.length());
  for (int i = 0; i < chars; i++) {
    uc32 r = decoder->GetNext();
    if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
    string_result->Set(i, r);
  }
  return result;
}


MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                             PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  MaybeObject* maybe_result;
  if (String::IsAscii(string.start(), string.length())) {
    maybe_result = AllocateRawAsciiString(string.length(), pretenure);
  } else {  // It's not an ASCII string.
    maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
  }
  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy the characters into the new object, which may be either ASCII or
  // UTF-16.
  String* string_result = String::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->Set(i, string[i]);
  }
  return result;
}


Map* Heap::SymbolMapForString(String* string) {
  // If the string is in new space it cannot be used as a symbol.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding symbol map for strings.
  Map* map = string->map();
  if (map == ascii_string_map()) {
    return ascii_symbol_map();
  }
  if (map == string_map()) {
    return symbol_map();
  }
  if (map == cons_string_map()) {
    return cons_symbol_map();
  }
  if (map == cons_ascii_string_map()) {
    return cons_ascii_symbol_map();
  }
  if (map == external_string_map()) {
    return external_symbol_map();
  }
  if (map == external_ascii_string_map()) {
    return external_ascii_symbol_map();
  }
  if (map == external_string_with_ascii_data_map()) {
    return external_symbol_with_ascii_data_map();
  }

  // No match found.
  return NULL;
}


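// Allocates a sequential symbol with the given character content. Symbols
// are never allocated in new space: depending on size they go directly into
// old data space or large object space.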
MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
                                          int chars,
                                          uint32_t hash_field) {
  ASSERT(chars >= 0);
  // Ensure that chars matches the number of characters in the buffer.
  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
  // Determine whether the string is ASCII.
  bool is_ascii = true;
  while (buffer->has_more()) {
    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
      is_ascii = false;
      break;
    }
  }
  buffer->Rewind();

  // Compute map and object size.
  int size;
  Map* map;

  if (is_ascii) {
    if (chars > SeqAsciiString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = ascii_symbol_map();
    size = SeqAsciiString::SizeFor(chars);
  } else {
    if (chars > SeqTwoByteString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = symbol_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
        ? lo_space_->AllocateRaw(size)
        : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  // Fill in the characters.
  for (int i = 0; i < chars; i++) {
    answer->Set(i, buffer->GetNext());
  }
  return answer;
}


MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
  if (length < 0 || length > SeqAsciiString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  int size = SeqAsciiString::SizeFor(length);
  ASSERT(size <= SeqAsciiString::kMaxSize);

  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(ascii_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateRawTwoByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = SeqTwoByteString::SizeFor(length);
  ASSERT(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
  reinterpret_cast<FixedArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  ASSERT(length > 0);
  // Use the general function if we're forced to always allocate.
  if (always_allocate()) return AllocateFixedArray(length, TENURED);
  // Allocate the raw data for a fixed array.
  int size = FixedArray::SizeFor(length);
  return size <= kMaxObjectSizeInNewSpace
      ? new_space_.AllocateRaw(size)
      : lo_space_->AllocateRawFixedArray(size);
}


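// Copies a fixed array and installs the given map on the copy. Copies into
// new space can be done with a single block copy; elsewhere the elements are
// copied one by one so that the write barrier is honored.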
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  if (InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    dst->set_map(map);
    CopyBlock(dst->address() + kPointerSize,
              src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  HeapObject::cast(obj)->set_map(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content.
  AssertNoAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}


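// Copies a fixed double array and installs the given map on the copy.
// Doubles are raw data, so a single block copy suffices and no write
// barrier is needed.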
MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
                                               Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  HeapObject* dst = HeapObject::cast(obj);
  dst->set_map(map);
  CopyBlock(
      dst->address() + FixedDoubleArray::kLengthOffset,
      src->address() + FixedDoubleArray::kLengthOffset,
      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
  return obj;
}


MaybeObject* Heap::AllocateFixedArray(int length) {
  ASSERT(length >= 0);
  if (length == 0) return empty_fixed_array();
  Object* result;
  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize header.
  FixedArray* array = reinterpret_cast<FixedArray*>(result);
  array->set_map(fixed_array_map());
  array->set_length(length);
  // Initialize body.
  ASSERT(!InNewSpace(undefined_value()));
  MemsetPointer(array->data_start(), undefined_value(), length);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  int size = FixedArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_POINTER_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old pointer space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}


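// Shared helper for AllocateFixedArray and AllocateFixedArrayWithHoles:
// allocates a fixed array of the given length and initializes every element
// with the given filler value, which must not be in new space.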
MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
    Heap* heap,
    int length,
    PretenureFlag pretenure,
    Object* filler) {
  ASSERT(length >= 0);
  ASSERT(heap->empty_fixed_array()->IsFixedArray());
  if (length == 0) return heap->empty_fixed_array();

  ASSERT(!heap->InNewSpace(filler));
  Object* result;
  { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(heap->fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}


MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      undefined_value());
}


MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
                                               PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(this,
                                      length,
                                      pretenure,
                                      the_hole_value());
}


MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
  if (length == 0) return empty_fixed_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
  FixedArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
  int size = FixedDoubleArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedDoubleArray*>(result)->set_map(
      fixed_double_array_map());
  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
    int length,
    PretenureFlag pretenure) {
  if (length == 0) return empty_fixed_double_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedDoubleArray*>(obj)->set_map(fixed_double_array_map());
  FixedDoubleArray::cast(obj)->set_length(length);
  return obj;
}


MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
                                               PretenureFlag pretenure) {
  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  int size = FixedDoubleArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_DATA_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old data space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_DATA_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}


MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
  ASSERT(result->IsHashTable());
  return result;
}


MaybeObject* Heap::AllocateGlobalContext() {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(global_context_map());
  ASSERT(context->IsGlobalContext());
  ASSERT(result->IsContext());
  return result;
}


MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(function_context_map());
  context->set_closure(function);
  context->set_previous(function->context());
  context->set_extension(NULL);
  context->set_global(function->context()->global());
  return context;
}


MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
                                        Context* previous,
                                        String* name,
                                        Object* thrown_object) {
  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(catch_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(name);
  context->set_global(previous->global());
  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
  return context;
}


MaybeObject* Heap::AllocateWithContext(JSFunction* function,
                                       Context* previous,
                                       JSObject* extension) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(with_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(extension);
  context->set_global(previous->global());
  return context;
}


MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
                                        Context* previous,
                                        SerializedScopeInfo* scope_info) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateFixedArrayWithHoles(scope_info->NumberOfContextSlots());
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Context* context = reinterpret_cast<Context*>(result);
  context->set_map(block_context_map());
  context->set_closure(function);
  context->set_previous(previous);
  context->set_extension(scope_info);
  context->set_global(previous->global());
  return context;
}


MaybeObject* Heap::AllocateSerializedScopeInfo(int length) {
  Object* result;
  { MaybeObject* maybe_result = AllocateFixedArray(length, TENURED);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  SerializedScopeInfo* scope_info =
      reinterpret_cast<SerializedScopeInfo*>(result);
  scope_info->set_map(serialized_scope_info_map());
  return scope_info;
}


MaybeObject* Heap::AllocateStruct(InstanceType type) {
  Map* map;
  switch (type) {
#define MAKE_CASE(NAME, Name, name) \
    case NAME##_TYPE: map = name##_map(); break;
STRUCT_LIST(MAKE_CASE)
#undef MAKE_CASE
    default:
      UNREACHABLE();
      return Failure::InternalError();
  }
  int size = map->instance_size();
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  Struct::cast(result)->InitializeBody(size);
  return result;
}


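// Called by the embedder when the VM is idle. Escalates the amount of
// garbage collection work as consecutive idle notifications arrive: first a
// scavenge, then a mark-sweep, and finally a full mark-compact, after which
// cleanup stops until enough GCs have happened to warrant another round.
// Returns true once no further idle work is expected.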
bool Heap::IdleNotification() {
  static const int kIdlesBeforeScavenge = 4;
  static const int kIdlesBeforeMarkSweep = 7;
  static const int kIdlesBeforeMarkCompact = 8;
  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
  static const unsigned int kGCsBetweenCleanup = 4;

  if (!last_idle_notification_gc_count_init_) {
    last_idle_notification_gc_count_ = gc_count_;
    last_idle_notification_gc_count_init_ = true;
  }

  bool uncommit = true;
  bool finished = false;

  // Reset the number of idle notifications received when a number of
  // GCs have taken place. This allows another round of cleanup based
  // on idle notifications if enough work has been carried out to
  // provoke a number of garbage collections.
  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
    number_idle_notifications_ =
        Min(number_idle_notifications_ + 1, kMaxIdleCount);
  } else {
    number_idle_notifications_ = 0;
    last_idle_notification_gc_count_ = gc_count_;
  }

  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
    if (contexts_disposed_ > 0) {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
    } else {
      CollectGarbage(NEW_SPACE);
    }
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
    // Before doing the mark-sweep collections we clear the
    // compilation cache to avoid hanging on to source code and
    // generated code for cached functions.
    isolate_->compilation_cache()->Clear();

    CollectAllGarbage(false);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;

  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
    CollectAllGarbage(true);
    new_space_.Shrink();
    last_idle_notification_gc_count_ = gc_count_;
    number_idle_notifications_ = 0;
    finished = true;
  } else if (contexts_disposed_ > 0) {
    if (FLAG_expose_gc) {
      contexts_disposed_ = 0;
    } else {
      HistogramTimerScope scope(isolate_->counters()->gc_context());
      CollectAllGarbage(false);
      last_idle_notification_gc_count_ = gc_count_;
    }
    // If this is the first idle notification, we reset the
    // notification count to avoid letting idle notifications for
    // context disposal garbage collections start a potentially too
    // aggressive idle GC cycle.
    if (number_idle_notifications_ <= 1) {
      number_idle_notifications_ = 0;
      uncommit = false;
    }
  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
    // If we have received more than kIdlesBeforeMarkCompact idle
    // notifications we do not perform any cleanup because we don't
    // expect to gain much by doing so.
    finished = true;
  }

  // Make sure that we have no pending context disposals and
  // conditionally uncommit from space.
  ASSERT(contexts_disposed_ == 0);
  if (uncommit) UncommitFromSpace();
  return finished;
}


#ifdef DEBUG

void Heap::Print() {
  if (!HasBeenSetup()) return;
  isolate()->PrintStack();
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next())
    space->Print();
}


void Heap::ReportCodeStatistics(const char* title) {
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
  PagedSpace::ResetCodeStatistics();
  // We do not look for code in new space, map space, or old space. If code
  // somehow ends up in those spaces, we would miss it here.
  code_space_->CollectCodeStatistics();
  lo_space_->CollectCodeStatistics();
  PagedSpace::ReportCodeStatistics();
}


// This function expects that NewSpace's allocated objects histogram is
// populated (via a call to CollectStatistics or else as a side effect of a
// just-completed scavenge collection).
void Heap::ReportHeapStatistics(const char* title) {
  USE(title);
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
         title, gc_count_);
  PrintF("mark-compact GC : %d\n", mc_count_);
  PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_promotion_limit_);
  PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
         old_gen_allocation_limit_);

  PrintF("\n");
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
  isolate_->global_handles()->PrintStats();
  PrintF("\n");

  PrintF("Heap statistics : ");
  isolate_->memory_allocator()->ReportStatistics();
  PrintF("To space : ");
  new_space_.ReportStatistics();
  PrintF("Old pointer space : ");
  old_pointer_space_->ReportStatistics();
  PrintF("Old data space : ");
  old_data_space_->ReportStatistics();
  PrintF("Code space : ");
  code_space_->ReportStatistics();
  PrintF("Map space : ");
  map_space_->ReportStatistics();
  PrintF("Cell space : ");
  cell_space_->ReportStatistics();
  PrintF("Large object space : ");
  lo_space_->ReportStatistics();
  PrintF(">>>>>> ========================================= >>>>>>\n");
}

#endif  // DEBUG

bool Heap::Contains(HeapObject* value) {
  return Contains(value->address());
}


bool Heap::Contains(Address addr) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  return HasBeenSetup() &&
      (new_space_.ToSpaceContains(addr) ||
       old_pointer_space_->Contains(addr) ||
       old_data_space_->Contains(addr) ||
       code_space_->Contains(addr) ||
       map_space_->Contains(addr) ||
       cell_space_->Contains(addr) ||
       lo_space_->SlowContains(addr));
}


bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
  return InSpace(value->address(), space);
}


bool Heap::InSpace(Address addr, AllocationSpace space) {
  if (OS::IsOutsideAllocatedSpace(addr)) return false;
  if (!HasBeenSetup()) return false;

  switch (space) {
    case NEW_SPACE:
      return new_space_.ToSpaceContains(addr);
    case OLD_POINTER_SPACE:
      return old_pointer_space_->Contains(addr);
    case OLD_DATA_SPACE:
      return old_data_space_->Contains(addr);
    case CODE_SPACE:
      return code_space_->Contains(addr);
    case MAP_SPACE:
      return map_space_->Contains(addr);
    case CELL_SPACE:
      return cell_space_->Contains(addr);
    case LO_SPACE:
      return lo_space_->SlowContains(addr);
  }

  return false;
}


#ifdef DEBUG
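// No-op slot callback: the verification code below only relies on the
// assertions performed during dirty-region iteration, not on updating the
// visited slots.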
static void DummyScavengePointer(HeapObject** p) {
}


static void VerifyPointersUnderWatermark(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    Address start = page->ObjectAreaStart();
    Address end = page->AllocationWatermark();

    HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
                              start,
                              end,
                              visit_dirty_region,
                              &DummyScavengePointer);
  }
}


static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        HEAP->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}


void Heap::Verify() {
  ASSERT(HasBeenSetup());

  VerifyPointersVisitor visitor;
  IterateRoots(&visitor, VISIT_ONLY_STRONG);

  new_space_.Verify();

  VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
  old_pointer_space_->Verify(&dirty_regions_visitor);
  map_space_->Verify(&dirty_regions_visitor);

  VerifyPointersUnderWatermark(old_pointer_space_,
                               &IteratePointersInDirtyRegion);
  VerifyPointersUnderWatermark(map_space_,
                               &IteratePointersInDirtyMapsRegion);
  VerifyPointersUnderWatermark(lo_space_);

  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);

  VerifyPointersVisitor no_dirty_regions_visitor;
  old_data_space_->Verify(&no_dirty_regions_visitor);
  code_space_->Verify(&no_dirty_regions_visitor);
  cell_space_->Verify(&no_dirty_regions_visitor);

  lo_space_->Verify();
}
#endif  // DEBUG


MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupAsciiSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
                                     int from,
                                     int length) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupSubStringAsciiSymbol(string,
                                                   from,
                                                   length,
                                                   &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupTwoByteSymbol(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


MaybeObject* Heap::LookupSymbol(String* string) {
  if (string->IsSymbol()) return string;
  Object* symbol = NULL;
  Object* new_table;
  { MaybeObject* maybe_new_table =
        symbol_table()->LookupString(string, &symbol);
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
  }
  // Can't use set_symbol_table because SymbolTable::cast knows that
  // SymbolTable is a singleton and checks for identity.
  roots_[kSymbolTableRootIndex] = new_table;
  ASSERT(symbol != NULL);
  return symbol;
}


bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
  if (string->IsSymbol()) {
    *symbol = string;
    return true;
  }
  return symbol_table()->LookupSymbolIfExists(string, symbol);
}


#ifdef DEBUG
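// Overwrite all of from-space with a recognizable failure value so that any
// stale pointer into from-space is likely to be caught by the next access.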
void Heap::ZapFromSpace() {
  ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure());
  for (Address a = new_space_.FromSpaceLow();
       a < new_space_.FromSpaceHigh();
       a += kPointerSize) {
    Memory::Address_at(a) = kFromSpaceZapValue;
  }
}
#endif  // DEBUG


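// Visits every pointer slot in [start, end), passing slots that point into
// new space to copy_object_func. Returns true if the region still contains
// pointers into new space afterwards, i.e. if it must remain marked dirty.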
Steve Block44f0eee2011-05-26 01:26:41 +01004634bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4635 Address start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004636 Address end,
4637 ObjectSlotCallback copy_object_func) {
4638 Address slot_address = start;
4639 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004640
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004641 while (slot_address < end) {
4642 Object** slot = reinterpret_cast<Object**>(slot_address);
Steve Block44f0eee2011-05-26 01:26:41 +01004643 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004644 ASSERT((*slot)->IsHeapObject());
4645 copy_object_func(reinterpret_cast<HeapObject**>(slot));
Steve Block44f0eee2011-05-26 01:26:41 +01004646 if (heap->InNewSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004647 ASSERT((*slot)->IsHeapObject());
4648 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004649 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004650 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004651 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004652 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004653 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004654}
4655
4656
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004657// Compute start address of the first map following given addr.
4658static inline Address MapStartAlign(Address addr) {
4659 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4660 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4661}
Steve Blocka7e24c12009-10-30 11:49:00 +00004662
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004663
4664// Compute end address of the first map preceding given addr.
4665static inline Address MapEndAlign(Address addr) {
4666 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4667 return page + ((addr - page) / Map::kSize * Map::kSize);
4668}
4669
4670
4671static bool IteratePointersInDirtyMaps(Address start,
4672 Address end,
4673 ObjectSlotCallback copy_object_func) {
4674 ASSERT(MapStartAlign(start) == start);
4675 ASSERT(MapEndAlign(end) == end);
4676
4677 Address map_address = start;
4678 bool pointers_to_new_space_found = false;
4679
Steve Block44f0eee2011-05-26 01:26:41 +01004680 Heap* heap = HEAP;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004681 while (map_address < end) {
Steve Block44f0eee2011-05-26 01:26:41 +01004682 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004683 ASSERT(Memory::Object_at(map_address)->IsMap());
4684
4685 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4686 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4687
Steve Block44f0eee2011-05-26 01:26:41 +01004688 if (Heap::IteratePointersInDirtyRegion(heap,
4689 pointer_fields_start,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004690 pointer_fields_end,
4691 copy_object_func)) {
4692 pointers_to_new_space_found = true;
4693 }
4694
4695 map_address += Map::kSize;
4696 }
4697
4698 return pointers_to_new_space_found;
4699}


bool Heap::IteratePointersInDirtyMapsRegion(
    Heap* heap,
    Address start,
    Address end,
    ObjectSlotCallback copy_object_func) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  bool contains_pointers_to_new_space = false;

  if (map_aligned_start != start) {
    Address prev_map = map_aligned_start - Map::kSize;
    ASSERT(Memory::Object_at(prev_map)->IsMap());

    Address pointer_fields_start =
        Max(start, prev_map + Map::kPointerFieldsBeginOffset);

    Address pointer_fields_end =
        Min(prev_map + Map::kPointerFieldsEndOffset, end);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(heap,
                                     pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  contains_pointers_to_new_space =
      IteratePointersInDirtyMaps(map_aligned_start,
                                 map_aligned_end,
                                 copy_object_func)
      || contains_pointers_to_new_space;

  if (map_aligned_end != end) {
    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());

    Address pointer_fields_start =
        map_aligned_end + Map::kPointerFieldsBeginOffset;

    Address pointer_fields_end =
        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);

    contains_pointers_to_new_space =
        IteratePointersInDirtyRegion(heap,
                                     pointer_fields_start,
                                     pointer_fields_end,
                                     copy_object_func)
        || contains_pointers_to_new_space;
  }

  return contains_pointers_to_new_space;
}
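// The region above is processed in three pieces: the tail of a map that
// straddles the region start, the maps fully contained in the region, and
// the head of a map that straddles the region end. Only the pointer-field
// range of each map is scanned, since the rest of a Map holds no tagged
// pointers.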


void Heap::IterateAndMarkPointersToFromSpace(Address start,
                                             Address end,
                                             ObjectSlotCallback callback) {
  Address slot_address = start;
  Page* page = Page::FromAddress(start);

  uint32_t marks = page->GetRegionMarks();

  while (slot_address < end) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (InFromSpace(*slot)) {
      ASSERT((*slot)->IsHeapObject());
      callback(reinterpret_cast<HeapObject**>(slot));
      if (InNewSpace(*slot)) {
        ASSERT((*slot)->IsHeapObject());
        marks |= page->GetRegionMaskForAddress(slot_address);
      }
    }
    slot_address += kPointerSize;
  }

  page->SetRegionMarks(marks);
}
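// Unlike the dirty-region iteration above, this variant updates the page's
// region mark bitmap directly: every slot that still points into new space
// after the callback ran gets its region bit set, so a later scavenge will
// rescan exactly those regions.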


uint32_t Heap::IterateDirtyRegions(
    uint32_t marks,
    Address area_start,
    Address area_end,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func) {
  uint32_t newmarks = 0;
  uint32_t mask = 1;

  if (area_start >= area_end) {
    return newmarks;
  }

  Address region_start = area_start;

  // area_start does not necessarily coincide with the start of the first
  // region. Thus to calculate the beginning of the next region we have to
  // align area_start by Page::kRegionSize.
  Address second_region =
      reinterpret_cast<Address>(
          reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
          ~Page::kRegionAlignmentMask);

  // The next region might be beyond area_end.
  Address region_end = Min(second_region, area_end);

  if (marks & mask) {
    if (visit_dirty_region(this, region_start, region_end, copy_object_func)) {
      newmarks |= mask;
    }
  }
  mask <<= 1;

  // Iterate subsequent regions that lie fully inside [area_start, area_end).
  region_start = region_end;
  region_end = region_start + Page::kRegionSize;

  while (region_end <= area_end) {
    if (marks & mask) {
      if (visit_dirty_region(this,
                             region_start,
                             region_end,
                             copy_object_func)) {
        newmarks |= mask;
      }
    }

    region_start = region_end;
    region_end = region_start + Page::kRegionSize;

    mask <<= 1;
  }

  if (region_start != area_end) {
    // A small piece of the area was left unvisited because area_end does not
    // coincide with a region boundary. Check whether the region covering the
    // last part of the area is dirty.
    if (marks & mask) {
      if (visit_dirty_region(this, region_start, area_end, copy_object_func)) {
        newmarks |= mask;
      }
    }
  }

  return newmarks;
}
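// Illustrative walk-through (hypothetical region size of 256 bytes): for an
// area spanning page offsets 100..700, the loop visits a partial first
// region (100..256), one full region (256..512), and a partial last region
// (512..700). One bit of 'marks' is consumed per region, and the matching
// bit in 'newmarks' is set only if that region still holds pointers into
// new space after visiting.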


void Heap::IterateDirtyRegions(
    PagedSpace* space,
    DirtyRegionCallback visit_dirty_region,
    ObjectSlotCallback copy_object_func,
    ExpectedPageWatermarkState expected_page_watermark_state) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  while (it.has_next()) {
    Page* page = it.next();
    uint32_t marks = page->GetRegionMarks();

    if (marks != Page::kAllRegionsCleanMarks) {
      Address start = page->ObjectAreaStart();

      // Do not try to visit pointers beyond the page allocation watermark.
      // The page can contain garbage pointers there.
      Address end;

      if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
          page->IsWatermarkValid()) {
        end = page->AllocationWatermark();
      } else {
        end = page->CachedAllocationWatermark();
      }

      ASSERT(space == old_pointer_space_ ||
             (space == map_space_ &&
              ((page->ObjectAreaStart() - end) % Map::kSize == 0)));

      page->SetRegionMarks(IterateDirtyRegions(marks,
                                               start,
                                               end,
                                               visit_dirty_region,
                                               copy_object_func));
    }

    // Mark the page watermark as invalid to maintain the watermark validity
    // invariant. See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
    page->InvalidateWatermark(true);
  }
}


void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
  IterateStrongRoots(v, mode);
  IterateWeakRoots(v, mode);
}


void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
  v->Synchronize("symbol_table");
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    // Scavenge collections have special processing for this.
    external_string_table_.Iterate(v);
  }
  v->Synchronize("external_string_table");
}


void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
  v->Synchronize("strong_root_list");

  v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
  v->Synchronize("symbol");

  isolate_->bootstrapper()->Iterate(v);
  v->Synchronize("bootstrapper");
  isolate_->Iterate(v);
  v->Synchronize("top");
  Relocatable::Iterate(v);
  v->Synchronize("relocatable");

#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->Iterate(v);
  if (isolate_->deoptimizer_data() != NULL) {
    isolate_->deoptimizer_data()->Iterate(v);
  }
#endif
  v->Synchronize("debug");
  isolate_->compilation_cache()->Iterate(v);
  v->Synchronize("compilationcache");

  // Iterate over local handles in handle scopes.
  isolate_->handle_scope_implementer()->Iterate(v);
  v->Synchronize("handlescope");

  // Iterate over the builtin code objects and code stubs in the
  // heap. Note that it is not necessary to iterate over code objects
  // on scavenge collections.
  if (mode != VISIT_ALL_IN_SCAVENGE &&
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
    isolate_->builtins()->IterateBuiltins(v);
  }
  v->Synchronize("builtins");

  // Iterate over global handles.
  switch (mode) {
    case VISIT_ONLY_STRONG:
      isolate_->global_handles()->IterateStrongRoots(v);
      break;
    case VISIT_ALL_IN_SCAVENGE:
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
      break;
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
    case VISIT_ALL:
      isolate_->global_handles()->IterateAllRoots(v);
      break;
  }
  v->Synchronize("globalhandles");

  // Iterate over pointers being held by inactive threads.
  isolate_->thread_manager()->Iterate(v);
  v->Synchronize("threadmanager");

  // Iterate over the pointers the Serialization/Deserialization code is
  // holding.
  // During garbage collection this keeps the partial snapshot cache alive.
  // During deserialization of the startup snapshot this creates the partial
  // snapshot cache and deserializes the objects it refers to. During
  // serialization this does nothing, since the partial snapshot cache is
  // empty. However the next thing we do is create the partial snapshot,
  // filling up the partial snapshot cache with objects it needs as we go.
  SerializerDeserializer::Iterate(v);
  // We don't do a v->Synchronize call here, because in debug mode that will
  // output a flag to the snapshot. However at this point the serializer and
  // deserializer are deliberately a little unsynchronized (see above) so the
  // checking of the sync flag in the snapshot would fail.
}


// TODO(1236194): Since the heap size is configurable on the command line
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(int max_semispace_size,
                         int max_old_gen_size,
                         int max_executable_size) {
  if (HasBeenSetup()) return false;

  if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;

  if (Snapshot::IsEnabled()) {
    // If we are using a snapshot we always reserve the default amount
    // of memory for each semispace because code in the snapshot has
    // write-barrier code that relies on the size and alignment of new
    // space. We therefore cannot use a larger max semispace size
    // than the default reserved semispace size.
    if (max_semispace_size_ > reserved_semispace_size_) {
      max_semispace_size_ = reserved_semispace_size_;
    }
  } else {
    // If we are not using snapshots we reserve space for the actual
    // max semispace size.
    reserved_semispace_size_ = max_semispace_size_;
  }

  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
  if (max_executable_size > 0) {
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
  }

  // The max executable size must be less than or equal to the max old
  // generation size.
  if (max_executable_size_ > max_old_generation_size_) {
    max_executable_size_ = max_old_generation_size_;
  }

  // The new space size must be a power of two to support single-bit testing
  // for containment.
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
  external_allocation_limit_ = 10 * max_semispace_size_;

  // The old generation is paged.
  max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);

  configured_ = true;
  return true;
}
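// Example of the rounding above (illustrative): a requested semispace size
// of 600KB is rounded up to the next power of two, 1MB, so that new-space
// containment can be tested by masking a single address bit; the requested
// old-generation size is merely rounded up to a page-size multiple.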


bool Heap::ConfigureHeapDefault() {
  return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
                       FLAG_max_old_space_size * MB,
                       FLAG_max_executable_size * MB);
}
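// Note: FLAG_max_new_space_size is expressed in kilobytes and covers both
// semispaces, hence the division by two before scaling by KB; the old-space
// and executable limits are plain megabyte counts.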
5041
5042
Ben Murdochbb769b22010-08-11 14:56:33 +01005043void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01005044 *stats->start_marker = HeapStats::kStartMarker;
5045 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01005046 *stats->new_space_size = new_space_.SizeAsInt();
5047 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00005048 *stats->old_pointer_space_size = old_pointer_space_->Size();
5049 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5050 *stats->old_data_space_size = old_data_space_->Size();
5051 *stats->old_data_space_capacity = old_data_space_->Capacity();
5052 *stats->code_space_size = code_space_->Size();
5053 *stats->code_space_capacity = code_space_->Capacity();
5054 *stats->map_space_size = map_space_->Size();
5055 *stats->map_space_capacity = map_space_->Capacity();
5056 *stats->cell_space_size = cell_space_->Size();
5057 *stats->cell_space_capacity = cell_space_->Capacity();
5058 *stats->lo_space_size = lo_space_->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01005059 isolate_->global_handles()->RecordStats(stats);
5060 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
Ben Murdochbb769b22010-08-11 14:56:33 +01005061 *stats->memory_allocator_capacity =
Steve Block44f0eee2011-05-26 01:26:41 +01005062 isolate()->memory_allocator()->Size() +
5063 isolate()->memory_allocator()->Available();
Iain Merrick75681382010-08-19 15:07:18 +01005064 *stats->os_error = OS::GetLastError();
Steve Block44f0eee2011-05-26 01:26:41 +01005065 isolate()->memory_allocator()->Available();
Ben Murdochbb769b22010-08-11 14:56:33 +01005066 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01005067 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01005068 for (HeapObject* obj = iterator.next();
5069 obj != NULL;
5070 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01005071 InstanceType type = obj->map()->instance_type();
5072 ASSERT(0 <= type && type <= LAST_TYPE);
5073 stats->objects_per_type[type]++;
5074 stats->size_per_type[type] += obj->Size();
5075 }
5076 }
Steve Blockd0582a62009-12-15 09:54:21 +00005077}


intptr_t Heap::PromotedSpaceSize() {
  return old_pointer_space_->Size()
      + old_data_space_->Size()
      + code_space_->Size()
      + map_space_->Size()
      + cell_space_->Size()
      + lo_space_->Size();
}


int Heap::PromotedExternalMemorySize() {
  if (amount_of_external_allocated_memory_
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}
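// Example: if the embedder had reported 30MB of external memory at the last
// full GC and 42MB has been reported in total since, this returns 12MB; if
// the total has shrunk below the last-GC amount, it returns 0.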

#ifdef DEBUG

// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;
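
// The marking trick used below: a visited object has kMarkTag added to its
// map pointer, which leaves a value that no longer looks like a HeapObject
// pointer. "Already visited" is therefore detected by map->IsHeapObject()
// failing, and unmarking subtracts the tag to restore the real map.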


class HeapDebugUtils {
 public:
  explicit HeapDebugUtils(Heap* heap)
    : search_for_any_global_(false),
      search_target_(NULL),
      found_target_(false),
      object_stack_(20),
      heap_(heap) {
  }

  class MarkObjectVisitor : public ObjectVisitor {
   public:
    explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Mark all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  void MarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (!map->IsHeapObject()) return;  // visited before

    if (found_target_) return;  // stop if target found
    object_stack_.Add(obj);
    if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
        (!search_for_any_global_ && (obj == search_target_))) {
      found_target_ = true;
      return;
    }

    // not visited yet
    Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

    Address map_addr = map_p->address();

    obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

    MarkObjectRecursively(&map);

    MarkObjectVisitor mark_visitor(this);

    obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
                     &mark_visitor);

    if (!found_target_)  // don't pop if found the target
      object_stack_.RemoveLast();
  }


  class UnmarkObjectVisitor : public ObjectVisitor {
   public:
    explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Unmark all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->UnmarkObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };


  void UnmarkObjectRecursively(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);

    Object* map = obj->map();

    if (map->IsHeapObject()) return;  // unmarked already

    Address map_addr = reinterpret_cast<Address>(map);

    map_addr -= kMarkTag;

    ASSERT_TAG_ALIGNED(map_addr);

    HeapObject* map_p = HeapObject::FromAddress(map_addr);

    obj->set_map(reinterpret_cast<Map*>(map_p));

    UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

    UnmarkObjectVisitor unmark_visitor(this);

    obj->IterateBody(Map::cast(map_p)->instance_type(),
                     obj->SizeFromMap(Map::cast(map_p)),
                     &unmark_visitor);
  }


  void MarkRootObjectRecursively(Object** root) {
    if (search_for_any_global_) {
      ASSERT(search_target_ == NULL);
    } else {
      ASSERT(search_target_->IsHeapObject());
    }
    found_target_ = false;
    object_stack_.Clear();

    MarkObjectRecursively(root);
    UnmarkObjectRecursively(root);

    if (found_target_) {
      PrintF("=====================================\n");
      PrintF("====        Path to object       ====\n");
      PrintF("=====================================\n\n");

      ASSERT(!object_stack_.is_empty());
      for (int i = 0; i < object_stack_.length(); i++) {
        if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
        Object* obj = object_stack_[i];
        obj->Print();
      }
      PrintF("=====================================\n");
    }
  }

  // Helper class for visiting HeapObjects recursively.
  class MarkRootVisitor: public ObjectVisitor {
   public:
    explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }

    void VisitPointers(Object** start, Object** end) {
      // Visit all HeapObject pointers in [start, end)
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsHeapObject())
          utils_->MarkRootObjectRecursively(p);
      }
    }

    HeapDebugUtils* utils_;
  };

  bool search_for_any_global_;
  Object* search_target_;
  bool found_target_;
  List<Object*> object_stack_;
  Heap* heap_;

  friend class Heap;
};

#endif

bool Heap::Setup(bool create_heap_objects) {
#ifdef DEBUG
  debug_utils_ = new HeapDebugUtils(this);
#endif

  // Initialize heap spaces and initial maps and objects. Whenever something
  // goes wrong, just return false. The caller should check the results and
  // call Heap::TearDown() to release allocated memory.
  //
  // If the heap is not yet configured (e.g. through the API), configure it.
  // Configuration is based on the flags new-space-size (really the semispace
  // size) and old-space-size if set, or the initial values of semispace_size_
  // and old_generation_size_ otherwise.
  if (!configured_) {
    if (!ConfigureHeapDefault()) return false;
  }

  gc_initializer_mutex->Lock();
  static bool initialized_gc = false;
  if (!initialized_gc) {
    initialized_gc = true;
    InitializeScavengingVisitorsTables();
    NewSpaceScavenger::Initialize();
    MarkCompactCollector::Initialize();
  }
  gc_initializer_mutex->Unlock();

  MarkMapPointersAsEncoded(false);

  // Setup the memory allocator and reserve a chunk of memory for new
  // space. The chunk is double the size of the requested reserved
  // new space size to ensure that we can find a pair of semispaces that
  // are contiguous and aligned to their size.
  if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
    return false;
  void* chunk =
      isolate_->memory_allocator()->ReserveInitialChunk(
          4 * reserved_semispace_size_);
  if (chunk == NULL) return false;

  // Align the pair of semispaces to their size, which must be a power
  // of 2.
  Address new_space_start =
      RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
  if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
    return false;
  }

  // Initialize old pointer space.
  old_pointer_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_POINTER_SPACE,
                   NOT_EXECUTABLE);
  if (old_pointer_space_ == NULL) return false;
  if (!old_pointer_space_->Setup(NULL, 0)) return false;

  // Initialize old data space.
  old_data_space_ =
      new OldSpace(this,
                   max_old_generation_size_,
                   OLD_DATA_SPACE,
                   NOT_EXECUTABLE);
  if (old_data_space_ == NULL) return false;
  if (!old_data_space_->Setup(NULL, 0)) return false;

  // Initialize the code space, set its maximum capacity to the old
  // generation size. It needs executable memory.
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
  // virtual address space, so that they can call each other with near calls.
  if (code_range_size_ > 0) {
    if (!isolate_->code_range()->Setup(code_range_size_)) {
      return false;
    }
  }

  code_space_ =
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
  if (code_space_ == NULL) return false;
  if (!code_space_->Setup(NULL, 0)) return false;

  // Initialize map space.
  map_space_ = new MapSpace(this, FLAG_use_big_map_space
                                ? max_old_generation_size_
                                : MapSpace::kMaxMapPageIndex * Page::kPageSize,
                            FLAG_max_map_space_pages,
                            MAP_SPACE);
  if (map_space_ == NULL) return false;
  if (!map_space_->Setup(NULL, 0)) return false;

  // Initialize global property cell space.
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
  if (cell_space_ == NULL) return false;
  if (!cell_space_->Setup(NULL, 0)) return false;

  // The large object code space may contain code or data. We set the memory
  // to be non-executable here for safety, but this means we need to enable it
  // explicitly when allocating large code objects.
  lo_space_ = new LargeObjectSpace(this, LO_SPACE);
  if (lo_space_ == NULL) return false;
  if (!lo_space_->Setup()) return false;

  if (create_heap_objects) {
    // Create initial maps.
    if (!CreateInitialMaps()) return false;
    if (!CreateApiObjects()) return false;

    // Create initial objects.
    if (!CreateInitialObjects()) return false;

    global_contexts_list_ = undefined_value();
  }

  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));

  return true;
}
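// Sizing note on Setup() above: new space consists of two semispaces, so it
// needs 2 * reserved_semispace_size_ bytes; the initial chunk reserves twice
// that (4 * reserved_semispace_size_) so that a contiguous, size-aligned
// pair of semispaces can always be carved out of it.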


void Heap::SetStackLimits() {
  ASSERT(isolate_ != NULL);
  ASSERT(isolate_ == isolate());
  // On 64 bit machines, pointers are generally out of range of Smis. We write
  // something that looks like an out of range Smi to the GC.

  // Set up the special root array entries containing the stack limits.
  // These are actually addresses, but the tag makes the GC ignore it.
  roots_[kStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
  roots_[kRealStackLimitRootIndex] =
      reinterpret_cast<Object*>(
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
}
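// A sketch of the tagging above (assuming the usual V8 smi encoding, where
// kSmiTag == 0 and kSmiTagMask == 1): clearing the low bit of the limit
// address yields a smi-tagged value, so the GC treats the root entry as an
// immediate rather than a pointer and never tries to relocate it.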


void Heap::TearDown() {
  if (FLAG_print_cumulative_gc_stat) {
    PrintF("\n\n");
    PrintF("gc_count=%d ", gc_count_);
    PrintF("mark_sweep_count=%d ", ms_count_);
    PrintF("mark_compact_count=%d ", mc_count_);
    PrintF("max_gc_pause=%d ", get_max_gc_pause());
    PrintF("min_in_mutator=%d ", get_min_in_mutator());
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
           get_max_alive_after_gc());
    PrintF("\n\n");
  }

  isolate_->global_handles()->TearDown();

  external_string_table_.TearDown();

  new_space_.TearDown();

  if (old_pointer_space_ != NULL) {
    old_pointer_space_->TearDown();
    delete old_pointer_space_;
    old_pointer_space_ = NULL;
  }

  if (old_data_space_ != NULL) {
    old_data_space_->TearDown();
    delete old_data_space_;
    old_data_space_ = NULL;
  }

  if (code_space_ != NULL) {
    code_space_->TearDown();
    delete code_space_;
    code_space_ = NULL;
  }

  if (map_space_ != NULL) {
    map_space_->TearDown();
    delete map_space_;
    map_space_ = NULL;
  }

  if (cell_space_ != NULL) {
    cell_space_->TearDown();
    delete cell_space_;
    cell_space_ = NULL;
  }

  if (lo_space_ != NULL) {
    lo_space_->TearDown();
    delete lo_space_;
    lo_space_ = NULL;
  }

  isolate_->memory_allocator()->TearDown();

#ifdef DEBUG
  delete debug_utils_;
  debug_utils_ = NULL;
#endif
}


void Heap::Shrink() {
  // Try to shrink all paged spaces.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->Shrink();
}


void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCPrologueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
  return gc_prologue_callbacks_.Add(pair);
}


void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_prologue_callbacks_[i].callback == callback) {
      gc_prologue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
  ASSERT(callback != NULL);
  GCEpilogueCallbackPair pair(callback, gc_type);
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
  return gc_epilogue_callbacks_.Add(pair);
}


void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_epilogue_callbacks_[i].callback == callback) {
      gc_epilogue_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
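// Hypothetical usage sketch (embedder side, via the public API wrapping
// these methods): register a prologue hook that fires only for full GCs.
//
//   static void OnFullGCPrologue(GCType type, GCCallbackFlags flags) {
//     // Runs just before every mark-sweep/mark-compact collection.
//   }
//   ...
//   AddGCPrologueCallback(OnFullGCPrologue, kGCTypeMarkSweepCompact);
//
// The callback signature and the kGCTypeMarkSweepCompact value are
// assumptions based on the GCPrologueCallback/GCType types used above.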


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};

void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif


Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return HEAP->new_space();
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    case LO_SPACE:
      return HEAP->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    case MAP_SPACE:
      return HEAP->map_space();
    case CELL_SPACE:
      return HEAP->cell_space();
    default:
      return NULL;
  }
}


OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return HEAP->old_pointer_space();
    case OLD_DATA_SPACE:
      return HEAP->old_data_space();
    case CODE_SPACE:
      return HEAP->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator()
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
    : current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete the active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return an iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


class FreeListNodesFilter : public HeapObjectsFilter {
 public:
  FreeListNodesFilter() {
    MarkFreeListNodes();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  void MarkFreeListNodes() {
    Heap* heap = HEAP;
    heap->old_pointer_space()->MarkFreeListNodes();
    heap->old_data_space()->MarkFreeListNodes();
    MarkCodeSpaceFreeListNodes(heap);
    heap->map_space()->MarkFreeListNodes();
    heap->cell_space()->MarkFreeListNodes();
  }

  void MarkCodeSpaceFreeListNodes(Heap* heap) {
    // For code space, using FreeListNode::IsFreeListNode is OK.
    HeapObjectIterator iter(heap->code_space());
    for (HeapObject* obj = iter.next_object();
         obj != NULL;
         obj = iter.next_object()) {
      if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
    }
  }

  AssertNoAllocation no_alloc;
};


class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  UnreachableObjectsFilter() {
    MarkUnreachableObjects();
  }

  bool SkipObject(HeapObject* object) {
    if (object->IsMarked()) {
      object->ClearMark();
      return true;
    } else {
      return false;
    }
  }

 private:
  class UnmarkingVisitor : public ObjectVisitor {
   public:
    UnmarkingVisitor() : list_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        if (obj->IsMarked()) {
          obj->ClearMark();
          list_.Add(obj);
        }
      }
    }

    bool can_process() { return !list_.is_empty(); }

    void ProcessNext() {
      HeapObject* obj = list_.RemoveLast();
      obj->Iterate(this);
    }

   private:
    List<HeapObject*> list_;
  };

  void MarkUnreachableObjects() {
    HeapIterator iterator;
    for (HeapObject* obj = iterator.next();
         obj != NULL;
         obj = iterator.next()) {
      obj->SetMark();
    }
    UnmarkingVisitor visitor;
    HEAP->IterateRoots(&visitor, VISIT_ALL);
    while (visitor.can_process())
      visitor.ProcessNext();
  }

  AssertNoAllocation no_alloc;
};
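// How UnreachableObjectsFilter works: first every object in the heap is
// marked, then everything reachable from the roots is unmarked by the
// worklist-driven UnmarkingVisitor. Whatever is still marked afterwards is
// unreachable, and SkipObject filters exactly those objects out (clearing
// the mark as a side effect so the heap is left clean).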


HeapIterator::HeapIterator()
    : filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
    : filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
      new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
  switch (filtering_) {
    case kFilterFreeListNodes:
      filter_ = new FreeListNodesFilter;
      break;
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter;
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, the heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}
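// Usage sketch (mirroring the loop in RecordStats above): walk every live
// object in the heap, optionally skipping free-list nodes.
//
//   HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // ... inspect obj ...
//   }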


#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)

Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  object_stack_.Clear();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_global_context = obj->IsGlobalContext();

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Contexts properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
#ifdef OBJECT_PRINT
      obj->Print();
#else
      obj->ShortPrint();
#endif
    }
    PrintF("=====================================\n");
  }
}
#endif  // DEBUG || LIVE_OBJECT_LIST


#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from the roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from the roots
// and finds a path to any global object and prints it. Useful for
// determining the source of leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif


static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}
6061
6062
Steve Block44f0eee2011-05-26 01:26:41 +01006063GCTracer::GCTracer(Heap* heap)
Steve Blocka7e24c12009-10-30 11:49:00 +00006064 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01006065 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00006066 gc_count_(0),
6067 full_gc_count_(0),
6068 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01006069 marked_count_(0),
6070 allocated_since_last_gc_(0),
6071 spent_in_mutator_(0),
Steve Block44f0eee2011-05-26 01:26:41 +01006072 promoted_objects_size_(0),
6073 heap_(heap) {
Steve Blocka7e24c12009-10-30 11:49:00 +00006074 // These two fields reflect the state of the previous full collection.
6075 // Set them before they are changed by the collector.
Steve Block44f0eee2011-05-26 01:26:41 +01006076 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
6077 previous_marked_count_ =
6078 heap_->mark_compact_collector_.previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01006079 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00006080 start_time_ = OS::TimeCurrentMillis();
Steve Block44f0eee2011-05-26 01:26:41 +01006081 start_size_ = heap_->SizeOfObjects();
Leon Clarkef7060e22010-06-03 12:02:55 +01006082
6083 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6084 scopes_[i] = 0;
6085 }
6086
6087 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6088
Steve Block44f0eee2011-05-26 01:26:41 +01006089 allocated_since_last_gc_ =
6090 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
Leon Clarkef7060e22010-06-03 12:02:55 +01006091
Steve Block44f0eee2011-05-26 01:26:41 +01006092 if (heap_->last_gc_end_timestamp_ > 0) {
6093 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
Leon Clarkef7060e22010-06-03 12:02:55 +01006094 }
Steve Blocka7e24c12009-10-30 11:49:00 +00006095}


GCTracer::~GCTracer() {
  // Print ONE line of output iff a tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("%s",
               heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}
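
// Example output, with made-up numbers.  The default --trace-gc form:
//
//   Scavenge 12.3 -> 8.1 MB, 4 ms.
//   Mark-sweep 42.0 -> 20.5 MB, 2 / 85 ms.   (external time / total pause)
//
// and the machine-readable --trace-gc-nvp form of the same event:
//
//   pause=85 mutator=1200 gc=ms external=2 mark=40 sweep=35 sweepns=5
//   compact=0 total_size_before=44040192 total_size_after=21495808 ...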


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact"
                                                           : "Mark-sweep";
  }
  return "Unknown GC";
}
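
// Note: these long-form names feed the human-readable --trace-gc line; the
// --trace-gc-nvp path above abbreviates the same three cases as "s", "ms"
// and "mc".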


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}
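
// Sketch of the mixing, with hypothetical values (the real constants live
// in the header): for a map at 0x2a8c40 and a shift of 2, addr_hash is
// 0xaa310; XOR-ing that with the string's hash and masking with
// kCapacityMask folds both inputs into an index within the fixed-size
// keys_ array, so distinct (map, name) pairs tend to land in distinct
// slots.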


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return kNotFound;
}
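
// Hypothetical caller, in the spirit of the keyed-load IC runtime (the
// accessor name is an assumption):
//
//   int offset = isolate->keyed_lookup_cache()->Lookup(map, name);
//   if (offset != KeyedLookupCache::kNotFound) {
//     // Fast path: the property is in-object at 'offset'; skip the full
//     // descriptor search.
//   }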


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (HEAP->LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}
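
// Only names that already exist in the symbol table are cached, likely
// because symbols are canonical: every later lookup of the same property
// string funnels to the same symbol object, which keeps cache hits cheap
// and avoids pinning arbitrary transient strings in the cache.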


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif
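
// Intended effect of --gc-greedy (a hedged reading, not spelled out in
// this file): forcing a new-space collection at every eligible check point
// makes code that holds raw object pointers across a possible allocation
// fail deterministically in debug builds, instead of only when a GC
// happens to strike at the wrong moment.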


TranscendentalCache::SubCache::SubCache(Type t)
    : type_(t),
      isolate_(Isolate::Current()) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}
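
// Why that sentinel works (illustrative): cache probes compare the raw
// 64-bit pattern of the double argument against elements_[i].in, and
// hardware arithmetic never produces this particular NaN encoding, so an
// empty slot can never be mistaken for a hit on a real input:
//
//   union { double d; uint32_t bits[2]; } u;  // sketch only
//   u.bits[0] = 0xffffffffu;
//   u.bits[1] = 0xffffffffu;
//   // u.d is a NaN, but not one the FPU emits for any computation.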


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->raw_unchecked_null_value()) continue;
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}
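
// Invariant this restores (a hedged reading of the code): after CleanUp(),
// new_space_strings_ holds only external strings still backed by new-space
// objects, strings promoted by the last GC have migrated to
// old_space_strings_, and entries overwritten with the null sentinel
// (strings released in the meantime) are dropped from both lists.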


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


} }  // namespace v8::internal