blob: 5832ccbb3900a7a9227397bdad8c3cd1032b15ec
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001// Copyright 2010 the V8 project authors. All rights reserved.
Steve Blocka7e24c12009-10-30 11:49:00 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "accessors.h"
31#include "api.h"
32#include "bootstrapper.h"
33#include "codegen-inl.h"
34#include "compilation-cache.h"
35#include "debug.h"
36#include "heap-profiler.h"
37#include "global-handles.h"
38#include "mark-compact.h"
39#include "natives.h"
Iain Merrick75681382010-08-19 15:07:18 +010040#include "objects-visiting.h"
Ben Murdochb0fe1622011-05-05 13:52:32 +010041#include "runtime-profiler.h"
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -080042#include "scanner-base.h"
Steve Blocka7e24c12009-10-30 11:49:00 +000043#include "scopeinfo.h"
Steve Block3ce2e202009-11-05 08:53:23 +000044#include "snapshot.h"
Steve Blocka7e24c12009-10-30 11:49:00 +000045#include "v8threads.h"
Ben Murdochb0fe1622011-05-05 13:52:32 +010046#include "vm-state-inl.h"
Steve Block6ded16b2010-05-10 14:33:55 +010047#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +000048#include "regexp-macro-assembler.h"
Steve Blockd0582a62009-12-15 09:54:21 +000049#include "arm/regexp-macro-assembler-arm.h"
Steve Blocka7e24c12009-10-30 11:49:00 +000050#endif
51
Steve Block6ded16b2010-05-10 14:33:55 +010052
Steve Blocka7e24c12009-10-30 11:49:00 +000053namespace v8 {
54namespace internal {
55
56
57String* Heap::hidden_symbol_;
58Object* Heap::roots_[Heap::kRootListLength];
Ben Murdochf87a2032010-10-22 12:50:53 +010059Object* Heap::global_contexts_list_;
Steve Blocka7e24c12009-10-30 11:49:00 +000060
John Reck59135872010-11-02 12:39:01 -070061
Steve Blocka7e24c12009-10-30 11:49:00 +000062NewSpace Heap::new_space_;
63OldSpace* Heap::old_pointer_space_ = NULL;
64OldSpace* Heap::old_data_space_ = NULL;
65OldSpace* Heap::code_space_ = NULL;
66MapSpace* Heap::map_space_ = NULL;
67CellSpace* Heap::cell_space_ = NULL;
68LargeObjectSpace* Heap::lo_space_ = NULL;
69
John Reck59135872010-11-02 12:39:01 -070070static const intptr_t kMinimumPromotionLimit = 2 * MB;
71static const intptr_t kMinimumAllocationLimit = 8 * MB;
72
Ben Murdochf87a2032010-10-22 12:50:53 +010073intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
74intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;
Steve Blocka7e24c12009-10-30 11:49:00 +000075
76int Heap::old_gen_exhausted_ = false;
77
78int Heap::amount_of_external_allocated_memory_ = 0;
79int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;
80
81// semispace_size_ should be a power of 2 and old_generation_size_ should be
82// a multiple of Page::kPageSize.
83#if defined(ANDROID)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -080084static const int default_max_semispace_size_ = 2*MB;
Ben Murdochf87a2032010-10-22 12:50:53 +010085intptr_t Heap::max_old_generation_size_ = 192*MB;
Steve Blocka7e24c12009-10-30 11:49:00 +000086int Heap::initial_semispace_size_ = 128*KB;
Ben Murdochf87a2032010-10-22 12:50:53 +010087intptr_t Heap::code_range_size_ = 0;
Russell Brenner90bac252010-11-18 13:33:46 -080088intptr_t Heap::max_executable_size_ = max_old_generation_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +000089#elif defined(V8_TARGET_ARCH_X64)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -080090static const int default_max_semispace_size_ = 16*MB;
Ben Murdochf87a2032010-10-22 12:50:53 +010091intptr_t Heap::max_old_generation_size_ = 1*GB;
Steve Blocka7e24c12009-10-30 11:49:00 +000092int Heap::initial_semispace_size_ = 1*MB;
Ben Murdochf87a2032010-10-22 12:50:53 +010093intptr_t Heap::code_range_size_ = 512*MB;
Russell Brenner90bac252010-11-18 13:33:46 -080094intptr_t Heap::max_executable_size_ = 256*MB;
Steve Blocka7e24c12009-10-30 11:49:00 +000095#else
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -080096static const int default_max_semispace_size_ = 8*MB;
Ben Murdochf87a2032010-10-22 12:50:53 +010097intptr_t Heap::max_old_generation_size_ = 512*MB;
Steve Blocka7e24c12009-10-30 11:49:00 +000098int Heap::initial_semispace_size_ = 512*KB;
Ben Murdochf87a2032010-10-22 12:50:53 +010099intptr_t Heap::code_range_size_ = 0;
Russell Brenner90bac252010-11-18 13:33:46 -0800100intptr_t Heap::max_executable_size_ = 128*MB;
Steve Blocka7e24c12009-10-30 11:49:00 +0000101#endif
102
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -0800103// Allow build-time customization of the max semispace size. Building
104// V8 with snapshots and a non-default max semispace size is much
105// easier if you can define it as part of the build environment.
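// For example, a build could pass -DV8_MAX_SEMISPACE_SIZE=4194304 (4 MB) to
// the compiler; that value is then used instead of
// default_max_semispace_size_ below.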
106#if defined(V8_MAX_SEMISPACE_SIZE)
107int Heap::max_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
108#else
109int Heap::max_semispace_size_ = default_max_semispace_size_;
110#endif
111
Steve Block3ce2e202009-11-05 08:53:23 +0000112// The snapshot semispace size will be the default semispace size if
113// snapshotting is used and will be the requested semispace size as
114// set up by ConfigureHeap otherwise.
115int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;
116
Steve Block6ded16b2010-05-10 14:33:55 +0100117List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
118List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;
119
Steve Blocka7e24c12009-10-30 11:49:00 +0000120GCCallback Heap::global_gc_prologue_callback_ = NULL;
121GCCallback Heap::global_gc_epilogue_callback_ = NULL;
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100122HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +0000123
124// Variables set based on semispace_size_ and old_generation_size_ in
125// ConfigureHeap.
Steve Block3ce2e202009-11-05 08:53:23 +0000126
127// Will be 4 * reserved_semispace_size_ to ensure that young
128// generation can be aligned to its size.
Steve Blocka7e24c12009-10-30 11:49:00 +0000129int Heap::survived_since_last_expansion_ = 0;
Ben Murdochf87a2032010-10-22 12:50:53 +0100130intptr_t Heap::external_allocation_limit_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000131
132Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
133
134int Heap::mc_count_ = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +0100135int Heap::ms_count_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000136int Heap::gc_count_ = 0;
137
Leon Clarkef7060e22010-06-03 12:02:55 +0100138GCTracer* Heap::tracer_ = NULL;
139
Steve Block6ded16b2010-05-10 14:33:55 +0100140int Heap::unflattened_strings_length_ = 0;
141
Steve Blocka7e24c12009-10-30 11:49:00 +0000142int Heap::always_allocate_scope_depth_ = 0;
Steve Blockd0582a62009-12-15 09:54:21 +0000143int Heap::linear_allocation_scope_depth_ = 0;
Steve Block6ded16b2010-05-10 14:33:55 +0100144int Heap::contexts_disposed_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000145
Steve Block8defd9f2010-07-08 12:39:36 +0100146int Heap::young_survivors_after_last_gc_ = 0;
147int Heap::high_survival_rate_period_length_ = 0;
148double Heap::survival_rate_ = 0;
149Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
150Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;
151
Steve Blocka7e24c12009-10-30 11:49:00 +0000152#ifdef DEBUG
153bool Heap::allocation_allowed_ = true;
154
155int Heap::allocation_timeout_ = 0;
156bool Heap::disallow_allocation_failure_ = false;
157#endif // DEBUG
158
Ben Murdochf87a2032010-10-22 12:50:53 +0100159intptr_t GCTracer::alive_after_last_gc_ = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +0100160double GCTracer::last_gc_end_timestamp_ = 0.0;
161int GCTracer::max_gc_pause_ = 0;
Ben Murdochf87a2032010-10-22 12:50:53 +0100162intptr_t GCTracer::max_alive_after_gc_ = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +0100163int GCTracer::min_in_mutator_ = kMaxInt;
Steve Blocka7e24c12009-10-30 11:49:00 +0000164
Ben Murdochf87a2032010-10-22 12:50:53 +0100165intptr_t Heap::Capacity() {
Steve Blocka7e24c12009-10-30 11:49:00 +0000166 if (!HasBeenSetup()) return 0;
167
168 return new_space_.Capacity() +
169 old_pointer_space_->Capacity() +
170 old_data_space_->Capacity() +
171 code_space_->Capacity() +
172 map_space_->Capacity() +
173 cell_space_->Capacity();
174}
175
176
Ben Murdochf87a2032010-10-22 12:50:53 +0100177intptr_t Heap::CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +0000178 if (!HasBeenSetup()) return 0;
179
180 return new_space_.CommittedMemory() +
181 old_pointer_space_->CommittedMemory() +
182 old_data_space_->CommittedMemory() +
183 code_space_->CommittedMemory() +
184 map_space_->CommittedMemory() +
185 cell_space_->CommittedMemory() +
186 lo_space_->Size();
187}
188
Russell Brenner90bac252010-11-18 13:33:46 -0800189intptr_t Heap::CommittedMemoryExecutable() {
190 if (!HasBeenSetup()) return 0;
191
192 return MemoryAllocator::SizeExecutable();
193}
194
Steve Block3ce2e202009-11-05 08:53:23 +0000195
Ben Murdochf87a2032010-10-22 12:50:53 +0100196intptr_t Heap::Available() {
Steve Blocka7e24c12009-10-30 11:49:00 +0000197 if (!HasBeenSetup()) return 0;
198
199 return new_space_.Available() +
200 old_pointer_space_->Available() +
201 old_data_space_->Available() +
202 code_space_->Available() +
203 map_space_->Available() +
204 cell_space_->Available();
205}
206
207
208bool Heap::HasBeenSetup() {
209 return old_pointer_space_ != NULL &&
210 old_data_space_ != NULL &&
211 code_space_ != NULL &&
212 map_space_ != NULL &&
213 cell_space_ != NULL &&
214 lo_space_ != NULL;
215}
216
217
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100218int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
219 ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
220 ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
221 MapWord map_word = object->map_word();
222 map_word.ClearMark();
223 map_word.ClearOverflow();
224 return object->SizeFromMap(map_word.ToMap());
225}
226
227
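// While map pointers are encoded during compaction, free regions in old
// space are marked with kSingleFreeEncoding (a free cell of kIntSize bytes)
// or kMultiFreeEncoding (a larger free block whose size is stored right
// after the marker), so the object size has to be recovered either from
// those markers or from the decoded map, as done below.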
228int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
229 ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
230 ASSERT(MarkCompactCollector::are_map_pointers_encoded());
231 uint32_t marker = Memory::uint32_at(object->address());
232 if (marker == MarkCompactCollector::kSingleFreeEncoding) {
233 return kIntSize;
234 } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
235 return Memory::int_at(object->address() + kIntSize);
236 } else {
237 MapWord map_word = object->map_word();
238 Address map_address = map_word.DecodeMapAddress(Heap::map_space());
239 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
240 return object->SizeFromMap(map);
241 }
242}
243
244
Steve Blocka7e24c12009-10-30 11:49:00 +0000245GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
246 // Is global GC requested?
247 if (space != NEW_SPACE || FLAG_gc_global) {
248 Counters::gc_compactor_caused_by_request.Increment();
249 return MARK_COMPACTOR;
250 }
251
252 // Is enough data promoted to justify a global GC?
253 if (OldGenerationPromotionLimitReached()) {
254 Counters::gc_compactor_caused_by_promoted_data.Increment();
255 return MARK_COMPACTOR;
256 }
257
258 // Have allocation in OLD and LO failed?
259 if (old_gen_exhausted_) {
260 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
261 return MARK_COMPACTOR;
262 }
263
264 // Is there enough space left in OLD to guarantee that a scavenge can
265 // succeed?
266 //
267 // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
268 // for object promotion. It counts only the bytes that the memory
269 // allocator has not yet allocated from the OS and assigned to any space,
270 // and does not count available bytes already in the old space or code
271 // space. Undercounting is safe---we may get an unrequested full GC when
272 // a scavenge would have succeeded.
273 if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
274 Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
275 return MARK_COMPACTOR;
276 }
277
278 // Default
279 return SCAVENGER;
280}
281
282
283// TODO(1238405): Combine the infrastructure for --heap-stats and
284// --log-gc to avoid the complicated preprocessor and flag testing.
285#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
286void Heap::ReportStatisticsBeforeGC() {
287 // Heap::ReportHeapStatistics will also log NewSpace statistics when
288 // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set. The
289 // following logic is used to avoid double logging.
290#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
291 if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
292 if (FLAG_heap_stats) {
293 ReportHeapStatistics("Before GC");
294 } else if (FLAG_log_gc) {
295 new_space_.ReportStatistics();
296 }
297 if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
298#elif defined(DEBUG)
299 if (FLAG_heap_stats) {
300 new_space_.CollectStatistics();
301 ReportHeapStatistics("Before GC");
302 new_space_.ClearHistograms();
303 }
304#elif defined(ENABLE_LOGGING_AND_PROFILING)
305 if (FLAG_log_gc) {
306 new_space_.CollectStatistics();
307 new_space_.ReportStatistics();
308 new_space_.ClearHistograms();
309 }
310#endif
311}
312
313
314#if defined(ENABLE_LOGGING_AND_PROFILING)
315void Heap::PrintShortHeapStatistics() {
316 if (!FLAG_trace_gc_verbose) return;
Ben Murdochf87a2032010-10-22 12:50:53 +0100317 PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
318 ", available: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000319 MemoryAllocator::Size(),
320 MemoryAllocator::Available());
Ben Murdochf87a2032010-10-22 12:50:53 +0100321 PrintF("New space, used: %8" V8_PTR_PREFIX "d"
322 ", available: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000323 Heap::new_space_.Size(),
324 new_space_.Available());
Ben Murdochf87a2032010-10-22 12:50:53 +0100325 PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
326 ", available: %8" V8_PTR_PREFIX "d"
327 ", waste: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000328 old_pointer_space_->Size(),
329 old_pointer_space_->Available(),
330 old_pointer_space_->Waste());
Ben Murdochf87a2032010-10-22 12:50:53 +0100331 PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
332 ", available: %8" V8_PTR_PREFIX "d"
333 ", waste: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000334 old_data_space_->Size(),
335 old_data_space_->Available(),
336 old_data_space_->Waste());
Ben Murdochf87a2032010-10-22 12:50:53 +0100337 PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
338 ", available: %8" V8_PTR_PREFIX "d"
339 ", waste: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000340 code_space_->Size(),
341 code_space_->Available(),
342 code_space_->Waste());
Ben Murdochf87a2032010-10-22 12:50:53 +0100343 PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
344 ", available: %8" V8_PTR_PREFIX "d"
345 ", waste: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000346 map_space_->Size(),
347 map_space_->Available(),
348 map_space_->Waste());
Ben Murdochf87a2032010-10-22 12:50:53 +0100349 PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
350 ", available: %8" V8_PTR_PREFIX "d"
351 ", waste: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000352 cell_space_->Size(),
353 cell_space_->Available(),
354 cell_space_->Waste());
Ben Murdochf87a2032010-10-22 12:50:53 +0100355 PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
356 ", available: %8" V8_PTR_PREFIX "d\n",
Steve Block3ce2e202009-11-05 08:53:23 +0000357 lo_space_->Size(),
358 lo_space_->Available());
Steve Blocka7e24c12009-10-30 11:49:00 +0000359}
360#endif
361
362
363// TODO(1238405): Combine the infrastructure for --heap-stats and
364// --log-gc to avoid the complicated preprocessor and flag testing.
365void Heap::ReportStatisticsAfterGC() {
 366 // As in the reporting before GC, we use some complicated logic to ensure that
367 // NewSpace statistics are logged exactly once when --log-gc is turned on.
368#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
369 if (FLAG_heap_stats) {
370 new_space_.CollectStatistics();
371 ReportHeapStatistics("After GC");
372 } else if (FLAG_log_gc) {
373 new_space_.ReportStatistics();
374 }
375#elif defined(DEBUG)
376 if (FLAG_heap_stats) ReportHeapStatistics("After GC");
377#elif defined(ENABLE_LOGGING_AND_PROFILING)
378 if (FLAG_log_gc) new_space_.ReportStatistics();
379#endif
380}
381#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
382
383
384void Heap::GarbageCollectionPrologue() {
385 TranscendentalCache::Clear();
Steve Block6ded16b2010-05-10 14:33:55 +0100386 ClearJSFunctionResultCaches();
Steve Blocka7e24c12009-10-30 11:49:00 +0000387 gc_count_++;
Steve Block6ded16b2010-05-10 14:33:55 +0100388 unflattened_strings_length_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000389#ifdef DEBUG
390 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
391 allow_allocation(false);
392
393 if (FLAG_verify_heap) {
394 Verify();
395 }
396
397 if (FLAG_gc_verbose) Print();
Steve Blocka7e24c12009-10-30 11:49:00 +0000398#endif
399
400#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
401 ReportStatisticsBeforeGC();
402#endif
403}
404
Ben Murdochf87a2032010-10-22 12:50:53 +0100405intptr_t Heap::SizeOfObjects() {
406 intptr_t total = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000407 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +0000408 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -0800409 total += space->SizeOfObjects();
Steve Blocka7e24c12009-10-30 11:49:00 +0000410 }
411 return total;
412}
413
414void Heap::GarbageCollectionEpilogue() {
415#ifdef DEBUG
416 allow_allocation(true);
417 ZapFromSpace();
418
419 if (FLAG_verify_heap) {
420 Verify();
421 }
422
423 if (FLAG_print_global_handles) GlobalHandles::Print();
424 if (FLAG_print_handles) PrintHandles();
425 if (FLAG_gc_verbose) Print();
426 if (FLAG_code_stats) ReportCodeStatistics("After GC");
427#endif
428
Ben Murdochf87a2032010-10-22 12:50:53 +0100429 Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));
Steve Blocka7e24c12009-10-30 11:49:00 +0000430
431 Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
432 Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
433#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
434 ReportStatisticsAfterGC();
435#endif
436#ifdef ENABLE_DEBUGGER_SUPPORT
437 Debug::AfterGarbageCollection();
438#endif
439}
440
441
John Reck59135872010-11-02 12:39:01 -0700442void Heap::CollectAllGarbage(bool force_compaction) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000443 // Since we are ignoring the return value, the exact choice of space does
444 // not matter, so long as we do not specify NEW_SPACE, which would not
445 // cause a full GC.
446 MarkCompactCollector::SetForceCompaction(force_compaction);
John Reck59135872010-11-02 12:39:01 -0700447 CollectGarbage(OLD_POINTER_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +0000448 MarkCompactCollector::SetForceCompaction(false);
449}
450
451
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800452void Heap::CollectAllAvailableGarbage() {
453 // Since we are ignoring the return value, the exact choice of space does
454 // not matter, so long as we do not specify NEW_SPACE, which would not
455 // cause a full GC.
456 MarkCompactCollector::SetForceCompaction(true);
457
458 // Major GC would invoke weak handle callbacks on weakly reachable
459 // handles, but won't collect weakly reachable objects until next
460 // major GC. Therefore if we collect aggressively and weak handle callback
461 // has been invoked, we rerun major GC to release objects which become
462 // garbage.
463 // Note: as weak callbacks can execute arbitrary code, we cannot
 464 // hope that eventually there will be no weak callback invocations.
465 // Therefore stop recollecting after several attempts.
466 const int kMaxNumberOfAttempts = 7;
467 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
468 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
469 break;
470 }
471 }
472 MarkCompactCollector::SetForceCompaction(false);
473}
474
475
476bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000477 // The VM is in the GC state until exiting this function.
478 VMState state(GC);
479
480#ifdef DEBUG
481 // Reset the allocation timeout to the GC interval, but make sure to
482 // allow at least a few allocations after a collection. The reason
483 // for this is that we have a lot of allocation sequences and we
484 // assume that a garbage collection will allow the subsequent
485 // allocation attempts to go through.
486 allocation_timeout_ = Max(6, FLAG_gc_interval);
487#endif
488
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800489 bool next_gc_likely_to_collect_more = false;
490
Steve Blocka7e24c12009-10-30 11:49:00 +0000491 { GCTracer tracer;
492 GarbageCollectionPrologue();
493 // The GC count was incremented in the prologue. Tell the tracer about
494 // it.
495 tracer.set_gc_count(gc_count_);
496
Steve Blocka7e24c12009-10-30 11:49:00 +0000497 // Tell the tracer which collector we've selected.
498 tracer.set_collector(collector);
499
500 HistogramTimer* rate = (collector == SCAVENGER)
501 ? &Counters::gc_scavenger
502 : &Counters::gc_compactor;
503 rate->Start();
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800504 next_gc_likely_to_collect_more =
505 PerformGarbageCollection(collector, &tracer);
Steve Blocka7e24c12009-10-30 11:49:00 +0000506 rate->Stop();
507
508 GarbageCollectionEpilogue();
509 }
510
511
512#ifdef ENABLE_LOGGING_AND_PROFILING
513 if (FLAG_log_gc) HeapProfiler::WriteSample();
Ben Murdochf87a2032010-10-22 12:50:53 +0100514 if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
Steve Blocka7e24c12009-10-30 11:49:00 +0000515#endif
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800516
517 return next_gc_likely_to_collect_more;
Steve Blocka7e24c12009-10-30 11:49:00 +0000518}
519
520
521void Heap::PerformScavenge() {
522 GCTracer tracer;
John Reck59135872010-11-02 12:39:01 -0700523 PerformGarbageCollection(SCAVENGER, &tracer);
Steve Blocka7e24c12009-10-30 11:49:00 +0000524}
525
526
527#ifdef DEBUG
528// Helper class for verifying the symbol table.
529class SymbolTableVerifier : public ObjectVisitor {
530 public:
531 SymbolTableVerifier() { }
532 void VisitPointers(Object** start, Object** end) {
533 // Visit all HeapObject pointers in [start, end).
534 for (Object** p = start; p < end; p++) {
535 if ((*p)->IsHeapObject()) {
536 // Check that the symbol is actually a symbol.
537 ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
538 }
539 }
540 }
541};
542#endif // DEBUG
543
544
545static void VerifySymbolTable() {
546#ifdef DEBUG
547 SymbolTableVerifier verifier;
548 Heap::symbol_table()->IterateElements(&verifier);
549#endif // DEBUG
550}
551
552
Leon Clarkee46be812010-01-19 14:06:41 +0000553void Heap::ReserveSpace(
554 int new_space_size,
555 int pointer_space_size,
556 int data_space_size,
557 int code_space_size,
558 int map_space_size,
559 int cell_space_size,
560 int large_object_size) {
561 NewSpace* new_space = Heap::new_space();
562 PagedSpace* old_pointer_space = Heap::old_pointer_space();
563 PagedSpace* old_data_space = Heap::old_data_space();
564 PagedSpace* code_space = Heap::code_space();
565 PagedSpace* map_space = Heap::map_space();
566 PagedSpace* cell_space = Heap::cell_space();
567 LargeObjectSpace* lo_space = Heap::lo_space();
568 bool gc_performed = true;
569 while (gc_performed) {
570 gc_performed = false;
571 if (!new_space->ReserveSpace(new_space_size)) {
Ben Murdochf87a2032010-10-22 12:50:53 +0100572 Heap::CollectGarbage(NEW_SPACE);
Leon Clarkee46be812010-01-19 14:06:41 +0000573 gc_performed = true;
574 }
575 if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
Ben Murdochf87a2032010-10-22 12:50:53 +0100576 Heap::CollectGarbage(OLD_POINTER_SPACE);
Leon Clarkee46be812010-01-19 14:06:41 +0000577 gc_performed = true;
578 }
579 if (!(old_data_space->ReserveSpace(data_space_size))) {
Ben Murdochf87a2032010-10-22 12:50:53 +0100580 Heap::CollectGarbage(OLD_DATA_SPACE);
Leon Clarkee46be812010-01-19 14:06:41 +0000581 gc_performed = true;
582 }
583 if (!(code_space->ReserveSpace(code_space_size))) {
Ben Murdochf87a2032010-10-22 12:50:53 +0100584 Heap::CollectGarbage(CODE_SPACE);
Leon Clarkee46be812010-01-19 14:06:41 +0000585 gc_performed = true;
586 }
587 if (!(map_space->ReserveSpace(map_space_size))) {
Ben Murdochf87a2032010-10-22 12:50:53 +0100588 Heap::CollectGarbage(MAP_SPACE);
Leon Clarkee46be812010-01-19 14:06:41 +0000589 gc_performed = true;
590 }
591 if (!(cell_space->ReserveSpace(cell_space_size))) {
Ben Murdochf87a2032010-10-22 12:50:53 +0100592 Heap::CollectGarbage(CELL_SPACE);
Leon Clarkee46be812010-01-19 14:06:41 +0000593 gc_performed = true;
594 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100595 // We add a slack-factor of 2 in order to have space for a series of
596 // large-object allocations that are only just larger than the page size.
Leon Clarkee46be812010-01-19 14:06:41 +0000597 large_object_size *= 2;
598 // The ReserveSpace method on the large object space checks how much
599 // we can expand the old generation. This includes expansion caused by
600 // allocation in the other spaces.
601 large_object_size += cell_space_size + map_space_size + code_space_size +
602 data_space_size + pointer_space_size;
603 if (!(lo_space->ReserveSpace(large_object_size))) {
Ben Murdochf87a2032010-10-22 12:50:53 +0100604 Heap::CollectGarbage(LO_SPACE);
Leon Clarkee46be812010-01-19 14:06:41 +0000605 gc_performed = true;
606 }
607 }
608}
609
610
Steve Blocka7e24c12009-10-30 11:49:00 +0000611void Heap::EnsureFromSpaceIsCommitted() {
612 if (new_space_.CommitFromSpaceIfNeeded()) return;
613
614 // Committing memory to from space failed.
615 // Try shrinking and try again.
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100616 PagedSpaces spaces;
617 for (PagedSpace* space = spaces.next();
618 space != NULL;
619 space = spaces.next()) {
620 space->RelinkPageListInChunkOrder(true);
621 }
622
Steve Blocka7e24c12009-10-30 11:49:00 +0000623 Shrink();
624 if (new_space_.CommitFromSpaceIfNeeded()) return;
625
626 // Committing memory to from space failed again.
627 // Memory is exhausted and we will die.
628 V8::FatalProcessOutOfMemory("Committing semi space failed.");
629}
630
631
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800632void Heap::ClearJSFunctionResultCaches() {
633 if (Bootstrapper::IsActive()) return;
Steve Block6ded16b2010-05-10 14:33:55 +0100634
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800635 Object* context = global_contexts_list_;
636 while (!context->IsUndefined()) {
637 // Get the caches for this context:
Steve Block6ded16b2010-05-10 14:33:55 +0100638 FixedArray* caches =
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800639 Context::cast(context)->jsfunction_result_caches();
640 // Clear the caches:
Steve Block6ded16b2010-05-10 14:33:55 +0100641 int length = caches->length();
642 for (int i = 0; i < length; i++) {
643 JSFunctionResultCache::cast(caches->get(i))->Clear();
644 }
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800645 // Get the next context:
646 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
Steve Block6ded16b2010-05-10 14:33:55 +0100647 }
Steve Block6ded16b2010-05-10 14:33:55 +0100648}
649
650
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100651void Heap::ClearNormalizedMapCaches() {
652 if (Bootstrapper::IsActive()) return;
Ben Murdochf87a2032010-10-22 12:50:53 +0100653
654 Object* context = global_contexts_list_;
655 while (!context->IsUndefined()) {
656 Context::cast(context)->normalized_map_cache()->Clear();
657 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
658 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100659}
660
661
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100662#ifdef DEBUG
663
664enum PageWatermarkValidity {
665 ALL_VALID,
666 ALL_INVALID
667};
668
669static void VerifyPageWatermarkValidity(PagedSpace* space,
670 PageWatermarkValidity validity) {
671 PageIterator it(space, PageIterator::PAGES_IN_USE);
672 bool expected_value = (validity == ALL_VALID);
673 while (it.has_next()) {
674 Page* page = it.next();
675 ASSERT(page->IsWatermarkValid() == expected_value);
676 }
677}
678#endif
679
Steve Block8defd9f2010-07-08 12:39:36 +0100680void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
681 double survival_rate =
682 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
683 start_new_space_size;
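  // For example, if start_new_space_size was 2 MB and 1.5 MB of young
  // objects survived the last scavenge, survival_rate is 75; the thresholds
  // below decide whether that counts as high and whether the trend is
  // INCREASING, DECREASING or STABLE.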
684
685 if (survival_rate > kYoungSurvivalRateThreshold) {
686 high_survival_rate_period_length_++;
687 } else {
688 high_survival_rate_period_length_ = 0;
689 }
690
691 double survival_rate_diff = survival_rate_ - survival_rate;
692
693 if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
694 set_survival_rate_trend(DECREASING);
695 } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
696 set_survival_rate_trend(INCREASING);
697 } else {
698 set_survival_rate_trend(STABLE);
699 }
700
701 survival_rate_ = survival_rate;
702}
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100703
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800704bool Heap::PerformGarbageCollection(GarbageCollector collector,
John Reck59135872010-11-02 12:39:01 -0700705 GCTracer* tracer) {
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800706 bool next_gc_likely_to_collect_more = false;
707
Ben Murdochf87a2032010-10-22 12:50:53 +0100708 if (collector != SCAVENGER) {
709 PROFILE(CodeMovingGCEvent());
710 }
711
Steve Blocka7e24c12009-10-30 11:49:00 +0000712 VerifySymbolTable();
713 if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
714 ASSERT(!allocation_allowed_);
Leon Clarkef7060e22010-06-03 12:02:55 +0100715 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
Steve Blocka7e24c12009-10-30 11:49:00 +0000716 global_gc_prologue_callback_();
717 }
Steve Block6ded16b2010-05-10 14:33:55 +0100718
719 GCType gc_type =
720 collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
721
722 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
723 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
724 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
725 }
726 }
727
Steve Blocka7e24c12009-10-30 11:49:00 +0000728 EnsureFromSpaceIsCommitted();
Steve Block6ded16b2010-05-10 14:33:55 +0100729
Ben Murdochf87a2032010-10-22 12:50:53 +0100730 int start_new_space_size = Heap::new_space()->SizeAsInt();
Steve Block8defd9f2010-07-08 12:39:36 +0100731
Steve Blocka7e24c12009-10-30 11:49:00 +0000732 if (collector == MARK_COMPACTOR) {
Steve Block6ded16b2010-05-10 14:33:55 +0100733 // Perform mark-sweep with optional compaction.
Steve Blocka7e24c12009-10-30 11:49:00 +0000734 MarkCompact(tracer);
735
Steve Block8defd9f2010-07-08 12:39:36 +0100736 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
737 IsStableOrIncreasingSurvivalTrend();
738
739 UpdateSurvivalRateTrend(start_new_space_size);
740
John Reck59135872010-11-02 12:39:01 -0700741 intptr_t old_gen_size = PromotedSpaceSize();
742 old_gen_promotion_limit_ =
743 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
744 old_gen_allocation_limit_ =
745 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
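    // For example, with old_gen_size == 30 MB the limits become
    // 30 + Max(2, 10) = 40 MB (promotion) and 30 + Max(8, 15) = 45 MB
    // (allocation); see kMinimumPromotionLimit and kMinimumAllocationLimit
    // above.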
Steve Block8defd9f2010-07-08 12:39:36 +0100746
John Reck59135872010-11-02 12:39:01 -0700747 if (high_survival_rate_during_scavenges &&
748 IsStableOrIncreasingSurvivalTrend()) {
749 // Stable high survival rates of young objects both during partial and
 750 // full collection indicate that the mutator is either building or modifying
751 // a structure with a long lifetime.
752 // In this case we aggressively raise old generation memory limits to
753 // postpone subsequent mark-sweep collection and thus trade memory
754 // space for the mutation speed.
755 old_gen_promotion_limit_ *= 2;
756 old_gen_allocation_limit_ *= 2;
Steve Block8defd9f2010-07-08 12:39:36 +0100757 }
758
John Reck59135872010-11-02 12:39:01 -0700759 old_gen_exhausted_ = false;
Steve Block6ded16b2010-05-10 14:33:55 +0100760 } else {
Leon Clarkef7060e22010-06-03 12:02:55 +0100761 tracer_ = tracer;
Steve Block6ded16b2010-05-10 14:33:55 +0100762 Scavenge();
Leon Clarkef7060e22010-06-03 12:02:55 +0100763 tracer_ = NULL;
Steve Block8defd9f2010-07-08 12:39:36 +0100764
765 UpdateSurvivalRateTrend(start_new_space_size);
Steve Blocka7e24c12009-10-30 11:49:00 +0000766 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000767
768 Counters::objs_since_last_young.Set(0);
769
John Reck59135872010-11-02 12:39:01 -0700770 if (collector == MARK_COMPACTOR) {
771 DisableAssertNoAllocation allow_allocation;
772 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800773 next_gc_likely_to_collect_more =
774 GlobalHandles::PostGarbageCollectionProcessing();
John Reck59135872010-11-02 12:39:01 -0700775 }
776
Steve Block3ce2e202009-11-05 08:53:23 +0000777 // Update relocatables.
778 Relocatable::PostGarbageCollectionProcessing();
Steve Blocka7e24c12009-10-30 11:49:00 +0000779
780 if (collector == MARK_COMPACTOR) {
781 // Register the amount of external allocated memory.
782 amount_of_external_allocated_memory_at_last_global_gc_ =
783 amount_of_external_allocated_memory_;
784 }
785
Steve Block6ded16b2010-05-10 14:33:55 +0100786 GCCallbackFlags callback_flags = tracer->is_compacting()
787 ? kGCCallbackFlagCompacted
788 : kNoGCCallbackFlags;
789 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
790 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
791 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
792 }
793 }
794
Steve Blocka7e24c12009-10-30 11:49:00 +0000795 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
796 ASSERT(!allocation_allowed_);
Leon Clarkef7060e22010-06-03 12:02:55 +0100797 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
Steve Blocka7e24c12009-10-30 11:49:00 +0000798 global_gc_epilogue_callback_();
799 }
800 VerifySymbolTable();
Teng-Hui Zhu3e5fa292010-11-09 16:16:48 -0800801
802 return next_gc_likely_to_collect_more;
Steve Blocka7e24c12009-10-30 11:49:00 +0000803}
804
805
Steve Blocka7e24c12009-10-30 11:49:00 +0000806void Heap::MarkCompact(GCTracer* tracer) {
807 gc_state_ = MARK_COMPACT;
Steve Blocka7e24c12009-10-30 11:49:00 +0000808 LOG(ResourceEvent("markcompact", "begin"));
809
810 MarkCompactCollector::Prepare(tracer);
811
812 bool is_compacting = MarkCompactCollector::IsCompacting();
813
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100814 if (is_compacting) {
815 mc_count_++;
816 } else {
817 ms_count_++;
818 }
819 tracer->set_full_gc_count(mc_count_ + ms_count_);
820
Steve Blocka7e24c12009-10-30 11:49:00 +0000821 MarkCompactPrologue(is_compacting);
822
823 MarkCompactCollector::CollectGarbage();
824
Steve Blocka7e24c12009-10-30 11:49:00 +0000825 LOG(ResourceEvent("markcompact", "end"));
826
827 gc_state_ = NOT_IN_GC;
828
829 Shrink();
830
831 Counters::objs_since_last_full.Set(0);
Steve Block6ded16b2010-05-10 14:33:55 +0100832
833 contexts_disposed_ = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +0000834}
835
836
837void Heap::MarkCompactPrologue(bool is_compacting) {
838 // At any old GC clear the keyed lookup cache to enable collection of unused
839 // maps.
840 KeyedLookupCache::Clear();
841 ContextSlotCache::Clear();
842 DescriptorLookupCache::Clear();
843
Ben Murdochb0fe1622011-05-05 13:52:32 +0100844 RuntimeProfiler::MarkCompactPrologue(is_compacting);
845
Steve Blocka7e24c12009-10-30 11:49:00 +0000846 CompilationCache::MarkCompactPrologue();
847
Kristian Monsen25f61362010-05-21 11:50:48 +0100848 CompletelyClearInstanceofCache();
849
Leon Clarkee46be812010-01-19 14:06:41 +0000850 if (is_compacting) FlushNumberStringCache();
Steve Blocka7e24c12009-10-30 11:49:00 +0000851
Kristian Monsen80d68ea2010-09-08 11:05:35 +0100852 ClearNormalizedMapCaches();
Steve Blocka7e24c12009-10-30 11:49:00 +0000853}
854
855
856Object* Heap::FindCodeObject(Address a) {
John Reck59135872010-11-02 12:39:01 -0700857 Object* obj = NULL; // Initialization to please compiler.
858 { MaybeObject* maybe_obj = code_space_->FindObject(a);
859 if (!maybe_obj->ToObject(&obj)) {
860 obj = lo_space_->FindObject(a)->ToObjectUnchecked();
861 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000862 }
Steve Blocka7e24c12009-10-30 11:49:00 +0000863 return obj;
864}
865
866
867// Helper class for copying HeapObjects
868class ScavengeVisitor: public ObjectVisitor {
869 public:
870
871 void VisitPointer(Object** p) { ScavengePointer(p); }
872
873 void VisitPointers(Object** start, Object** end) {
874 // Copy all HeapObject pointers in [start, end)
875 for (Object** p = start; p < end; p++) ScavengePointer(p);
876 }
877
878 private:
879 void ScavengePointer(Object** p) {
880 Object* object = *p;
881 if (!Heap::InNewSpace(object)) return;
882 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
883 reinterpret_cast<HeapObject*>(object));
884 }
885};
886
887
Ben Murdoch3bec4d22010-07-22 14:51:16 +0100888// A queue of objects promoted during scavenge. Each object is accompanied
 889// by its size to avoid dereferencing a map pointer for scanning.
Steve Blocka7e24c12009-10-30 11:49:00 +0000890class PromotionQueue {
891 public:
892 void Initialize(Address start_address) {
Ben Murdoch3bec4d22010-07-22 14:51:16 +0100893 front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
Steve Blocka7e24c12009-10-30 11:49:00 +0000894 }
895
896 bool is_empty() { return front_ <= rear_; }
897
Ben Murdoch3bec4d22010-07-22 14:51:16 +0100898 void insert(HeapObject* target, int size) {
899 *(--rear_) = reinterpret_cast<intptr_t>(target);
900 *(--rear_) = size;
Steve Blocka7e24c12009-10-30 11:49:00 +0000901 // Assert no overflow into live objects.
902 ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
903 }
904
Ben Murdoch3bec4d22010-07-22 14:51:16 +0100905 void remove(HeapObject** target, int* size) {
906 *target = reinterpret_cast<HeapObject*>(*(--front_));
907 *size = static_cast<int>(*(--front_));
Steve Blocka7e24c12009-10-30 11:49:00 +0000908 // Assert no underflow.
909 ASSERT(front_ >= rear_);
910 }
911
912 private:
913 // The front of the queue is higher in memory than the rear.
Ben Murdoch3bec4d22010-07-22 14:51:16 +0100914 intptr_t* front_;
915 intptr_t* rear_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000916};
917
918
919// Shared state read by the scavenge collector and set by ScavengeObject.
920static PromotionQueue promotion_queue;
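// The queue lives in the otherwise unused top of to space: Scavenge() below
// initializes it at new_space_.ToSpaceHigh(), objects promoted during
// scavenging are pushed with insert(), and DoScavenge() drains it with
// remove() until no unswept promoted objects remain.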
921
922
923#ifdef DEBUG
924// Visitor class to verify pointers in code or data space do not point into
925// new space.
926class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
927 public:
 928 void VisitPointers(Object** start, Object** end) {
929 for (Object** current = start; current < end; current++) {
930 if ((*current)->IsHeapObject()) {
931 ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
932 }
933 }
934 }
935};
936
937
938static void VerifyNonPointerSpacePointers() {
939 // Verify that there are no pointers to new space in spaces where we
940 // do not expect them.
941 VerifyNonPointerSpacePointersVisitor v;
942 HeapObjectIterator code_it(Heap::code_space());
Leon Clarked91b9f72010-01-27 17:25:45 +0000943 for (HeapObject* object = code_it.next();
944 object != NULL; object = code_it.next())
Steve Blocka7e24c12009-10-30 11:49:00 +0000945 object->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +0000946
947 HeapObjectIterator data_it(Heap::old_data_space());
Leon Clarked91b9f72010-01-27 17:25:45 +0000948 for (HeapObject* object = data_it.next();
949 object != NULL; object = data_it.next())
950 object->Iterate(&v);
Steve Blocka7e24c12009-10-30 11:49:00 +0000951}
952#endif
953
954
Steve Block6ded16b2010-05-10 14:33:55 +0100955void Heap::CheckNewSpaceExpansionCriteria() {
956 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
957 survived_since_last_expansion_ > new_space_.Capacity()) {
958 // Grow the size of new space if there is room to grow and enough
959 // data has survived scavenge since the last expansion.
960 new_space_.Grow();
961 survived_since_last_expansion_ = 0;
962 }
963}
964
965
Steve Blocka7e24c12009-10-30 11:49:00 +0000966void Heap::Scavenge() {
967#ifdef DEBUG
968 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
969#endif
970
971 gc_state_ = SCAVENGE;
972
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100973 Page::FlipMeaningOfInvalidatedWatermarkFlag();
974#ifdef DEBUG
975 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
976 VerifyPageWatermarkValidity(map_space_, ALL_VALID);
977#endif
978
 979 // We do not update the allocation watermark of the top page during linear
980 // allocation to avoid overhead. So to maintain the watermark invariant
981 // we have to manually cache the watermark and mark the top page as having an
982 // invalid watermark. This guarantees that dirty regions iteration will use a
983 // correct watermark even if a linear allocation happens.
984 old_pointer_space_->FlushTopPageWatermark();
985 map_space_->FlushTopPageWatermark();
986
Steve Blocka7e24c12009-10-30 11:49:00 +0000987 // Implements Cheney's copying algorithm
988 LOG(ResourceEvent("scavenge", "begin"));
989
990 // Clear descriptor cache.
991 DescriptorLookupCache::Clear();
992
993 // Used for updating survived_since_last_expansion_ at function end.
Ben Murdochf87a2032010-10-22 12:50:53 +0100994 intptr_t survived_watermark = PromotedSpaceSize();
Steve Blocka7e24c12009-10-30 11:49:00 +0000995
Steve Block6ded16b2010-05-10 14:33:55 +0100996 CheckNewSpaceExpansionCriteria();
Steve Blocka7e24c12009-10-30 11:49:00 +0000997
998 // Flip the semispaces. After flipping, to space is empty, from space has
999 // live objects.
1000 new_space_.Flip();
1001 new_space_.ResetAllocationInfo();
1002
1003 // We need to sweep newly copied objects which can be either in the
1004 // to space or promoted to the old generation. For to-space
1005 // objects, we treat the bottom of the to space as a queue. Newly
1006 // copied and unswept objects lie between a 'front' mark and the
1007 // allocation pointer.
1008 //
1009 // Promoted objects can go into various old-generation spaces, and
1010 // can be allocated internally in the spaces (from the free list).
1011 // We treat the top of the to space as a queue of addresses of
1012 // promoted objects. The addresses of newly promoted and unswept
1013 // objects lie between a 'front' mark and a 'rear' mark that is
1014 // updated as a side effect of promoting an object.
1015 //
1016 // There is guaranteed to be enough room at the top of the to space
1017 // for the addresses of promoted objects: every object promoted
1018 // frees up its size in bytes from the top of the new space, and
1019 // objects are at least one pointer in size.
1020 Address new_space_front = new_space_.ToSpaceLow();
1021 promotion_queue.Initialize(new_space_.ToSpaceHigh());
1022
1023 ScavengeVisitor scavenge_visitor;
1024 // Copy roots.
Leon Clarkee46be812010-01-19 14:06:41 +00001025 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
Steve Blocka7e24c12009-10-30 11:49:00 +00001026
1027 // Copy objects reachable from the old generation. By definition,
1028 // there are no intergenerational pointers in code or data spaces.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001029 IterateDirtyRegions(old_pointer_space_,
1030 &IteratePointersInDirtyRegion,
1031 &ScavengePointer,
1032 WATERMARK_CAN_BE_INVALID);
1033
1034 IterateDirtyRegions(map_space_,
1035 &IteratePointersInDirtyMapsRegion,
1036 &ScavengePointer,
1037 WATERMARK_CAN_BE_INVALID);
1038
1039 lo_space_->IterateDirtyRegions(&ScavengePointer);
Steve Blocka7e24c12009-10-30 11:49:00 +00001040
1041 // Copy objects reachable from cells by scavenging cell values directly.
1042 HeapObjectIterator cell_iterator(cell_space_);
Leon Clarked91b9f72010-01-27 17:25:45 +00001043 for (HeapObject* cell = cell_iterator.next();
1044 cell != NULL; cell = cell_iterator.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001045 if (cell->IsJSGlobalPropertyCell()) {
1046 Address value_address =
1047 reinterpret_cast<Address>(cell) +
1048 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1049 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1050 }
1051 }
1052
Ben Murdochf87a2032010-10-22 12:50:53 +01001053 // Scavenge the object reachable from the global contexts list directly.
1054 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1055
Ben Murdochb0fe1622011-05-05 13:52:32 +01001056 // Scavenge objects reachable from the runtime-profiler sampler
1057 // window directly.
1058 Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
1059 int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
1060 scavenge_visitor.VisitPointers(
1061 sampler_window_address,
1062 sampler_window_address + sampler_window_size);
1063
Leon Clarkee46be812010-01-19 14:06:41 +00001064 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1065
Steve Block6ded16b2010-05-10 14:33:55 +01001066 UpdateNewSpaceReferencesInExternalStringTable(
1067 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1068
Leon Clarkee46be812010-01-19 14:06:41 +00001069 ASSERT(new_space_front == new_space_.top());
1070
1071 // Set age mark.
1072 new_space_.set_age_mark(new_space_.top());
1073
1074 // Update how much has survived scavenge.
Ben Murdochf87a2032010-10-22 12:50:53 +01001075 IncrementYoungSurvivorsCounter(static_cast<int>(
1076 (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
Leon Clarkee46be812010-01-19 14:06:41 +00001077
1078 LOG(ResourceEvent("scavenge", "end"));
1079
1080 gc_state_ = NOT_IN_GC;
1081}
1082
1083
Steve Block6ded16b2010-05-10 14:33:55 +01001084String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
1085 MapWord first_word = HeapObject::cast(*p)->map_word();
1086
1087 if (!first_word.IsForwardingAddress()) {
1088 // Unreachable external string can be finalized.
1089 FinalizeExternalString(String::cast(*p));
1090 return NULL;
1091 }
1092
1093 // String is still reachable.
1094 return String::cast(first_word.ToForwardingAddress());
1095}
1096
1097
1098void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1099 ExternalStringTableUpdaterCallback updater_func) {
Leon Clarkee46be812010-01-19 14:06:41 +00001100 ExternalStringTable::Verify();
1101
1102 if (ExternalStringTable::new_space_strings_.is_empty()) return;
1103
1104 Object** start = &ExternalStringTable::new_space_strings_[0];
1105 Object** end = start + ExternalStringTable::new_space_strings_.length();
1106 Object** last = start;
1107
1108 for (Object** p = start; p < end; ++p) {
1109 ASSERT(Heap::InFromSpace(*p));
Steve Block6ded16b2010-05-10 14:33:55 +01001110 String* target = updater_func(p);
Leon Clarkee46be812010-01-19 14:06:41 +00001111
Steve Block6ded16b2010-05-10 14:33:55 +01001112 if (target == NULL) continue;
Leon Clarkee46be812010-01-19 14:06:41 +00001113
Leon Clarkee46be812010-01-19 14:06:41 +00001114 ASSERT(target->IsExternalString());
1115
1116 if (Heap::InNewSpace(target)) {
1117 // String is still in new space. Update the table entry.
1118 *last = target;
1119 ++last;
1120 } else {
1121 // String got promoted. Move it to the old string list.
1122 ExternalStringTable::AddOldString(target);
1123 }
1124 }
1125
1126 ASSERT(last <= end);
1127 ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
1128}
1129
1130
Ben Murdochb0fe1622011-05-05 13:52:32 +01001131static Object* ProcessFunctionWeakReferences(Object* function,
1132 WeakObjectRetainer* retainer) {
1133 Object* head = Heap::undefined_value();
1134 JSFunction* tail = NULL;
1135 Object* candidate = function;
1136 while (!candidate->IsUndefined()) {
1137 // Check whether to keep the candidate in the list.
1138 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1139 Object* retain = retainer->RetainAs(candidate);
1140 if (retain != NULL) {
1141 if (head->IsUndefined()) {
1142 // First element in the list.
1143 head = candidate_function;
1144 } else {
1145 // Subsequent elements in the list.
1146 ASSERT(tail != NULL);
1147 tail->set_next_function_link(candidate_function);
1148 }
1149 // Retained function is new tail.
1150 tail = candidate_function;
1151 }
1152 // Move to next element in the list.
1153 candidate = candidate_function->next_function_link();
1154 }
1155
1156 // Terminate the list if there is one or more elements.
1157 if (tail != NULL) {
1158 tail->set_next_function_link(Heap::undefined_value());
1159 }
1160
1161 return head;
1162}
1163
1164
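// Global contexts form a weak list threaded through the contexts themselves
// via Context::NEXT_CONTEXT_LINK, and each context carries a weak list of
// optimized functions threaded through JSFunction::next_function_link. The
// retainer decides which elements survive; dropped elements are simply not
// relinked into the rebuilt lists.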
Ben Murdochf87a2032010-10-22 12:50:53 +01001165void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1166 Object* head = undefined_value();
1167 Context* tail = NULL;
1168 Object* candidate = global_contexts_list_;
1169 while (!candidate->IsUndefined()) {
1170 // Check whether to keep the candidate in the list.
1171 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1172 Object* retain = retainer->RetainAs(candidate);
1173 if (retain != NULL) {
1174 if (head->IsUndefined()) {
1175 // First element in the list.
1176 head = candidate_context;
1177 } else {
1178 // Subsequent elements in the list.
1179 ASSERT(tail != NULL);
1180 tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
1181 candidate_context,
1182 UPDATE_WRITE_BARRIER);
1183 }
1184 // Retained context is new tail.
1185 tail = candidate_context;
Ben Murdochb0fe1622011-05-05 13:52:32 +01001186
1187 // Process the weak list of optimized functions for the context.
1188 Object* function_list_head =
1189 ProcessFunctionWeakReferences(
1190 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1191 retainer);
1192 candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
1193 function_list_head,
1194 UPDATE_WRITE_BARRIER);
Ben Murdochf87a2032010-10-22 12:50:53 +01001195 }
1196 // Move to next element in the list.
1197 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1198 }
1199
1200 // Terminate the list if there is one or more elements.
1201 if (tail != NULL) {
1202 tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
1203 Heap::undefined_value(),
1204 UPDATE_WRITE_BARRIER);
1205 }
1206
1207 // Update the head of the list of contexts.
1208 Heap::global_contexts_list_ = head;
1209}
1210
1211
Iain Merrick75681382010-08-19 15:07:18 +01001212class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1213 public:
1214 static inline void VisitPointer(Object** p) {
1215 Object* object = *p;
1216 if (!Heap::InNewSpace(object)) return;
1217 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1218 reinterpret_cast<HeapObject*>(object));
1219 }
1220};
1221
1222
Leon Clarkee46be812010-01-19 14:06:41 +00001223Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1224 Address new_space_front) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001225 do {
1226 ASSERT(new_space_front <= new_space_.top());
1227
1228 // The addresses new_space_front and new_space_.top() define a
1229 // queue of unprocessed copied objects. Process them until the
1230 // queue is empty.
1231 while (new_space_front < new_space_.top()) {
1232 HeapObject* object = HeapObject::FromAddress(new_space_front);
Iain Merrick75681382010-08-19 15:07:18 +01001233 new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001234 }
1235
1236 // Promote and process all the to-be-promoted objects.
1237 while (!promotion_queue.is_empty()) {
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001238 HeapObject* target;
1239 int size;
1240 promotion_queue.remove(&target, &size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001241
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001242 // A promoted object might already have been partially visited
 1243 // during dirty regions iteration. Thus we search specifically
1244 // for pointers to from semispace instead of looking for pointers
1245 // to new space.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001246 ASSERT(!target->IsMap());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001247 IterateAndMarkPointersToFromSpace(target->address(),
1248 target->address() + size,
1249 &ScavengePointer);
Steve Blocka7e24c12009-10-30 11:49:00 +00001250 }
1251
1252 // Take another spin if there are now unswept objects in new space
1253 // (there are currently no more unswept promoted objects).
1254 } while (new_space_front < new_space_.top());
1255
Leon Clarkee46be812010-01-19 14:06:41 +00001256 return new_space_front;
Steve Blocka7e24c12009-10-30 11:49:00 +00001257}
1258
1259
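// Dispatch-table based evacuation: Initialize() registers one evacuation
// routine per static visitor id, and Scavenge() looks the routine up via the
// object's map, so the per-type copying logic is selected without a virtual
// call.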
Iain Merrick75681382010-08-19 15:07:18 +01001260class ScavengingVisitor : public StaticVisitorBase {
1261 public:
1262 static void Initialize() {
1263 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1264 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1265 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1266 table_.Register(kVisitByteArray, &EvacuateByteArray);
1267 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdochf87a2032010-10-22 12:50:53 +01001268 table_.Register(kVisitGlobalContext,
1269 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1270 VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001271
1272 typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
1273
1274 table_.Register(kVisitConsString,
1275 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1276 VisitSpecialized<ConsString::kSize>);
1277
1278 table_.Register(kVisitSharedFunctionInfo,
1279 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1280 VisitSpecialized<SharedFunctionInfo::kSize>);
1281
1282 table_.Register(kVisitJSFunction,
1283 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1284 VisitSpecialized<JSFunction::kSize>);
1285
1286 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1287 kVisitDataObject,
1288 kVisitDataObjectGeneric>();
1289
1290 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1291 kVisitJSObject,
1292 kVisitJSObjectGeneric>();
1293
1294 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1295 kVisitStruct,
1296 kVisitStructGeneric>();
1297 }
1298
1299
1300 static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
1301 table_.GetVisitor(map)(map, slot, obj);
1302 }
1303
1304
1305 private:
1306 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1307 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1308
Steve Blocka7e24c12009-10-30 11:49:00 +00001309#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001310 static void RecordCopiedObject(HeapObject* obj) {
1311 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001312#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001313 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001314#endif
1315#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001316 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001317#endif
Iain Merrick75681382010-08-19 15:07:18 +01001318 if (should_record) {
1319 if (Heap::new_space()->Contains(obj)) {
1320 Heap::new_space()->RecordAllocation(obj);
1321 } else {
1322 Heap::new_space()->RecordPromotion(obj);
1323 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001324 }
1325 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001326#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1327
Iain Merrick75681382010-08-19 15:07:18 +01001328  // Helper function used by EvacuateObject to copy a source object to an
1329 // allocated target object and update the forwarding pointer in the source
1330 // object. Returns the target object.
1331 INLINE(static HeapObject* MigrateObject(HeapObject* source,
1332 HeapObject* target,
1333 int size)) {
1334 // Copy the content of source to target.
1335 Heap::CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001336
Iain Merrick75681382010-08-19 15:07:18 +01001337 // Set the forwarding address.
1338 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001339
1340#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001341 // Update NewSpace stats if necessary.
1342 RecordCopiedObject(target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001343#endif
Iain Merrick75681382010-08-19 15:07:18 +01001344 HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001345#if defined(ENABLE_LOGGING_AND_PROFILING)
1346 if (Logger::is_logging() || CpuProfiler::is_profiling()) {
1347 if (target->IsJSFunction()) {
1348 PROFILE(FunctionMoveEvent(source->address(), target->address()));
Ben Murdochf87a2032010-10-22 12:50:53 +01001349 PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001350 }
1351 }
1352#endif
Iain Merrick75681382010-08-19 15:07:18 +01001353 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001354 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001355
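// Note on the forwarding protocol used by MigrateObject above: once the copy
// is done, the from-space original has no usable map any more; its map word
// encodes the address of the new copy instead. Later visits to other slots
// still pointing at the original only need to decode that address, roughly
// (an illustrative sketch, not a quote of the inline helpers):
//
//   MapWord word = object->map_word();
//   if (word.IsForwardingAddress()) *slot = word.ToForwardingAddress();
//
// This is the check EvacuateShortcutCandidate performs below before falling
// back to a full copy of the shortcut target.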
1356
Iain Merrick75681382010-08-19 15:07:18 +01001357 template<ObjectContents object_contents, SizeRestriction size_restriction>
1358 static inline void EvacuateObject(Map* map,
1359 HeapObject** slot,
1360 HeapObject* object,
1361 int object_size) {
1362 ASSERT((size_restriction != SMALL) ||
1363 (object_size <= Page::kMaxHeapObjectSize));
1364 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001365
Iain Merrick75681382010-08-19 15:07:18 +01001366 if (Heap::ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001367 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001368
Iain Merrick75681382010-08-19 15:07:18 +01001369 if ((size_restriction != SMALL) &&
1370 (object_size > Page::kMaxHeapObjectSize)) {
John Reck59135872010-11-02 12:39:01 -07001371 maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001372 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001373 if (object_contents == DATA_OBJECT) {
John Reck59135872010-11-02 12:39:01 -07001374 maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001375 } else {
John Reck59135872010-11-02 12:39:01 -07001376 maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001377 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001378 }
1379
John Reck59135872010-11-02 12:39:01 -07001380 Object* result = NULL; // Initialization to please compiler.
1381 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001382 HeapObject* target = HeapObject::cast(result);
1383 *slot = MigrateObject(object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001384
Iain Merrick75681382010-08-19 15:07:18 +01001385 if (object_contents == POINTER_OBJECT) {
1386 promotion_queue.insert(target, object_size);
1387 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001388
Iain Merrick75681382010-08-19 15:07:18 +01001389 Heap::tracer()->increment_promoted_objects_size(object_size);
1390 return;
1391 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001392 }
John Reck59135872010-11-02 12:39:01 -07001393 Object* result =
1394 Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
Iain Merrick75681382010-08-19 15:07:18 +01001395 *slot = MigrateObject(object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001396 return;
1397 }
1398
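// Promotion policy applied by EvacuateObject above, summarized in pseudocode
// (a restatement of the code, not additional logic):
//
//   if (Heap::ShouldBePromoted(addr, size)) {
//     space = size > Page::kMaxHeapObjectSize ? lo_space       // UNKNOWN_SIZE only
//           : object_contents == DATA_OBJECT  ? old_data_space
//                                             : old_pointer_space;
//     // pointer-bearing survivors are also pushed on promotion_queue so
//     // their fields get rescanned for remaining from-space pointers
//   }
//   // otherwise -- or if the old-space allocation fails -- the object is
//   // copied within new space; that allocation is treated as infallible
//   // because to-space can hold every unpromoted survivor of from-space.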
Iain Merrick75681382010-08-19 15:07:18 +01001399
1400 static inline void EvacuateFixedArray(Map* map,
1401 HeapObject** slot,
1402 HeapObject* object) {
1403 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1404 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1405 slot,
1406 object,
1407 object_size);
1408 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001409
1410
Iain Merrick75681382010-08-19 15:07:18 +01001411 static inline void EvacuateByteArray(Map* map,
1412 HeapObject** slot,
1413 HeapObject* object) {
1414 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1415 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1416 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001417
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001418
Iain Merrick75681382010-08-19 15:07:18 +01001419 static inline void EvacuateSeqAsciiString(Map* map,
1420 HeapObject** slot,
1421 HeapObject* object) {
1422 int object_size = SeqAsciiString::cast(object)->
1423 SeqAsciiStringSize(map->instance_type());
1424 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1425 }
1426
1427
1428 static inline void EvacuateSeqTwoByteString(Map* map,
1429 HeapObject** slot,
1430 HeapObject* object) {
1431 int object_size = SeqTwoByteString::cast(object)->
1432 SeqTwoByteStringSize(map->instance_type());
1433 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1434 }
1435
1436
1437 static inline bool IsShortcutCandidate(int type) {
1438 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1439 }
1440
1441 static inline void EvacuateShortcutCandidate(Map* map,
1442 HeapObject** slot,
1443 HeapObject* object) {
1444 ASSERT(IsShortcutCandidate(map->instance_type()));
1445
1446 if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
1447 HeapObject* first =
1448 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1449
1450 *slot = first;
1451
1452 if (!Heap::InNewSpace(first)) {
1453 object->set_map_word(MapWord::FromForwardingAddress(first));
1454 return;
1455 }
1456
1457 MapWord first_word = first->map_word();
1458 if (first_word.IsForwardingAddress()) {
1459 HeapObject* target = first_word.ToForwardingAddress();
1460
1461 *slot = target;
1462 object->set_map_word(MapWord::FromForwardingAddress(target));
1463 return;
1464 }
1465
1466 Scavenge(first->map(), slot, first);
1467 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1468 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001469 }
Iain Merrick75681382010-08-19 15:07:18 +01001470
1471 int object_size = ConsString::kSize;
1472 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001473 }
1474
Iain Merrick75681382010-08-19 15:07:18 +01001475 template<ObjectContents object_contents>
1476 class ObjectEvacuationStrategy {
1477 public:
1478 template<int object_size>
1479 static inline void VisitSpecialized(Map* map,
1480 HeapObject** slot,
1481 HeapObject* object) {
1482 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1483 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001484
Iain Merrick75681382010-08-19 15:07:18 +01001485 static inline void Visit(Map* map,
1486 HeapObject** slot,
1487 HeapObject* object) {
1488 int object_size = map->instance_size();
1489 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1490 }
1491 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001492
Iain Merrick75681382010-08-19 15:07:18 +01001493 typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001494
Iain Merrick75681382010-08-19 15:07:18 +01001495 static VisitorDispatchTable<Callback> table_;
1496};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001497
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001498
Iain Merrick75681382010-08-19 15:07:18 +01001499VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001500
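// How the dispatch above works: each Map caches a StaticVisitorBase visitor
// id (assigned in Heap::AllocateMap / AllocatePartialMap further down), and
// table_ maps that id to one of the Evacuate* callbacks registered in
// Initialize(). Scavenge() is therefore a single indexed call instead of a
// switch over instance types, e.g. (illustrative use only):
//
//   ScavengingVisitor::Initialize();              // fills table_ once
//   ScavengingVisitor::Scavenge(map, &slot, obj); // -> EvacuateByteArray, ...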
1501
1502void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1503 ASSERT(InFromSpace(object));
1504 MapWord first_word = object->map_word();
1505 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001506 Map* map = first_word.ToMap();
Iain Merrick75681382010-08-19 15:07:18 +01001507 ScavengingVisitor::Scavenge(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001508}
1509
1510
1511void Heap::ScavengePointer(HeapObject** p) {
1512 ScavengeObject(p, *p);
1513}
1514
1515
John Reck59135872010-11-02 12:39:01 -07001516MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1517 int instance_size) {
1518 Object* result;
1519 { MaybeObject* maybe_result = AllocateRawMap();
1520 if (!maybe_result->ToObject(&result)) return maybe_result;
1521 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001522
1523 // Map::cast cannot be used due to uninitialized map field.
1524 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1525 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1526 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001527 reinterpret_cast<Map*>(result)->
Iain Merrick75681382010-08-19 15:07:18 +01001528 set_visitor_id(
1529 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001530 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001531 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001532 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001533 reinterpret_cast<Map*>(result)->set_bit_field(0);
1534 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001535 return result;
1536}
1537
1538
John Reck59135872010-11-02 12:39:01 -07001539MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1540 Object* result;
1541 { MaybeObject* maybe_result = AllocateRawMap();
1542 if (!maybe_result->ToObject(&result)) return maybe_result;
1543 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001544
1545 Map* map = reinterpret_cast<Map*>(result);
1546 map->set_map(meta_map());
1547 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001548 map->set_visitor_id(
1549 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001550 map->set_prototype(null_value());
1551 map->set_constructor(null_value());
1552 map->set_instance_size(instance_size);
1553 map->set_inobject_properties(0);
1554 map->set_pre_allocated_property_fields(0);
1555 map->set_instance_descriptors(empty_descriptor_array());
1556 map->set_code_cache(empty_fixed_array());
1557 map->set_unused_property_fields(0);
1558 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001559 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001560
                1561  // If the map object is aligned, fill the padding area with Smi 0 objects.
1562 if (Map::kPadStart < Map::kSize) {
1563 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1564 0,
1565 Map::kSize - Map::kPadStart);
1566 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001567 return map;
1568}
1569
1570
John Reck59135872010-11-02 12:39:01 -07001571MaybeObject* Heap::AllocateCodeCache() {
1572 Object* result;
1573 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1574 if (!maybe_result->ToObject(&result)) return maybe_result;
1575 }
Steve Block6ded16b2010-05-10 14:33:55 +01001576 CodeCache* code_cache = CodeCache::cast(result);
1577 code_cache->set_default_cache(empty_fixed_array());
1578 code_cache->set_normal_type_cache(undefined_value());
1579 return code_cache;
1580}
1581
1582
Steve Blocka7e24c12009-10-30 11:49:00 +00001583const Heap::StringTypeTable Heap::string_type_table[] = {
1584#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1585 {type, size, k##camel_name##MapRootIndex},
1586 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1587#undef STRING_TYPE_ELEMENT
1588};
1589
1590
1591const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1592#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1593 {contents, k##name##RootIndex},
1594 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1595#undef CONSTANT_SYMBOL_ELEMENT
1596};
1597
1598
1599const Heap::StructTable Heap::struct_table[] = {
1600#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1601 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1602 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1603#undef STRUCT_TABLE_ELEMENT
1604};
1605
1606
1607bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001608 Object* obj;
1609 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1610 if (!maybe_obj->ToObject(&obj)) return false;
1611 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001612 // Map::cast cannot be used due to uninitialized map field.
1613 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1614 set_meta_map(new_meta_map);
1615 new_meta_map->set_map(new_meta_map);
1616
John Reck59135872010-11-02 12:39:01 -07001617 { MaybeObject* maybe_obj =
1618 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1619 if (!maybe_obj->ToObject(&obj)) return false;
1620 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001621 set_fixed_array_map(Map::cast(obj));
1622
John Reck59135872010-11-02 12:39:01 -07001623 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1624 if (!maybe_obj->ToObject(&obj)) return false;
1625 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001626 set_oddball_map(Map::cast(obj));
1627
Steve Block6ded16b2010-05-10 14:33:55 +01001628 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001629 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1630 if (!maybe_obj->ToObject(&obj)) return false;
1631 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001632 set_empty_fixed_array(FixedArray::cast(obj));
1633
John Reck59135872010-11-02 12:39:01 -07001634 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1635 if (!maybe_obj->ToObject(&obj)) return false;
1636 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001637 set_null_value(obj);
1638
1639 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001640 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1641 if (!maybe_obj->ToObject(&obj)) return false;
1642 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001643 set_empty_descriptor_array(DescriptorArray::cast(obj));
1644
1645 // Fix the instance_descriptors for the existing maps.
1646 meta_map()->set_instance_descriptors(empty_descriptor_array());
1647 meta_map()->set_code_cache(empty_fixed_array());
1648
1649 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1650 fixed_array_map()->set_code_cache(empty_fixed_array());
1651
1652 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1653 oddball_map()->set_code_cache(empty_fixed_array());
1654
1655 // Fix prototype object for existing maps.
1656 meta_map()->set_prototype(null_value());
1657 meta_map()->set_constructor(null_value());
1658
1659 fixed_array_map()->set_prototype(null_value());
1660 fixed_array_map()->set_constructor(null_value());
1661
1662 oddball_map()->set_prototype(null_value());
1663 oddball_map()->set_constructor(null_value());
1664
John Reck59135872010-11-02 12:39:01 -07001665 { MaybeObject* maybe_obj =
1666 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1667 if (!maybe_obj->ToObject(&obj)) return false;
1668 }
Iain Merrick75681382010-08-19 15:07:18 +01001669 set_fixed_cow_array_map(Map::cast(obj));
1670 ASSERT(fixed_array_map() != fixed_cow_array_map());
1671
John Reck59135872010-11-02 12:39:01 -07001672 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1673 if (!maybe_obj->ToObject(&obj)) return false;
1674 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001675 set_heap_number_map(Map::cast(obj));
1676
John Reck59135872010-11-02 12:39:01 -07001677 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1678 if (!maybe_obj->ToObject(&obj)) return false;
1679 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001680 set_proxy_map(Map::cast(obj));
1681
1682 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1683 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001684 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1685 if (!maybe_obj->ToObject(&obj)) return false;
1686 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001687 roots_[entry.index] = Map::cast(obj);
1688 }
1689
John Reck59135872010-11-02 12:39:01 -07001690 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1691 if (!maybe_obj->ToObject(&obj)) return false;
1692 }
Steve Blockd0582a62009-12-15 09:54:21 +00001693 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001694 Map::cast(obj)->set_is_undetectable();
1695
John Reck59135872010-11-02 12:39:01 -07001696 { MaybeObject* maybe_obj =
1697 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1698 if (!maybe_obj->ToObject(&obj)) return false;
1699 }
Steve Blockd0582a62009-12-15 09:54:21 +00001700 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001701 Map::cast(obj)->set_is_undetectable();
1702
John Reck59135872010-11-02 12:39:01 -07001703 { MaybeObject* maybe_obj =
1704 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1705 if (!maybe_obj->ToObject(&obj)) return false;
1706 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001707 set_byte_array_map(Map::cast(obj));
1708
Ben Murdochb0fe1622011-05-05 13:52:32 +01001709 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1710 if (!maybe_obj->ToObject(&obj)) return false;
1711 }
1712 set_empty_byte_array(ByteArray::cast(obj));
1713
John Reck59135872010-11-02 12:39:01 -07001714 { MaybeObject* maybe_obj =
1715 AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1716 if (!maybe_obj->ToObject(&obj)) return false;
1717 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001718 set_pixel_array_map(Map::cast(obj));
1719
John Reck59135872010-11-02 12:39:01 -07001720 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1721 ExternalArray::kAlignedSize);
1722 if (!maybe_obj->ToObject(&obj)) return false;
1723 }
Steve Block3ce2e202009-11-05 08:53:23 +00001724 set_external_byte_array_map(Map::cast(obj));
1725
John Reck59135872010-11-02 12:39:01 -07001726 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1727 ExternalArray::kAlignedSize);
1728 if (!maybe_obj->ToObject(&obj)) return false;
1729 }
Steve Block3ce2e202009-11-05 08:53:23 +00001730 set_external_unsigned_byte_array_map(Map::cast(obj));
1731
John Reck59135872010-11-02 12:39:01 -07001732 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1733 ExternalArray::kAlignedSize);
1734 if (!maybe_obj->ToObject(&obj)) return false;
1735 }
Steve Block3ce2e202009-11-05 08:53:23 +00001736 set_external_short_array_map(Map::cast(obj));
1737
John Reck59135872010-11-02 12:39:01 -07001738 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1739 ExternalArray::kAlignedSize);
1740 if (!maybe_obj->ToObject(&obj)) return false;
1741 }
Steve Block3ce2e202009-11-05 08:53:23 +00001742 set_external_unsigned_short_array_map(Map::cast(obj));
1743
John Reck59135872010-11-02 12:39:01 -07001744 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1745 ExternalArray::kAlignedSize);
1746 if (!maybe_obj->ToObject(&obj)) return false;
1747 }
Steve Block3ce2e202009-11-05 08:53:23 +00001748 set_external_int_array_map(Map::cast(obj));
1749
John Reck59135872010-11-02 12:39:01 -07001750 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1751 ExternalArray::kAlignedSize);
1752 if (!maybe_obj->ToObject(&obj)) return false;
1753 }
Steve Block3ce2e202009-11-05 08:53:23 +00001754 set_external_unsigned_int_array_map(Map::cast(obj));
1755
John Reck59135872010-11-02 12:39:01 -07001756 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1757 ExternalArray::kAlignedSize);
1758 if (!maybe_obj->ToObject(&obj)) return false;
1759 }
Steve Block3ce2e202009-11-05 08:53:23 +00001760 set_external_float_array_map(Map::cast(obj));
1761
John Reck59135872010-11-02 12:39:01 -07001762 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1763 if (!maybe_obj->ToObject(&obj)) return false;
1764 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001765 set_code_map(Map::cast(obj));
1766
John Reck59135872010-11-02 12:39:01 -07001767 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1768 JSGlobalPropertyCell::kSize);
1769 if (!maybe_obj->ToObject(&obj)) return false;
1770 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001771 set_global_property_cell_map(Map::cast(obj));
1772
John Reck59135872010-11-02 12:39:01 -07001773 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1774 if (!maybe_obj->ToObject(&obj)) return false;
1775 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001776 set_one_pointer_filler_map(Map::cast(obj));
1777
John Reck59135872010-11-02 12:39:01 -07001778 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1779 if (!maybe_obj->ToObject(&obj)) return false;
1780 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001781 set_two_pointer_filler_map(Map::cast(obj));
1782
1783 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1784 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001785 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1786 if (!maybe_obj->ToObject(&obj)) return false;
1787 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001788 roots_[entry.index] = Map::cast(obj);
1789 }
1790
John Reck59135872010-11-02 12:39:01 -07001791 { MaybeObject* maybe_obj =
1792 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1793 if (!maybe_obj->ToObject(&obj)) return false;
1794 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001795 set_hash_table_map(Map::cast(obj));
1796
John Reck59135872010-11-02 12:39:01 -07001797 { MaybeObject* maybe_obj =
1798 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1799 if (!maybe_obj->ToObject(&obj)) return false;
1800 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 set_context_map(Map::cast(obj));
1802
John Reck59135872010-11-02 12:39:01 -07001803 { MaybeObject* maybe_obj =
1804 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1805 if (!maybe_obj->ToObject(&obj)) return false;
1806 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001807 set_catch_context_map(Map::cast(obj));
1808
John Reck59135872010-11-02 12:39:01 -07001809 { MaybeObject* maybe_obj =
1810 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1811 if (!maybe_obj->ToObject(&obj)) return false;
1812 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001813 Map* global_context_map = Map::cast(obj);
1814 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1815 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001816
John Reck59135872010-11-02 12:39:01 -07001817 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1818 SharedFunctionInfo::kAlignedSize);
1819 if (!maybe_obj->ToObject(&obj)) return false;
1820 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001821 set_shared_function_info_map(Map::cast(obj));
1822
1823 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1824 return true;
1825}
1826
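// Ordering note for CreateInitialMaps above: the first maps (meta map, fixed
// array map, oddball map) are built with AllocatePartialMap because the
// objects their remaining fields must reference -- empty_fixed_array,
// empty_descriptor_array and null_value -- do not exist yet; the meta map
// even acts as its own map. Once those helper objects have been allocated,
// the partial maps are patched in place (the "Fix the instance_descriptors /
// prototype for existing maps" steps), and every later map can be created
// with the fully initializing AllocateMap.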
1827
John Reck59135872010-11-02 12:39:01 -07001828MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001829 // Statically ensure that it is safe to allocate heap numbers in paged
1830 // spaces.
1831 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1832 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1833
John Reck59135872010-11-02 12:39:01 -07001834 Object* result;
1835 { MaybeObject* maybe_result =
1836 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1837 if (!maybe_result->ToObject(&result)) return maybe_result;
1838 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001839
1840 HeapObject::cast(result)->set_map(heap_number_map());
1841 HeapNumber::cast(result)->set_value(value);
1842 return result;
1843}
1844
1845
John Reck59135872010-11-02 12:39:01 -07001846MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 // Use general version, if we're forced to always allocate.
1848 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1849
1850 // This version of AllocateHeapNumber is optimized for
1851 // allocation in new space.
1852 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1853 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001854 Object* result;
1855 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1856 if (!maybe_result->ToObject(&result)) return maybe_result;
1857 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001858 HeapObject::cast(result)->set_map(heap_number_map());
1859 HeapNumber::cast(result)->set_value(value);
1860 return result;
1861}
1862
1863
John Reck59135872010-11-02 12:39:01 -07001864MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1865 Object* result;
1866 { MaybeObject* maybe_result = AllocateRawCell();
1867 if (!maybe_result->ToObject(&result)) return maybe_result;
1868 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001869 HeapObject::cast(result)->set_map(global_property_cell_map());
1870 JSGlobalPropertyCell::cast(result)->set_value(value);
1871 return result;
1872}
1873
1874
John Reck59135872010-11-02 12:39:01 -07001875MaybeObject* Heap::CreateOddball(const char* to_string,
1876 Object* to_number) {
1877 Object* result;
1878 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1879 if (!maybe_result->ToObject(&result)) return maybe_result;
1880 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001881 return Oddball::cast(result)->Initialize(to_string, to_number);
1882}
1883
1884
1885bool Heap::CreateApiObjects() {
1886 Object* obj;
1887
John Reck59135872010-11-02 12:39:01 -07001888 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1889 if (!maybe_obj->ToObject(&obj)) return false;
1890 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001891 set_neander_map(Map::cast(obj));
1892
John Reck59135872010-11-02 12:39:01 -07001893 { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
1894 if (!maybe_obj->ToObject(&obj)) return false;
1895 }
1896 Object* elements;
1897 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1898 if (!maybe_elements->ToObject(&elements)) return false;
1899 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001900 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1901 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1902 set_message_listeners(JSObject::cast(obj));
1903
1904 return true;
1905}
1906
1907
1908void Heap::CreateCEntryStub() {
1909 CEntryStub stub(1);
1910 set_c_entry_code(*stub.GetCode());
1911}
1912
1913
Steve Block6ded16b2010-05-10 14:33:55 +01001914#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001915void Heap::CreateRegExpCEntryStub() {
1916 RegExpCEntryStub stub;
1917 set_re_c_entry_code(*stub.GetCode());
1918}
1919#endif
1920
1921
Steve Blocka7e24c12009-10-30 11:49:00 +00001922void Heap::CreateJSEntryStub() {
1923 JSEntryStub stub;
1924 set_js_entry_code(*stub.GetCode());
1925}
1926
1927
1928void Heap::CreateJSConstructEntryStub() {
1929 JSConstructEntryStub stub;
1930 set_js_construct_entry_code(*stub.GetCode());
1931}
1932
1933
1934void Heap::CreateFixedStubs() {
1935 // Here we create roots for fixed stubs. They are needed at GC
1936 // for cooking and uncooking (check out frames.cc).
                1937  // This eliminates the need for doing dictionary lookup in the
1938 // stub cache for these stubs.
1939 HandleScope scope;
1940 // gcc-4.4 has problem generating correct code of following snippet:
1941 // { CEntryStub stub;
1942 // c_entry_code_ = *stub.GetCode();
1943 // }
Leon Clarke4515c472010-02-03 11:58:03 +00001944 // { DebuggerStatementStub stub;
1945 // debugger_statement_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001946 // }
1947 // To workaround the problem, make separate functions without inlining.
1948 Heap::CreateCEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001949 Heap::CreateJSEntryStub();
1950 Heap::CreateJSConstructEntryStub();
Steve Block6ded16b2010-05-10 14:33:55 +01001951#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001952 Heap::CreateRegExpCEntryStub();
1953#endif
1954}
1955
1956
1957bool Heap::CreateInitialObjects() {
1958 Object* obj;
1959
1960 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001961 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1962 if (!maybe_obj->ToObject(&obj)) return false;
1963 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001964 set_minus_zero_value(obj);
1965 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1966
John Reck59135872010-11-02 12:39:01 -07001967 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1968 if (!maybe_obj->ToObject(&obj)) return false;
1969 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001970 set_nan_value(obj);
1971
John Reck59135872010-11-02 12:39:01 -07001972 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1973 if (!maybe_obj->ToObject(&obj)) return false;
1974 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001975 set_undefined_value(obj);
1976 ASSERT(!InNewSpace(undefined_value()));
1977
1978 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07001979 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1980 if (!maybe_obj->ToObject(&obj)) return false;
1981 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001982 // Don't use set_symbol_table() due to asserts.
1983 roots_[kSymbolTableRootIndex] = obj;
1984
                1985  // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07001986 Object* symbol;
1987 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
1988 if (!maybe_symbol->ToObject(&symbol)) return false;
1989 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001990 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1991 Oddball::cast(undefined_value())->set_to_number(nan_value());
1992
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07001994 { MaybeObject* maybe_obj =
1995 Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1996 if (!maybe_obj->ToObject(&obj)) return false;
1997 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001998
John Reck59135872010-11-02 12:39:01 -07001999 { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
2000 if (!maybe_obj->ToObject(&obj)) return false;
2001 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002002 set_true_value(obj);
2003
John Reck59135872010-11-02 12:39:01 -07002004 { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
2005 if (!maybe_obj->ToObject(&obj)) return false;
2006 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002007 set_false_value(obj);
2008
John Reck59135872010-11-02 12:39:01 -07002009 { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
2010 if (!maybe_obj->ToObject(&obj)) return false;
2011 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002012 set_the_hole_value(obj);
2013
Ben Murdoch086aeea2011-05-13 15:57:08 +01002014 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2015 Smi::FromInt(-4));
2016 if (!maybe_obj->ToObject(&obj)) return false;
2017 }
2018 set_arguments_marker(obj);
2019
John Reck59135872010-11-02 12:39:01 -07002020 { MaybeObject* maybe_obj =
2021 CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
2022 if (!maybe_obj->ToObject(&obj)) return false;
2023 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002024 set_no_interceptor_result_sentinel(obj);
2025
John Reck59135872010-11-02 12:39:01 -07002026 { MaybeObject* maybe_obj =
2027 CreateOddball("termination_exception", Smi::FromInt(-3));
2028 if (!maybe_obj->ToObject(&obj)) return false;
2029 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002030 set_termination_exception(obj);
2031
2032 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002033 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2034 if (!maybe_obj->ToObject(&obj)) return false;
2035 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002036 set_empty_string(String::cast(obj));
2037
2038 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002039 { MaybeObject* maybe_obj =
2040 LookupAsciiSymbol(constant_symbol_table[i].contents);
2041 if (!maybe_obj->ToObject(&obj)) return false;
2042 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002043 roots_[constant_symbol_table[i].index] = String::cast(obj);
2044 }
2045
2046 // Allocate the hidden symbol which is used to identify the hidden properties
2047 // in JSObjects. The hash code has a special value so that it will not match
2048 // the empty string when searching for the property. It cannot be part of the
2049 // loop above because it needs to be allocated manually with the special
2050 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2051 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002052 { MaybeObject* maybe_obj =
2053 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2054 if (!maybe_obj->ToObject(&obj)) return false;
2055 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002056 hidden_symbol_ = String::cast(obj);
2057
2058 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002059 { MaybeObject* maybe_obj =
2060 AllocateProxy((Address) &Accessors::ObjectPrototype);
2061 if (!maybe_obj->ToObject(&obj)) return false;
2062 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002063 set_prototype_accessors(Proxy::cast(obj));
2064
2065 // Allocate the code_stubs dictionary. The initial size is set to avoid
2066 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002067 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2068 if (!maybe_obj->ToObject(&obj)) return false;
2069 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002070 set_code_stubs(NumberDictionary::cast(obj));
2071
2072 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2073 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002074 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2075 if (!maybe_obj->ToObject(&obj)) return false;
2076 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002077 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2078
Kristian Monsen25f61362010-05-21 11:50:48 +01002079 set_instanceof_cache_function(Smi::FromInt(0));
2080 set_instanceof_cache_map(Smi::FromInt(0));
2081 set_instanceof_cache_answer(Smi::FromInt(0));
2082
Steve Blocka7e24c12009-10-30 11:49:00 +00002083 CreateFixedStubs();
2084
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002085 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002086 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2087 if (!maybe_obj->ToObject(&obj)) return false;
2088 }
2089 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
2090 if (!maybe_obj->ToObject(&obj)) return false;
2091 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002092 set_intrinsic_function_names(StringDictionary::cast(obj));
2093
Leon Clarkee46be812010-01-19 14:06:41 +00002094 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002095
Steve Block6ded16b2010-05-10 14:33:55 +01002096 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002097 { MaybeObject* maybe_obj =
2098 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2099 if (!maybe_obj->ToObject(&obj)) return false;
2100 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002101 set_single_character_string_cache(FixedArray::cast(obj));
2102
2103 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002104 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2105 if (!maybe_obj->ToObject(&obj)) return false;
2106 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002107 set_natives_source_cache(FixedArray::cast(obj));
2108
2109 // Handling of script id generation is in Factory::NewScript.
2110 set_last_script_id(undefined_value());
2111
2112 // Initialize keyed lookup cache.
2113 KeyedLookupCache::Clear();
2114
2115 // Initialize context slot cache.
2116 ContextSlotCache::Clear();
2117
2118 // Initialize descriptor cache.
2119 DescriptorLookupCache::Clear();
2120
2121 // Initialize compilation cache.
2122 CompilationCache::Clear();
2123
2124 return true;
2125}
2126
2127
John Reck59135872010-11-02 12:39:01 -07002128MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002129 // Compute the size of the number string cache based on the max heap size.
2130 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2131 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2132 int number_string_cache_size = max_semispace_size_ / 512;
2133 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002134 Object* obj;
2135 MaybeObject* maybe_obj =
2136 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2137 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2138 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002139}
2140
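// Worked example for the sizing above, assuming max_semispace_size_ is in
// bytes: an 8 MB semispace gives 8 MB / 512 = 16384 cache entries, which the
// Max/Min clamp leaves unchanged, so the backing FixedArray is allocated with
// 16384 * 2 = 32768 slots -- one (number, string) pair per cache entry.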
2141
2142void Heap::FlushNumberStringCache() {
2143 // Flush the number to string cache.
2144 int len = number_string_cache()->length();
2145 for (int i = 0; i < len; i++) {
2146 number_string_cache()->set_undefined(i);
2147 }
2148}
2149
2150
Steve Blocka7e24c12009-10-30 11:49:00 +00002151static inline int double_get_hash(double d) {
2152 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002153 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002154}
2155
2156
2157static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002158 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002159}
2160
2161
Steve Blocka7e24c12009-10-30 11:49:00 +00002162Object* Heap::GetNumberStringCache(Object* number) {
2163 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002164 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002165 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002166 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002167 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002168 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002169 }
2170 Object* key = number_string_cache()->get(hash * 2);
2171 if (key == number) {
2172 return String::cast(number_string_cache()->get(hash * 2 + 1));
2173 } else if (key->IsHeapNumber() &&
2174 number->IsHeapNumber() &&
2175 key->Number() == number->Number()) {
2176 return String::cast(number_string_cache()->get(hash * 2 + 1));
2177 }
2178 return undefined_value();
2179}
2180
2181
2182void Heap::SetNumberStringCache(Object* number, String* string) {
2183 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002184 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002185 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002186 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002187 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002188 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002189 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002190 number_string_cache()->set(hash * 2, number);
2191 }
2192 number_string_cache()->set(hash * 2 + 1, string);
2193}
2194
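// Layout used by GetNumberStringCache/SetNumberStringCache above: the backing
// FixedArray stores (key, value) pairs, with the key (a Smi or a HeapNumber)
// at index 2 * hash and the cached string at index 2 * hash + 1. The
// expression (length >> 1) - 1 only behaves as a proper index mask when the
// entry count is a power of two, which the semispace-derived sizing in
// InitializeNumberStringCache is relied on to produce.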
2195
John Reck59135872010-11-02 12:39:01 -07002196MaybeObject* Heap::NumberToString(Object* number,
2197 bool check_number_string_cache) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002198 Counters::number_to_string_runtime.Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002199 if (check_number_string_cache) {
2200 Object* cached = GetNumberStringCache(number);
2201 if (cached != undefined_value()) {
2202 return cached;
2203 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002204 }
2205
2206 char arr[100];
2207 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2208 const char* str;
2209 if (number->IsSmi()) {
2210 int num = Smi::cast(number)->value();
2211 str = IntToCString(num, buffer);
2212 } else {
2213 double num = HeapNumber::cast(number)->value();
2214 str = DoubleToCString(num, buffer);
2215 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002216
John Reck59135872010-11-02 12:39:01 -07002217 Object* js_string;
2218 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2219 if (maybe_js_string->ToObject(&js_string)) {
2220 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002221 }
John Reck59135872010-11-02 12:39:01 -07002222 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002223}
2224
2225
Steve Block3ce2e202009-11-05 08:53:23 +00002226Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2227 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2228}
2229
2230
2231Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2232 ExternalArrayType array_type) {
2233 switch (array_type) {
2234 case kExternalByteArray:
2235 return kExternalByteArrayMapRootIndex;
2236 case kExternalUnsignedByteArray:
2237 return kExternalUnsignedByteArrayMapRootIndex;
2238 case kExternalShortArray:
2239 return kExternalShortArrayMapRootIndex;
2240 case kExternalUnsignedShortArray:
2241 return kExternalUnsignedShortArrayMapRootIndex;
2242 case kExternalIntArray:
2243 return kExternalIntArrayMapRootIndex;
2244 case kExternalUnsignedIntArray:
2245 return kExternalUnsignedIntArrayMapRootIndex;
2246 case kExternalFloatArray:
2247 return kExternalFloatArrayMapRootIndex;
2248 default:
2249 UNREACHABLE();
2250 return kUndefinedValueRootIndex;
2251 }
2252}
2253
2254
John Reck59135872010-11-02 12:39:01 -07002255MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002256 // We need to distinguish the minus zero value and this cannot be
2257 // done after conversion to int. Doing this by comparing bit
2258 // patterns is faster than using fpclassify() et al.
2259 static const DoubleRepresentation minus_zero(-0.0);
2260
2261 DoubleRepresentation rep(value);
2262 if (rep.bits == minus_zero.bits) {
2263 return AllocateHeapNumber(-0.0, pretenure);
2264 }
2265
2266 int int_value = FastD2I(value);
2267 if (value == int_value && Smi::IsValid(int_value)) {
2268 return Smi::FromInt(int_value);
2269 }
2270
2271 // Materialize the value in the heap.
2272 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002273}
2274
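// Why NumberFromDouble compares bit patterns above: under IEEE 754,
// -0.0 == 0.0, so an equality test (or FastD2I alone) would silently turn
// -0.0 into the Smi 0 and drop the sign. Comparing the raw 64-bit
// representations keeps -0.0 on the HeapNumber path:
//
//   NumberFromDouble(-0.0)  // -> heap number, sign preserved
//   NumberFromDouble(0.0)   // -> Smi::FromInt(0)
//   NumberFromDouble(2.5)   // -> heap number, 2.5 has no Smi encoding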
2275
John Reck59135872010-11-02 12:39:01 -07002276MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002277 // Statically ensure that it is safe to allocate proxies in paged spaces.
2278 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2279 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002280 Object* result;
2281 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2282 if (!maybe_result->ToObject(&result)) return maybe_result;
2283 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002284
2285 Proxy::cast(result)->set_proxy(proxy);
2286 return result;
2287}
2288
2289
John Reck59135872010-11-02 12:39:01 -07002290MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2291 Object* result;
2292 { MaybeObject* maybe_result =
2293 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2294 if (!maybe_result->ToObject(&result)) return maybe_result;
2295 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002296
2297 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2298 share->set_name(name);
2299 Code* illegal = Builtins::builtin(Builtins::Illegal);
2300 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002301 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00002302 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
2303 share->set_construct_stub(construct_stub);
2304 share->set_expected_nof_properties(0);
2305 share->set_length(0);
2306 share->set_formal_parameter_count(0);
2307 share->set_instance_class_name(Object_symbol());
2308 share->set_function_data(undefined_value());
2309 share->set_script(undefined_value());
2310 share->set_start_position_and_type(0);
2311 share->set_debug_info(undefined_value());
2312 share->set_inferred_name(empty_string());
2313 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002314 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002315 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002316 share->set_this_property_assignments_count(0);
2317 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002318 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002319 share->set_num_literals(0);
2320 share->set_end_position(0);
2321 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002322 return result;
2323}
2324
2325
Steve Blockd0582a62009-12-15 09:54:21 +00002326// Returns true for a character in a range. Both limits are inclusive.
2327static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
                2328  // This makes use of the unsigned wraparound.
2329 return character - from <= to - from;
2330}
2331
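// Example of the wraparound trick in Between() above: with from = '0' (48)
// and to = '9' (57), to - from is 9. 'A' (65) gives 65 - 48 = 17 > 9 and
// fails; '%' (37) gives 37 - 48, which wraps around to a huge unsigned value
// and also fails -- so one unsigned comparison covers both sides of the range.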
2332
John Reck59135872010-11-02 12:39:01 -07002333MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2334 uint32_t c1,
2335 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002336 String* symbol;
2337 // Numeric strings have a different hash algorithm not known by
2338 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2339 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2340 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2341 return symbol;
2342 // Now we know the length is 2, we might as well make use of that fact
2343 // when building the new string.
2344 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2345 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002346 Object* result;
2347 { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
2348 if (!maybe_result->ToObject(&result)) return maybe_result;
2349 }
Steve Blockd0582a62009-12-15 09:54:21 +00002350 char* dest = SeqAsciiString::cast(result)->GetChars();
2351 dest[0] = c1;
2352 dest[1] = c2;
2353 return result;
2354 } else {
John Reck59135872010-11-02 12:39:01 -07002355 Object* result;
2356 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
2357 if (!maybe_result->ToObject(&result)) return maybe_result;
2358 }
Steve Blockd0582a62009-12-15 09:54:21 +00002359 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2360 dest[0] = c1;
2361 dest[1] = c2;
2362 return result;
2363 }
2364}
2365
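// Two details of the helper above: the test (c1 | c2) <= kMaxAsciiCharCodeU
// is a branch-free "both characters are ASCII" check -- OR-ing never clears
// bits, so the result stays at or below 0x7f exactly when both inputs do,
// which is why the ASSERT requires kMaxAsciiCharCodeU + 1 to be a power of
// two. Digit pairs such as "42" skip the symbol-table probe entirely because
// numeric strings are hashed differently than LookupTwoCharsSymbolIfExists
// expects.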
2366
John Reck59135872010-11-02 12:39:01 -07002367MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002368 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002369 if (first_length == 0) {
2370 return second;
2371 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002372
2373 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002374 if (second_length == 0) {
2375 return first;
2376 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002377
2378 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002379
2380 // Optimization for 2-byte strings often used as keys in a decompression
2381 // dictionary. Check whether we already have the string in the symbol
                2382  // table to prevent creation of many unnecessary strings.
2383 if (length == 2) {
2384 unsigned c1 = first->Get(0);
2385 unsigned c2 = second->Get(0);
2386 return MakeOrFindTwoCharacterString(c1, c2);
2387 }
2388
Steve Block6ded16b2010-05-10 14:33:55 +01002389 bool first_is_ascii = first->IsAsciiRepresentation();
2390 bool second_is_ascii = second->IsAsciiRepresentation();
2391 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002392
2393 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002394 // of the new cons string is too large.
2395 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002396 Top::context()->mark_out_of_memory();
2397 return Failure::OutOfMemoryException();
2398 }
2399
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002400 bool is_ascii_data_in_two_byte_string = false;
2401 if (!is_ascii) {
2402 // At least one of the strings uses two-byte representation so we
2403 // can't use the fast case code for short ascii strings below, but
2404 // we can try to save memory if all chars actually fit in ascii.
2405 is_ascii_data_in_two_byte_string =
2406 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2407 if (is_ascii_data_in_two_byte_string) {
2408 Counters::string_add_runtime_ext_to_ascii.Increment();
2409 }
2410 }
2411
Steve Blocka7e24c12009-10-30 11:49:00 +00002412 // If the resulting string is small make a flat string.
2413 if (length < String::kMinNonFlatLength) {
2414 ASSERT(first->IsFlat());
2415 ASSERT(second->IsFlat());
2416 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002417 Object* result;
2418 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2419 if (!maybe_result->ToObject(&result)) return maybe_result;
2420 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002421 // Copy the characters into the new object.
2422 char* dest = SeqAsciiString::cast(result)->GetChars();
2423 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002424 const char* src;
2425 if (first->IsExternalString()) {
2426 src = ExternalAsciiString::cast(first)->resource()->data();
2427 } else {
2428 src = SeqAsciiString::cast(first)->GetChars();
2429 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002430 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2431 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002432 if (second->IsExternalString()) {
2433 src = ExternalAsciiString::cast(second)->resource()->data();
2434 } else {
2435 src = SeqAsciiString::cast(second)->GetChars();
2436 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002437 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2438 return result;
2439 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002440 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002441 Object* result;
2442 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2443 if (!maybe_result->ToObject(&result)) return maybe_result;
2444 }
Steve Block6ded16b2010-05-10 14:33:55 +01002445 // Copy the characters into the new object.
2446 char* dest = SeqAsciiString::cast(result)->GetChars();
2447 String::WriteToFlat(first, dest, 0, first_length);
2448 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block6ded16b2010-05-10 14:33:55 +01002449 return result;
2450 }
2451
John Reck59135872010-11-02 12:39:01 -07002452 Object* result;
2453 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2454 if (!maybe_result->ToObject(&result)) return maybe_result;
2455 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002456 // Copy the characters into the new object.
2457 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2458 String::WriteToFlat(first, dest, 0, first_length);
2459 String::WriteToFlat(second, dest + first_length, 0, second_length);
2460 return result;
2461 }
2462 }
2463
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002464 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2465 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002466
John Reck59135872010-11-02 12:39:01 -07002467 Object* result;
2468 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2469 if (!maybe_result->ToObject(&result)) return maybe_result;
2470 }
Leon Clarke4515c472010-02-03 11:58:03 +00002471
2472 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002473 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002474 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002475 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002476 cons_string->set_hash_field(String::kEmptyHashField);
2477 cons_string->set_first(first, mode);
2478 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002479 return result;
2480}
2481
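// Representation choice made by AllocateConsString above: results shorter
// than String::kMinNonFlatLength are flattened immediately into a sequential
// string (ASCII if both inputs are ASCII, or if the two-byte inputs carry
// only ASCII data); anything longer becomes a ConsString whose map records
// the ASCII-data property so later flattening can pick the cheaper encoding.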
2482
John Reck59135872010-11-02 12:39:01 -07002483MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002484 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002485 int end,
2486 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002487 int length = end - start;
2488
2489 if (length == 1) {
2490 return Heap::LookupSingleCharacterStringFromCode(
2491 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002492 } else if (length == 2) {
2493 // Optimization for 2-byte strings often used as keys in a decompression
2494 // dictionary. Check whether we already have the string in the symbol
                2495    // table to prevent creation of many unnecessary strings.
2496 unsigned c1 = buffer->Get(start);
2497 unsigned c2 = buffer->Get(start + 1);
2498 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002499 }
2500
2501 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002502 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002503
John Reck59135872010-11-02 12:39:01 -07002504 Object* result;
2505 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
                2506        ? AllocateRawAsciiString(length, pretenure)
2507 : AllocateRawTwoByteString(length, pretenure);
2508 if (!maybe_result->ToObject(&result)) return maybe_result;
2509 }
Steve Blockd0582a62009-12-15 09:54:21 +00002510 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002511 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002512 if (buffer->IsAsciiRepresentation()) {
2513 ASSERT(string_result->IsAsciiRepresentation());
2514 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2515 String::WriteToFlat(buffer, dest, start, end);
2516 } else {
2517 ASSERT(string_result->IsTwoByteRepresentation());
2518 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2519 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002520 }
Steve Blockd0582a62009-12-15 09:54:21 +00002521
Steve Blocka7e24c12009-10-30 11:49:00 +00002522 return result;
2523}
2524
2525
John Reck59135872010-11-02 12:39:01 -07002526MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002527 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002528 size_t length = resource->length();
2529 if (length > static_cast<size_t>(String::kMaxLength)) {
2530 Top::context()->mark_out_of_memory();
2531 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002532 }
2533
Steve Blockd0582a62009-12-15 09:54:21 +00002534 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002535 Object* result;
2536 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2537 if (!maybe_result->ToObject(&result)) return maybe_result;
2538 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002539
2540 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002541 external_string->set_length(static_cast<int>(length));
2542 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002543 external_string->set_resource(resource);
2544
2545 return result;
2546}
2547
2548
John Reck59135872010-11-02 12:39:01 -07002549MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002550 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002551 size_t length = resource->length();
2552 if (length > static_cast<size_t>(String::kMaxLength)) {
2553 Top::context()->mark_out_of_memory();
2554 return Failure::OutOfMemoryException();
2555 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002556
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002557 // For small strings we check whether the resource contains only
Steve Block9fac8402011-05-12 15:51:54 +01002558 // ASCII characters. If so, we use a different string map.
2559 static const size_t kAsciiCheckLengthLimit = 32;
2560 bool is_ascii = length <= kAsciiCheckLengthLimit &&
2561 String::IsAscii(resource->data(), static_cast<int>(length));
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002562 Map* map = is_ascii ?
2563 Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
John Reck59135872010-11-02 12:39:01 -07002564 Object* result;
2565 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2566 if (!maybe_result->ToObject(&result)) return maybe_result;
2567 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002568
2569 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002570 external_string->set_length(static_cast<int>(length));
2571 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002572 external_string->set_resource(resource);
2573
2574 return result;
2575}
2576
2577
John Reck59135872010-11-02 12:39:01 -07002578MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002579 if (code <= String::kMaxAsciiCharCode) {
2580 Object* value = Heap::single_character_string_cache()->get(code);
2581 if (value != Heap::undefined_value()) return value;
2582
2583 char buffer[1];
2584 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002585 Object* result;
2586 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002587
John Reck59135872010-11-02 12:39:01 -07002588 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002589 Heap::single_character_string_cache()->set(code, result);
2590 return result;
2591 }
2592
John Reck59135872010-11-02 12:39:01 -07002593 Object* result;
2594 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
2595 if (!maybe_result->ToObject(&result)) return maybe_result;
2596 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002597 String* answer = String::cast(result);
2598 answer->Set(0, code);
2599 return answer;
2600}
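// A minimal standalone sketch (not part of heap.cc) of the single-character
// cache used above: ASCII codes are served from a lazily filled table, other
// codes get a fresh string. std::string and the 0x80 bound are stand-ins for
// the heap string types and String::kMaxAsciiCharCode.
#include <cstdint>
#include <string>

std::string SingleCharacterSketch(uint16_t code) {
  static std::string cache[0x80];  // one slot per ASCII code point
  if (code < 0x80) {
    if (cache[code].empty()) {
      cache[code] = std::string(1, static_cast<char>(code));
    }
    return cache[code];
  }
  // Codes outside the ASCII range are not cached; a real implementation
  // allocates a fresh two-byte string for them. Placeholder result only.
  return std::string();
}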
2601
2602
John Reck59135872010-11-02 12:39:01 -07002603MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002604 if (length < 0 || length > ByteArray::kMaxLength) {
2605 return Failure::OutOfMemoryException();
2606 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002607 if (pretenure == NOT_TENURED) {
2608 return AllocateByteArray(length);
2609 }
2610 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002611 Object* result;
2612 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2613 ? old_data_space_->AllocateRaw(size)
2614 : lo_space_->AllocateRaw(size);
2615 if (!maybe_result->ToObject(&result)) return maybe_result;
2616 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002617
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002618 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2619 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002620 return result;
2621}
2622
2623
John Reck59135872010-11-02 12:39:01 -07002624MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002625 if (length < 0 || length > ByteArray::kMaxLength) {
2626 return Failure::OutOfMemoryException();
2627 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002628 int size = ByteArray::SizeFor(length);
2629 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002630 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002631 Object* result;
2632 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2633 if (!maybe_result->ToObject(&result)) return maybe_result;
2634 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002635
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002636 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2637 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002638 return result;
2639}
2640
2641
2642void Heap::CreateFillerObjectAt(Address addr, int size) {
2643 if (size == 0) return;
2644 HeapObject* filler = HeapObject::FromAddress(addr);
2645 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002646 filler->set_map(one_pointer_filler_map());
2647 } else if (size == 2 * kPointerSize) {
2648 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002649 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002650 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002651 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2652 }
2653}
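// A minimal standalone sketch (not part of heap.cc) of the filler choice made
// above: a free gap is described either by a one-pointer filler, a two-pointer
// filler, or a byte array spanning the rest. The enum and helper are
// hypothetical stand-ins.
#include <cassert>
#include <cstddef>

enum class FillerKind { kOnePointer, kTwoPointer, kByteArray };

FillerKind ChooseFillerSketch(size_t size) {
  const size_t kPointer = sizeof(void*);
  assert(size > 0 && size % kPointer == 0);
  if (size == kPointer) return FillerKind::kOnePointer;
  if (size == 2 * kPointer) return FillerKind::kTwoPointer;
  return FillerKind::kByteArray;  // larger gaps become a byte array filler
}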
2654
2655
John Reck59135872010-11-02 12:39:01 -07002656MaybeObject* Heap::AllocatePixelArray(int length,
Steve Blocka7e24c12009-10-30 11:49:00 +00002657 uint8_t* external_pointer,
2658 PretenureFlag pretenure) {
2659 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002660 Object* result;
2661 { MaybeObject* maybe_result =
2662 AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
2663 if (!maybe_result->ToObject(&result)) return maybe_result;
2664 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002665
2666 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2667 reinterpret_cast<PixelArray*>(result)->set_length(length);
2668 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2669
2670 return result;
2671}
2672
2673
John Reck59135872010-11-02 12:39:01 -07002674MaybeObject* Heap::AllocateExternalArray(int length,
2675 ExternalArrayType array_type,
2676 void* external_pointer,
2677 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002678 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002679 Object* result;
2680 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2681 space,
2682 OLD_DATA_SPACE);
2683 if (!maybe_result->ToObject(&result)) return maybe_result;
2684 }
Steve Block3ce2e202009-11-05 08:53:23 +00002685
2686 reinterpret_cast<ExternalArray*>(result)->set_map(
2687 MapForExternalArrayType(array_type));
2688 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2689 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2690 external_pointer);
2691
2692 return result;
2693}
2694
2695
John Reck59135872010-11-02 12:39:01 -07002696MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2697 Code::Flags flags,
2698 Handle<Object> self_reference) {
Leon Clarkeac952652010-07-15 11:15:24 +01002699 // Allocate ByteArray before the Code object, so that we do not risk
2700 // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002701 Object* reloc_info;
2702 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2703 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2704 }
Leon Clarkeac952652010-07-15 11:15:24 +01002705
Steve Blocka7e24c12009-10-30 11:49:00 +00002706 // Compute size
Leon Clarkeac952652010-07-15 11:15:24 +01002707 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002708 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002709 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002710 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002711 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002712 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002713 } else {
John Reck59135872010-11-02 12:39:01 -07002714 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002715 }
2716
John Reck59135872010-11-02 12:39:01 -07002717 Object* result;
2718 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002719
2720 // Initialize the object
2721 HeapObject::cast(result)->set_map(code_map());
2722 Code* code = Code::cast(result);
2723 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2724 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002725 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002726 code->set_flags(flags);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002727 code->set_deoptimization_data(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002728 // Allow self references to created code object by patching the handle to
2729 // point to the newly allocated Code object.
2730 if (!self_reference.is_null()) {
2731 *(self_reference.location()) = code;
2732 }
2733 // Migrate generated code.
2734 // The generated code can contain Object** values (typically from handles)
2735 // that are dereferenced during the copy to point directly to the actual heap
2736 // objects. These pointers can include references to the code object itself,
2737 // through the self_reference parameter.
2738 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002739
2740#ifdef DEBUG
2741 code->Verify();
2742#endif
2743 return code;
2744}
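// A minimal standalone sketch (not part of heap.cc) of the size computation
// above: the instruction body is rounded up to object alignment and the total
// object size (header plus body) is padded to code alignment. Header size and
// alignments are assumed example values, not V8's actual constants.
int RoundUpToSketch(int value, int alignment) {  // alignment: a power of two
  return (value + alignment - 1) & ~(alignment - 1);
}

int CodeObjectSizeSketch(int instr_size) {
  const int kHeaderSize = 64;       // assumed stand-in for the Code header size
  const int kObjectAlignment = 8;   // assumed
  const int kCodeAlignment = 32;    // assumed
  int body_size = RoundUpToSketch(instr_size, kObjectAlignment);
  // Plays the role of Code::SizeFor(body_size); the result satisfies the
  // stricter code alignment, matching the ASSERT in CreateCode above.
  return RoundUpToSketch(kHeaderSize + body_size, kCodeAlignment);
}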
2745
2746
John Reck59135872010-11-02 12:39:01 -07002747MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002748 // Allocate an object the same size as the code object.
2749 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002750 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002751 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002752 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002753 } else {
John Reck59135872010-11-02 12:39:01 -07002754 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002755 }
2756
John Reck59135872010-11-02 12:39:01 -07002757 Object* result;
2758 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002759
2760 // Copy code object.
2761 Address old_addr = code->address();
2762 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002763 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002764 // Relocate the copy.
2765 Code* new_code = Code::cast(result);
2766 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2767 new_code->Relocate(new_addr - old_addr);
2768 return new_code;
2769}
2770
2771
John Reck59135872010-11-02 12:39:01 -07002772MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002773 // Allocate ByteArray before the Code object, so that we do not risk
2774 // leaving an uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002775 Object* reloc_info_array;
2776 { MaybeObject* maybe_reloc_info_array =
2777 AllocateByteArray(reloc_info.length(), TENURED);
2778 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2779 return maybe_reloc_info_array;
2780 }
2781 }
Leon Clarkeac952652010-07-15 11:15:24 +01002782
2783 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002784
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002785 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002786
2787 Address old_addr = code->address();
2788
2789 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002790 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002791
John Reck59135872010-11-02 12:39:01 -07002792 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002793 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002794 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002795 } else {
John Reck59135872010-11-02 12:39:01 -07002796 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002797 }
2798
John Reck59135872010-11-02 12:39:01 -07002799 Object* result;
2800 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002801
2802 // Copy code object.
2803 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2804
2805 // Copy header and instructions.
2806 memcpy(new_addr, old_addr, relocation_offset);
2807
Steve Block6ded16b2010-05-10 14:33:55 +01002808 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002809 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002810
Leon Clarkeac952652010-07-15 11:15:24 +01002811 // Copy patched rinfo.
2812 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002813
2814 // Relocate the copy.
2815 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2816 new_code->Relocate(new_addr - old_addr);
2817
2818#ifdef DEBUG
2819 code->Verify();
2820#endif
2821 return new_code;
2822}
2823
2824
John Reck59135872010-11-02 12:39:01 -07002825MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002826 ASSERT(gc_state_ == NOT_IN_GC);
2827 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002828 // If allocation failures are disallowed, we may allocate in a different
2829 // space when new space is full and the object is not a large object.
2830 AllocationSpace retry_space =
2831 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002832 Object* result;
2833 { MaybeObject* maybe_result =
2834 AllocateRaw(map->instance_size(), space, retry_space);
2835 if (!maybe_result->ToObject(&result)) return maybe_result;
2836 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002837 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002838#ifdef ENABLE_LOGGING_AND_PROFILING
2839 ProducerHeapProfile::RecordJSObjectAllocation(result);
2840#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002841 return result;
2842}
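// A minimal standalone sketch (not part of heap.cc) of the retry-space rule
// above: an allocation requested in new space falls back to the long-lived
// space appropriate for the instance type, while any other request retries in
// the space it originally asked for. Enum values are hypothetical stand-ins.
enum AllocSpaceSketch { kNewSpace, kOldPointerSpace, kOldDataSpace, kLargeObjectSpace };

AllocSpaceSketch RetrySpaceSketch(AllocSpaceSketch requested,
                                  AllocSpaceSketch target_for_type) {
  return (requested != kNewSpace) ? requested : target_for_type;
}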
2843
2844
John Reck59135872010-11-02 12:39:01 -07002845MaybeObject* Heap::InitializeFunction(JSFunction* function,
2846 SharedFunctionInfo* shared,
2847 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002848 ASSERT(!prototype->IsMap());
2849 function->initialize_properties();
2850 function->initialize_elements();
2851 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002852 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002853 function->set_prototype_or_initial_map(prototype);
2854 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002855 function->set_literals(empty_fixed_array());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002856 function->set_next_function_link(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002857 return function;
2858}
2859
2860
John Reck59135872010-11-02 12:39:01 -07002861MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002862 // Allocate the prototype. Make sure to use the object function
2863 // from the function's context, since the function can be from a
2864 // different context.
2865 JSFunction* object_function =
2866 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002867 Object* prototype;
2868 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2869 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2870 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002871 // When creating the prototype for the function we must set its
2872 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002873 Object* result;
2874 { MaybeObject* maybe_result =
2875 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2876 function,
2877 DONT_ENUM);
2878 if (!maybe_result->ToObject(&result)) return maybe_result;
2879 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002880 return prototype;
2881}
2882
2883
John Reck59135872010-11-02 12:39:01 -07002884MaybeObject* Heap::AllocateFunction(Map* function_map,
2885 SharedFunctionInfo* shared,
2886 Object* prototype,
2887 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002888 AllocationSpace space =
2889 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002890 Object* result;
2891 { MaybeObject* maybe_result = Allocate(function_map, space);
2892 if (!maybe_result->ToObject(&result)) return maybe_result;
2893 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002894 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2895}
2896
2897
John Reck59135872010-11-02 12:39:01 -07002898MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002899 // To get fast allocation and map sharing for arguments objects we
2900 // allocate them based on an arguments boilerplate.
2901
2902 // This calls Copy directly rather than using Heap::AllocateRaw so we
2903 // duplicate the check here.
2904 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2905
2906 JSObject* boilerplate =
2907 Top::context()->global_context()->arguments_boilerplate();
2908
Leon Clarkee46be812010-01-19 14:06:41 +00002909 // Check that the size of the boilerplate matches our
2910 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2911 // on the size being a known constant.
2912 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2913
2914 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002915 Object* result;
2916 { MaybeObject* maybe_result =
2917 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2918 if (!maybe_result->ToObject(&result)) return maybe_result;
2919 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002920
2921 // Copy the content. The arguments boilerplate doesn't have any
2922 // fields that point to new space so it's safe to skip the write
2923 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002924 CopyBlock(HeapObject::cast(result)->address(),
2925 boilerplate->address(),
Leon Clarkee46be812010-01-19 14:06:41 +00002926 kArgumentsObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002927
2928 // Set the two properties.
2929 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2930 callee);
2931 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2932 Smi::FromInt(length),
2933 SKIP_WRITE_BARRIER);
2934
2935 // Check the state of the object
2936 ASSERT(JSObject::cast(result)->HasFastProperties());
2937 ASSERT(JSObject::cast(result)->HasFastElements());
2938
2939 return result;
2940}
2941
2942
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002943static bool HasDuplicates(DescriptorArray* descriptors) {
2944 int count = descriptors->number_of_descriptors();
2945 if (count > 1) {
2946 String* prev_key = descriptors->GetKey(0);
2947 for (int i = 1; i != count; i++) {
2948 String* current_key = descriptors->GetKey(i);
2949 if (prev_key == current_key) return true;
2950 prev_key = current_key;
2951 }
2952 }
2953 return false;
2954}
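// A minimal standalone sketch (not part of heap.cc) of the same linear
// duplicate scan, expressed over any already-sorted vector of keys. Like the
// function above, it is only correct because sorting has grouped equal keys
// next to each other.
#include <string>
#include <vector>

bool HasDuplicatesSketch(const std::vector<std::string>& sorted_keys) {
  for (size_t i = 1; i < sorted_keys.size(); i++) {
    if (sorted_keys[i - 1] == sorted_keys[i]) return true;
  }
  return false;
}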
2955
2956
John Reck59135872010-11-02 12:39:01 -07002957MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002958 ASSERT(!fun->has_initial_map());
2959
2960 // First create a new map with the size and number of in-object properties
2961 // suggested by the function.
2962 int instance_size = fun->shared()->CalculateInstanceSize();
2963 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07002964 Object* map_obj;
2965 { MaybeObject* maybe_map_obj =
2966 Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
2967 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
2968 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002969
2970 // Fetch or allocate prototype.
2971 Object* prototype;
2972 if (fun->has_instance_prototype()) {
2973 prototype = fun->instance_prototype();
2974 } else {
John Reck59135872010-11-02 12:39:01 -07002975 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
2976 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2977 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002978 }
2979 Map* map = Map::cast(map_obj);
2980 map->set_inobject_properties(in_object_properties);
2981 map->set_unused_property_fields(in_object_properties);
2982 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01002983 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002984
Andrei Popescu402d9372010-02-26 13:31:12 +00002985 // If the function has only simple this property assignments add
2986 // field descriptors for these to the initial map as the object
2987 // cannot be constructed without having these properties. Guard by
2988 // the inline_new flag so we only change the map if we generate a
2989 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00002990 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00002991 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002992 int count = fun->shared()->this_property_assignments_count();
2993 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002994 // Inline constructor can only handle inobject properties.
2995 fun->shared()->ForbidInlineConstructor();
2996 } else {
John Reck59135872010-11-02 12:39:01 -07002997 Object* descriptors_obj;
2998 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
2999 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
3000 return maybe_descriptors_obj;
3001 }
3002 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003003 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
3004 for (int i = 0; i < count; i++) {
3005 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
3006 ASSERT(name->IsSymbol());
3007 FieldDescriptor field(name, i, NONE);
3008 field.SetEnumerationIndex(i);
3009 descriptors->Set(i, &field);
3010 }
3011 descriptors->SetNextEnumerationIndex(count);
3012 descriptors->SortUnchecked();
3013
3014 // The descriptors may contain duplicates because the compiler does not
3015 // guarantee the uniqueness of property names (it would have required
3016 // quadratic time). Once the descriptors are sorted we can check for
3017 // duplicates in linear time.
3018 if (HasDuplicates(descriptors)) {
3019 fun->shared()->ForbidInlineConstructor();
3020 } else {
3021 map->set_instance_descriptors(descriptors);
3022 map->set_pre_allocated_property_fields(count);
3023 map->set_unused_property_fields(in_object_properties - count);
3024 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003025 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003026 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003027
3028 fun->shared()->StartInobjectSlackTracking(map);
3029
Steve Blocka7e24c12009-10-30 11:49:00 +00003030 return map;
3031}
3032
3033
3034void Heap::InitializeJSObjectFromMap(JSObject* obj,
3035 FixedArray* properties,
3036 Map* map) {
3037 obj->set_properties(properties);
3038 obj->initialize_elements();
3039 // TODO(1240798): Initialize the object's body using valid initial values
3040 // according to the object's initial map. For example, if the map's
3041 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3042 // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
3043 // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
3044 // verification code has to cope with (temporarily) invalid objects. See,
3045 // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003046 Object* filler;
3047 // We cannot always fill with one_pointer_filler_map because objects
3048 // created from API functions expect their internal fields to be initialized
3049 // with undefined_value.
3050 if (map->constructor()->IsJSFunction() &&
3051 JSFunction::cast(map->constructor())->shared()->
3052 IsInobjectSlackTrackingInProgress()) {
3053 // We might want to shrink the object later.
3054 ASSERT(obj->GetInternalFieldCount() == 0);
3055 filler = Heap::one_pointer_filler_map();
3056 } else {
3057 filler = Heap::undefined_value();
3058 }
3059 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00003060}
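// A minimal standalone sketch (not part of heap.cc) of the filler-value choice
// above: while in-object slack tracking may still shrink a freshly mapped
// object, its unused fields are stamped with a recognizable one-pointer
// filler; otherwise they are simply set to undefined. Names are hypothetical.
#include <cassert>

enum BodyFillerSketch { kOnePointerFiller, kUndefinedValue };

BodyFillerSketch ChooseBodyFillerSketch(bool constructor_is_js_function,
                                        bool slack_tracking_in_progress,
                                        int internal_field_count) {
  if (constructor_is_js_function && slack_tracking_in_progress) {
    // Objects that may be shrunk later are expected to have no API internal
    // fields, since those must be initialized with undefined.
    assert(internal_field_count == 0);
    return kOnePointerFiller;
  }
  return kUndefinedValue;
}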
3061
3062
John Reck59135872010-11-02 12:39:01 -07003063MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003064 // JSFunctions should be allocated using AllocateFunction to be
3065 // properly initialized.
3066 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3067
Steve Block8defd9f2010-07-08 12:39:36 +01003068 // Both types of global objects should be allocated using
3069 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00003070 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3071 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3072
3073 // Allocate the backing storage for the properties.
3074 int prop_size =
3075 map->pre_allocated_property_fields() +
3076 map->unused_property_fields() -
3077 map->inobject_properties();
3078 ASSERT(prop_size >= 0);
John Reck59135872010-11-02 12:39:01 -07003079 Object* properties;
3080 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3081 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3082 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003083
3084 // Allocate the JSObject.
3085 AllocationSpace space =
3086 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3087 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003088 Object* obj;
3089 { MaybeObject* maybe_obj = Allocate(map, space);
3090 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3091 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003092
3093 // Initialize the JSObject.
3094 InitializeJSObjectFromMap(JSObject::cast(obj),
3095 FixedArray::cast(properties),
3096 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003097 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003098 return obj;
3099}
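// A minimal standalone sketch (not part of heap.cc) of the backing-store
// sizing rule above: the out-of-object property array holds everything the
// map expects (pre-allocated plus still-unused fields) minus the slots that
// already live inside the object itself.
#include <cassert>

int PropertyBackingStoreSizeSketch(int pre_allocated_property_fields,
                                   int unused_property_fields,
                                   int inobject_properties) {
  int prop_size = pre_allocated_property_fields + unused_property_fields -
                  inobject_properties;
  assert(prop_size >= 0);
  return prop_size;
}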
3100
3101
John Reck59135872010-11-02 12:39:01 -07003102MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3103 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003104 // Allocate the initial map if absent.
3105 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003106 Object* initial_map;
3107 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3108 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3109 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003110 constructor->set_initial_map(Map::cast(initial_map));
3111 Map::cast(initial_map)->set_constructor(constructor);
3112 }
3113 // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003114 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003115 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003116#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003117 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003118 Object* non_failure;
3119 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3120#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003121 return result;
3122}
3123
3124
John Reck59135872010-11-02 12:39:01 -07003125MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003126 ASSERT(constructor->has_initial_map());
3127 Map* map = constructor->initial_map();
3128
3129 // Make sure no field properties are described in the initial map.
3130 // This guarantees us that normalizing the properties does not
3131 // require us to change property values to JSGlobalPropertyCells.
3132 ASSERT(map->NextFreePropertyIndex() == 0);
3133
3134 // Make sure we don't have a ton of pre-allocated slots in the
3135 // global objects. They will be unused once we normalize the object.
3136 ASSERT(map->unused_property_fields() == 0);
3137 ASSERT(map->inobject_properties() == 0);
3138
3139 // Initial size of the backing store to avoid resize of the storage during
3140 // bootstrapping. The size differs between the JS global object and the
3141 // builtins object.
3142 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3143
3144 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003145 Object* obj;
3146 { MaybeObject* maybe_obj =
3147 StringDictionary::Allocate(
3148 map->NumberOfDescribedProperties() * 2 + initial_size);
3149 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3150 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003151 StringDictionary* dictionary = StringDictionary::cast(obj);
3152
3153 // The global object might be created from an object template with accessors.
3154 // Fill these accessors into the dictionary.
3155 DescriptorArray* descs = map->instance_descriptors();
3156 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3157 PropertyDetails details = descs->GetDetails(i);
3158 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3159 PropertyDetails d =
3160 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3161 Object* value = descs->GetCallbacksObject(i);
John Reck59135872010-11-02 12:39:01 -07003162 { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
3163 if (!maybe_value->ToObject(&value)) return maybe_value;
3164 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003165
John Reck59135872010-11-02 12:39:01 -07003166 Object* result;
3167 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3168 if (!maybe_result->ToObject(&result)) return maybe_result;
3169 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003170 dictionary = StringDictionary::cast(result);
3171 }
3172
3173 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003174 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3175 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3176 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003177 JSObject* global = JSObject::cast(obj);
3178 InitializeJSObjectFromMap(global, dictionary, map);
3179
3180 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003181 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3182 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3183 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003184 Map* new_map = Map::cast(obj);
3185
3186 // Set up the global object as a normalized object.
3187 global->set_map(new_map);
3188 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
3189 global->set_properties(dictionary);
3190
3191 // Make sure result is a global object with properties in dictionary.
3192 ASSERT(global->IsGlobalObject());
3193 ASSERT(!global->HasFastProperties());
3194 return global;
3195}
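// A minimal standalone sketch (not part of heap.cc) of the dictionary sizing
// above: capacity is chosen up front so the backing store is not resized
// during bootstrapping. The boolean parameter stands in for the
// JS_GLOBAL_OBJECT_TYPE check.
int GlobalBackingStoreCapacitySketch(int described_properties,
                                     bool is_js_global_object) {
  int initial_size = is_js_global_object ? 64 : 512;
  return described_properties * 2 + initial_size;
}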
3196
3197
John Reck59135872010-11-02 12:39:01 -07003198MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003199 // Never used to copy functions. If functions need to be copied we
3200 // have to be careful to clear the literals array.
3201 ASSERT(!source->IsJSFunction());
3202
3203 // Make the clone.
3204 Map* map = source->map();
3205 int object_size = map->instance_size();
3206 Object* clone;
3207
3208 // If we're forced to always allocate, we use the general allocation
3209 // functions which may leave us with an object in old space.
3210 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003211 { MaybeObject* maybe_clone =
3212 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3213 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3214 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003215 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003216 CopyBlock(clone_address,
3217 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003218 object_size);
3219 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003220 RecordWrites(clone_address,
3221 JSObject::kHeaderSize,
3222 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003223 } else {
John Reck59135872010-11-02 12:39:01 -07003224 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3225 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3226 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003227 ASSERT(Heap::InNewSpace(clone));
3228 // Since we know the clone is allocated in new space, we can copy
3229 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003230 CopyBlock(HeapObject::cast(clone)->address(),
3231 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003232 object_size);
3233 }
3234
3235 FixedArray* elements = FixedArray::cast(source->elements());
3236 FixedArray* properties = FixedArray::cast(source->properties());
3237 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003238 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003239 Object* elem;
3240 { MaybeObject* maybe_elem =
3241 (elements->map() == fixed_cow_array_map()) ?
3242 elements : CopyFixedArray(elements);
3243 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3244 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003245 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3246 }
3247 // Update properties if necessary.
3248 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003249 Object* prop;
3250 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3251 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3252 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003253 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3254 }
3255 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003256#ifdef ENABLE_LOGGING_AND_PROFILING
3257 ProducerHeapProfile::RecordJSObjectAllocation(clone);
3258#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003259 return clone;
3260}
3261
3262
John Reck59135872010-11-02 12:39:01 -07003263MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3264 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003265 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003266 Map* map = constructor->initial_map();
3267
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003268 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003269 // objects allocated using the constructor.
3270 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003271 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003272
3273 // Allocate the backing storage for the properties.
3274 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003275 Object* properties;
3276 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3277 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3278 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003279
3280 // Reset the map for the object.
3281 object->set_map(constructor->initial_map());
3282
3283 // Reinitialize the object from the constructor map.
3284 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3285 return object;
3286}
3287
3288
John Reck59135872010-11-02 12:39:01 -07003289MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3290 PretenureFlag pretenure) {
3291 Object* result;
3292 { MaybeObject* maybe_result =
3293 AllocateRawAsciiString(string.length(), pretenure);
3294 if (!maybe_result->ToObject(&result)) return maybe_result;
3295 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003296
3297 // Copy the characters into the new object.
3298 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3299 for (int i = 0; i < string.length(); i++) {
3300 string_result->SeqAsciiStringSet(i, string[i]);
3301 }
3302 return result;
3303}
3304
3305
Steve Block9fac8402011-05-12 15:51:54 +01003306MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
3307 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003308 // V8 only supports characters in the Basic Multilingual Plane.
3309 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003310 // Count the number of characters in the UTF-8 string so that a
3311 // string of the right length can be allocated below.
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003312 Access<ScannerConstants::Utf8Decoder>
3313 decoder(ScannerConstants::utf8_decoder());
Steve Blocka7e24c12009-10-30 11:49:00 +00003314 decoder->Reset(string.start(), string.length());
3315 int chars = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00003316 while (decoder->has_more()) {
Steve Block9fac8402011-05-12 15:51:54 +01003317 decoder->GetNext();
Steve Blocka7e24c12009-10-30 11:49:00 +00003318 chars++;
3319 }
3320
John Reck59135872010-11-02 12:39:01 -07003321 Object* result;
3322 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3323 if (!maybe_result->ToObject(&result)) return maybe_result;
3324 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003325
3326 // Convert and copy the characters into the new object.
3327 String* string_result = String::cast(result);
3328 decoder->Reset(string.start(), string.length());
3329 for (int i = 0; i < chars; i++) {
3330 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003331 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003332 string_result->Set(i, r);
3333 }
3334 return result;
3335}
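// A minimal standalone sketch (not part of heap.cc) of the two-pass scheme
// above: the first pass only counts code points so a correctly sized string
// can be allocated, the second pass decodes and clamps anything outside the
// Basic Multilingual Plane. For well-formed UTF-8 the count is simply the
// number of non-continuation bytes. The 0xFFFD replacement value is an
// assumption standing in for unibrow::Utf8::kBadChar.
#include <cstdint>
#include <string>

int CountUtf8CodePointsSketch(const std::string& utf8) {
  int chars = 0;
  for (unsigned char byte : utf8) {
    if ((byte & 0xC0) != 0x80) chars++;  // skip 10xxxxxx continuation bytes
  }
  return chars;
}

uint16_t ClampToBmpSketch(uint32_t code_point) {
  const uint32_t kMaxSupportedChar = 0xFFFF;
  const uint16_t kBadChar = 0xFFFD;  // assumed replacement character
  return code_point > kMaxSupportedChar ? kBadChar
                                        : static_cast<uint16_t>(code_point);
}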
3336
3337
John Reck59135872010-11-02 12:39:01 -07003338MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3339 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003340 // Check if the string is an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003341 MaybeObject* maybe_result;
Steve Block9fac8402011-05-12 15:51:54 +01003342 if (String::IsAscii(string.start(), string.length())) {
John Reck59135872010-11-02 12:39:01 -07003343 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003344 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003345 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003346 }
John Reck59135872010-11-02 12:39:01 -07003347 Object* result;
3348 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003349
3350 // Copy the characters into the new object, which may be either ASCII or
3351 // UTF-16.
3352 String* string_result = String::cast(result);
3353 for (int i = 0; i < string.length(); i++) {
3354 string_result->Set(i, string[i]);
3355 }
3356 return result;
3357}
3358
3359
3360Map* Heap::SymbolMapForString(String* string) {
3361 // If the string is in new space it cannot be used as a symbol.
3362 if (InNewSpace(string)) return NULL;
3363
3364 // Find the corresponding symbol map for strings.
3365 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00003366 if (map == ascii_string_map()) return ascii_symbol_map();
3367 if (map == string_map()) return symbol_map();
3368 if (map == cons_string_map()) return cons_symbol_map();
3369 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
3370 if (map == external_string_map()) return external_symbol_map();
3371 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003372 if (map == external_string_with_ascii_data_map()) {
3373 return external_symbol_with_ascii_data_map();
3374 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003375
3376 // No match found.
3377 return NULL;
3378}
3379
3380
John Reck59135872010-11-02 12:39:01 -07003381MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3382 int chars,
3383 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003384 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003385 // Ensure the chars matches the number of characters in the buffer.
3386 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3387 // Determine whether the string is ascii.
3388 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003389 while (buffer->has_more()) {
3390 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3391 is_ascii = false;
3392 break;
3393 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003394 }
3395 buffer->Rewind();
3396
3397 // Compute map and object size.
3398 int size;
3399 Map* map;
3400
3401 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003402 if (chars > SeqAsciiString::kMaxLength) {
3403 return Failure::OutOfMemoryException();
3404 }
Steve Blockd0582a62009-12-15 09:54:21 +00003405 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003406 size = SeqAsciiString::SizeFor(chars);
3407 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003408 if (chars > SeqTwoByteString::kMaxLength) {
3409 return Failure::OutOfMemoryException();
3410 }
Steve Blockd0582a62009-12-15 09:54:21 +00003411 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003412 size = SeqTwoByteString::SizeFor(chars);
3413 }
3414
3415 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003416 Object* result;
3417 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3418 ? lo_space_->AllocateRaw(size)
3419 : old_data_space_->AllocateRaw(size);
3420 if (!maybe_result->ToObject(&result)) return maybe_result;
3421 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003422
3423 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003424 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003425 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003426 answer->set_length(chars);
3427 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003428
3429 ASSERT_EQ(size, answer->Size());
3430
3431 // Fill in the characters.
3432 for (int i = 0; i < chars; i++) {
3433 answer->Set(i, buffer->GetNext());
3434 }
3435 return answer;
3436}
3437
3438
John Reck59135872010-11-02 12:39:01 -07003439MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003440 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3441 return Failure::OutOfMemoryException();
3442 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003443
3444 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003445 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003446
Leon Clarkee46be812010-01-19 14:06:41 +00003447 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3448 AllocationSpace retry_space = OLD_DATA_SPACE;
3449
Steve Blocka7e24c12009-10-30 11:49:00 +00003450 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003451 if (size > kMaxObjectSizeInNewSpace) {
3452 // Allocate in large object space, retry space will be ignored.
3453 space = LO_SPACE;
3454 } else if (size > MaxObjectSizeInPagedSpace()) {
3455 // Allocate in new space, retry in large object space.
3456 retry_space = LO_SPACE;
3457 }
3458 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3459 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003460 }
John Reck59135872010-11-02 12:39:01 -07003461 Object* result;
3462 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3463 if (!maybe_result->ToObject(&result)) return maybe_result;
3464 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003465
Steve Blocka7e24c12009-10-30 11:49:00 +00003466 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003467 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003468 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003469 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003470 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3471 return result;
3472}
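// A minimal standalone sketch (not part of heap.cc) of the space selection
// above for raw strings: tenured requests start in old data space, others in
// new space; objects too large for a new-space page go straight to large
// object space, and objects that fit in new space but not in a paged space
// keep large object space as the retry target. Thresholds are hypothetical
// parameters standing in for kMaxObjectSizeInNewSpace and
// MaxObjectSizeInPagedSpace().
#include <utility>

enum StringSpaceSketch { kNewSp, kOldDataSp, kLargeObjectSp };

std::pair<StringSpaceSketch, StringSpaceSketch> ChooseStringSpacesSketch(
    int size, bool tenured, int max_new_space_object, int max_paged_object) {
  StringSpaceSketch space = tenured ? kOldDataSp : kNewSp;
  StringSpaceSketch retry_space = kOldDataSp;
  if (space == kNewSp) {
    if (size > max_new_space_object) {
      space = kLargeObjectSp;        // retry space will be ignored
    } else if (size > max_paged_object) {
      retry_space = kLargeObjectSp;  // retry in large object space
    }
  } else if (size > max_paged_object) {
    space = kLargeObjectSp;
  }
  return std::make_pair(space, retry_space);
}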
3473
3474
John Reck59135872010-11-02 12:39:01 -07003475MaybeObject* Heap::AllocateRawTwoByteString(int length,
3476 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003477 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3478 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003479 }
Leon Clarkee46be812010-01-19 14:06:41 +00003480 int size = SeqTwoByteString::SizeFor(length);
3481 ASSERT(size <= SeqTwoByteString::kMaxSize);
3482 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3483 AllocationSpace retry_space = OLD_DATA_SPACE;
3484
3485 if (space == NEW_SPACE) {
3486 if (size > kMaxObjectSizeInNewSpace) {
3487 // Allocate in large object space, retry space will be ignored.
3488 space = LO_SPACE;
3489 } else if (size > MaxObjectSizeInPagedSpace()) {
3490 // Allocate in new space, retry in large object space.
3491 retry_space = LO_SPACE;
3492 }
3493 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3494 space = LO_SPACE;
3495 }
John Reck59135872010-11-02 12:39:01 -07003496 Object* result;
3497 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3498 if (!maybe_result->ToObject(&result)) return maybe_result;
3499 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003500
Steve Blocka7e24c12009-10-30 11:49:00 +00003501 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003502 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003503 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003504 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003505 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3506 return result;
3507}
3508
3509
John Reck59135872010-11-02 12:39:01 -07003510MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003511 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003512 Object* result;
3513 { MaybeObject* maybe_result =
3514 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3515 if (!maybe_result->ToObject(&result)) return maybe_result;
3516 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003517 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003518 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3519 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003520 return result;
3521}
3522
3523
John Reck59135872010-11-02 12:39:01 -07003524MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003525 if (length < 0 || length > FixedArray::kMaxLength) {
3526 return Failure::OutOfMemoryException();
3527 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003528 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003529 // Use the general function if we're forced to always allocate.
3530 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3531 // Allocate the raw data for a fixed array.
3532 int size = FixedArray::SizeFor(length);
3533 return size <= kMaxObjectSizeInNewSpace
3534 ? new_space_.AllocateRaw(size)
3535 : lo_space_->AllocateRawFixedArray(size);
3536}
3537
3538
John Reck59135872010-11-02 12:39:01 -07003539MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003540 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003541 Object* obj;
3542 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3543 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3544 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003545 if (Heap::InNewSpace(obj)) {
3546 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003547 dst->set_map(map);
3548 CopyBlock(dst->address() + kPointerSize,
3549 src->address() + kPointerSize,
3550 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003551 return obj;
3552 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003553 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003554 FixedArray* result = FixedArray::cast(obj);
3555 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003556
Steve Blocka7e24c12009-10-30 11:49:00 +00003557 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003558 AssertNoAllocation no_gc;
3559 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003560 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3561 return result;
3562}
3563
3564
John Reck59135872010-11-02 12:39:01 -07003565MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003566 ASSERT(length >= 0);
3567 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003568 Object* result;
3569 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3570 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003571 }
John Reck59135872010-11-02 12:39:01 -07003572 // Initialize header.
3573 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3574 array->set_map(fixed_array_map());
3575 array->set_length(length);
3576 // Initialize body.
3577 ASSERT(!Heap::InNewSpace(undefined_value()));
3578 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003579 return result;
3580}
3581
3582
John Reck59135872010-11-02 12:39:01 -07003583MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003584 if (length < 0 || length > FixedArray::kMaxLength) {
3585 return Failure::OutOfMemoryException();
3586 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003587
Leon Clarkee46be812010-01-19 14:06:41 +00003588 AllocationSpace space =
3589 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003590 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003591 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3592 // Too big for new space.
3593 space = LO_SPACE;
3594 } else if (space == OLD_POINTER_SPACE &&
3595 size > MaxObjectSizeInPagedSpace()) {
3596 // Too big for old pointer space.
3597 space = LO_SPACE;
3598 }
3599
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003600 AllocationSpace retry_space =
3601 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3602
3603 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003604}
3605
3606
John Reck59135872010-11-02 12:39:01 -07003607MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
3608 int length,
3609 PretenureFlag pretenure,
3610 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003611 ASSERT(length >= 0);
3612 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3613 if (length == 0) return Heap::empty_fixed_array();
3614
3615 ASSERT(!Heap::InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003616 Object* result;
3617 { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
3618 if (!maybe_result->ToObject(&result)) return maybe_result;
3619 }
Steve Block6ded16b2010-05-10 14:33:55 +01003620
3621 HeapObject::cast(result)->set_map(Heap::fixed_array_map());
3622 FixedArray* array = FixedArray::cast(result);
3623 array->set_length(length);
3624 MemsetPointer(array->data_start(), filler, length);
3625 return array;
3626}
3627
3628
John Reck59135872010-11-02 12:39:01 -07003629MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003630 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3631}
3632
3633
John Reck59135872010-11-02 12:39:01 -07003634MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3635 PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003636 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
3637}
3638
3639
John Reck59135872010-11-02 12:39:01 -07003640MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003641 if (length == 0) return empty_fixed_array();
3642
John Reck59135872010-11-02 12:39:01 -07003643 Object* obj;
3644 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3645 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3646 }
Steve Block6ded16b2010-05-10 14:33:55 +01003647
3648 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3649 FixedArray::cast(obj)->set_length(length);
3650 return obj;
3651}
3652
3653
John Reck59135872010-11-02 12:39:01 -07003654MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3655 Object* result;
3656 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
3657 if (!maybe_result->ToObject(&result)) return maybe_result;
3658 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003659 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003660 ASSERT(result->IsHashTable());
3661 return result;
3662}
3663
3664
John Reck59135872010-11-02 12:39:01 -07003665MaybeObject* Heap::AllocateGlobalContext() {
3666 Object* result;
3667 { MaybeObject* maybe_result =
3668 Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3669 if (!maybe_result->ToObject(&result)) return maybe_result;
3670 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003671 Context* context = reinterpret_cast<Context*>(result);
3672 context->set_map(global_context_map());
3673 ASSERT(context->IsGlobalContext());
3674 ASSERT(result->IsContext());
3675 return result;
3676}
3677
3678
John Reck59135872010-11-02 12:39:01 -07003679MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003680 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003681 Object* result;
3682 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
3683 if (!maybe_result->ToObject(&result)) return maybe_result;
3684 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003685 Context* context = reinterpret_cast<Context*>(result);
3686 context->set_map(context_map());
3687 context->set_closure(function);
3688 context->set_fcontext(context);
3689 context->set_previous(NULL);
3690 context->set_extension(NULL);
3691 context->set_global(function->context()->global());
3692 ASSERT(!context->IsGlobalContext());
3693 ASSERT(context->is_function_context());
3694 ASSERT(result->IsContext());
3695 return result;
3696}
3697
3698
John Reck59135872010-11-02 12:39:01 -07003699MaybeObject* Heap::AllocateWithContext(Context* previous,
3700 JSObject* extension,
3701 bool is_catch_context) {
3702 Object* result;
3703 { MaybeObject* maybe_result =
3704 Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3705 if (!maybe_result->ToObject(&result)) return maybe_result;
3706 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003707 Context* context = reinterpret_cast<Context*>(result);
3708 context->set_map(is_catch_context ? catch_context_map() : context_map());
3709 context->set_closure(previous->closure());
3710 context->set_fcontext(previous->fcontext());
3711 context->set_previous(previous);
3712 context->set_extension(extension);
3713 context->set_global(previous->global());
3714 ASSERT(!context->IsGlobalContext());
3715 ASSERT(!context->is_function_context());
3716 ASSERT(result->IsContext());
3717 return result;
3718}
3719
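// Quick reference for the two context allocators above (slot names follow the
// setters used in the code; this is a summary, not additional behaviour):
//
//   function context:  closure = the function, fcontext = itself,
//                      previous = NULL, extension = NULL,
//                      global = the function's context's global object
//   with/catch ctx:    closure, fcontext and global copied from `previous`,
//                      previous = the enclosing context,
//                      extension = the supplied JSObject (the with-target, or
//                      the object holding the catch variable when
//                      is_catch_context is true)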
3720
John Reck59135872010-11-02 12:39:01 -07003721MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003722 Map* map;
3723 switch (type) {
3724#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3725STRUCT_LIST(MAKE_CASE)
3726#undef MAKE_CASE
3727 default:
3728 UNREACHABLE();
3729 return Failure::InternalError();
3730 }
3731 int size = map->instance_size();
3732 AllocationSpace space =
3733 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003734 Object* result;
3735 { MaybeObject* maybe_result = Heap::Allocate(map, space);
3736 if (!maybe_result->ToObject(&result)) return maybe_result;
3737 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003738 Struct::cast(result)->InitializeBody(size);
3739 return result;
3740}
3741
3742
3743bool Heap::IdleNotification() {
3744 static const int kIdlesBeforeScavenge = 4;
3745 static const int kIdlesBeforeMarkSweep = 7;
3746 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003747 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
3748 static const int kGCsBetweenCleanup = 4;
Steve Blocka7e24c12009-10-30 11:49:00 +00003749 static int number_idle_notifications = 0;
3750 static int last_gc_count = gc_count_;
3751
Steve Block6ded16b2010-05-10 14:33:55 +01003752 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003753 bool finished = false;
3754
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003755  // Reset the idle notification count once at least kGCsBetweenCleanup
 3756  // garbage collections have taken place since the last reset. This allows
 3757  // another round of idle-notification cleanup after the mutator has done
 3758  // enough work to provoke several garbage collections on its own.
3759 if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
3760 number_idle_notifications =
3761 Min(number_idle_notifications + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003762 } else {
3763 number_idle_notifications = 0;
3764 last_gc_count = gc_count_;
3765 }
3766
3767 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003768 if (contexts_disposed_ > 0) {
3769 HistogramTimerScope scope(&Counters::gc_context);
3770 CollectAllGarbage(false);
3771 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003772 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003773 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003774 new_space_.Shrink();
3775 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003776 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003777 // Before doing the mark-sweep collections we clear the
3778 // compilation cache to avoid hanging on to source code and
3779 // generated code for cached functions.
3780 CompilationCache::Clear();
3781
Steve Blocka7e24c12009-10-30 11:49:00 +00003782 CollectAllGarbage(false);
3783 new_space_.Shrink();
3784 last_gc_count = gc_count_;
3785
3786 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3787 CollectAllGarbage(true);
3788 new_space_.Shrink();
3789 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003790 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003791
3792 } else if (contexts_disposed_ > 0) {
3793 if (FLAG_expose_gc) {
3794 contexts_disposed_ = 0;
3795 } else {
3796 HistogramTimerScope scope(&Counters::gc_context);
3797 CollectAllGarbage(false);
3798 last_gc_count = gc_count_;
3799 }
 3800    // If this is the first idle notification, reset the notification
 3801    // count so that idle notifications caused by context disposal
 3802    // garbage collections do not kick off an overly aggressive idle
 3803    // GC cycle.
3804 if (number_idle_notifications <= 1) {
3805 number_idle_notifications = 0;
3806 uncommit = false;
3807 }
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003808 } else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
3809 // If we have received more than kIdlesBeforeMarkCompact idle
3810 // notifications we do not perform any cleanup because we don't
3811 // expect to gain much by doing so.
3812 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003813 }
3814
Steve Block6ded16b2010-05-10 14:33:55 +01003815 // Make sure that we have no pending context disposals and
3816 // conditionally uncommit from space.
3817 ASSERT(contexts_disposed_ == 0);
3818 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003819 return finished;
3820}
3821
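// Rough timeline of the idle heuristic above for a run of consecutive idle
// notifications with no other GC activity (thresholds are the constants
// defined at the top of the function; the embedder-facing entry point is
// assumed to be v8::V8::IdleNotification(), which forwards here):
//
//   notification #4  -> scavenge (or a full GC if contexts were disposed),
//                       shrink new space
//   notification #7  -> clear the compilation cache, non-compacting full GC
//   notification #8  -> compacting full GC, return true ("finished")
//   notification #9+ -> no further cleanup, keep returning true
//
// Receiving kGCsBetweenCleanup garbage collections for other reasons resets
// the counter, so a busy VM starts the idle round over from the beginning.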
3822
3823#ifdef DEBUG
3824
3825void Heap::Print() {
3826 if (!HasBeenSetup()) return;
3827 Top::PrintStack();
3828 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003829 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3830 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003831}
3832
3833
3834void Heap::ReportCodeStatistics(const char* title) {
3835 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3836 PagedSpace::ResetCodeStatistics();
3837 // We do not look for code in new space, map space, or old space. If code
3838 // somehow ends up in those spaces, we would miss it here.
3839 code_space_->CollectCodeStatistics();
3840 lo_space_->CollectCodeStatistics();
3841 PagedSpace::ReportCodeStatistics();
3842}
3843
3844
3845// This function expects that NewSpace's allocated objects histogram is
3846// populated (via a call to CollectStatistics or else as a side effect of a
3847// just-completed scavenge collection).
3848void Heap::ReportHeapStatistics(const char* title) {
3849 USE(title);
3850 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3851 title, gc_count_);
3852 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003853 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3854 old_gen_promotion_limit_);
3855 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3856 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003857
3858 PrintF("\n");
3859 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3860 GlobalHandles::PrintStats();
3861 PrintF("\n");
3862
3863 PrintF("Heap statistics : ");
3864 MemoryAllocator::ReportStatistics();
3865 PrintF("To space : ");
3866 new_space_.ReportStatistics();
3867 PrintF("Old pointer space : ");
3868 old_pointer_space_->ReportStatistics();
3869 PrintF("Old data space : ");
3870 old_data_space_->ReportStatistics();
3871 PrintF("Code space : ");
3872 code_space_->ReportStatistics();
3873 PrintF("Map space : ");
3874 map_space_->ReportStatistics();
3875 PrintF("Cell space : ");
3876 cell_space_->ReportStatistics();
3877 PrintF("Large object space : ");
3878 lo_space_->ReportStatistics();
3879 PrintF(">>>>>> ========================================= >>>>>>\n");
3880}
3881
3882#endif // DEBUG
3883
3884bool Heap::Contains(HeapObject* value) {
3885 return Contains(value->address());
3886}
3887
3888
3889bool Heap::Contains(Address addr) {
3890 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3891 return HasBeenSetup() &&
3892 (new_space_.ToSpaceContains(addr) ||
3893 old_pointer_space_->Contains(addr) ||
3894 old_data_space_->Contains(addr) ||
3895 code_space_->Contains(addr) ||
3896 map_space_->Contains(addr) ||
3897 cell_space_->Contains(addr) ||
3898 lo_space_->SlowContains(addr));
3899}
3900
3901
3902bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3903 return InSpace(value->address(), space);
3904}
3905
3906
3907bool Heap::InSpace(Address addr, AllocationSpace space) {
3908 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3909 if (!HasBeenSetup()) return false;
3910
3911 switch (space) {
3912 case NEW_SPACE:
3913 return new_space_.ToSpaceContains(addr);
3914 case OLD_POINTER_SPACE:
3915 return old_pointer_space_->Contains(addr);
3916 case OLD_DATA_SPACE:
3917 return old_data_space_->Contains(addr);
3918 case CODE_SPACE:
3919 return code_space_->Contains(addr);
3920 case MAP_SPACE:
3921 return map_space_->Contains(addr);
3922 case CELL_SPACE:
3923 return cell_space_->Contains(addr);
3924 case LO_SPACE:
3925 return lo_space_->SlowContains(addr);
3926 }
3927
3928 return false;
3929}
3930
3931
3932#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003933static void DummyScavengePointer(HeapObject** p) {
3934}
3935
3936
3937static void VerifyPointersUnderWatermark(
3938 PagedSpace* space,
3939 DirtyRegionCallback visit_dirty_region) {
3940 PageIterator it(space, PageIterator::PAGES_IN_USE);
3941
3942 while (it.has_next()) {
3943 Page* page = it.next();
3944 Address start = page->ObjectAreaStart();
3945 Address end = page->AllocationWatermark();
3946
3947 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3948 start,
3949 end,
3950 visit_dirty_region,
3951 &DummyScavengePointer);
3952 }
3953}
3954
3955
3956static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3957 LargeObjectIterator it(space);
3958 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3959 if (object->IsFixedArray()) {
3960 Address slot_address = object->address();
3961 Address end = object->address() + object->Size();
3962
3963 while (slot_address < end) {
3964 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
 3965        // When we are not in GC, the Heap::InNewSpace() predicate
 3966        // internally asserts that any pointer satisfying it points
 3967        // into the active semispace, which is the property verified here.
3968 Heap::InNewSpace(*slot);
3969 slot_address += kPointerSize;
3970 }
3971 }
3972 }
3973}
3974
3975
Steve Blocka7e24c12009-10-30 11:49:00 +00003976void Heap::Verify() {
3977 ASSERT(HasBeenSetup());
3978
3979 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003980 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003981
3982 new_space_.Verify();
3983
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003984 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3985 old_pointer_space_->Verify(&dirty_regions_visitor);
3986 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003987
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003988 VerifyPointersUnderWatermark(old_pointer_space_,
3989 &IteratePointersInDirtyRegion);
3990 VerifyPointersUnderWatermark(map_space_,
3991 &IteratePointersInDirtyMapsRegion);
3992 VerifyPointersUnderWatermark(lo_space_);
3993
3994 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3995 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3996
3997 VerifyPointersVisitor no_dirty_regions_visitor;
3998 old_data_space_->Verify(&no_dirty_regions_visitor);
3999 code_space_->Verify(&no_dirty_regions_visitor);
4000 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004001
4002 lo_space_->Verify();
4003}
4004#endif // DEBUG
4005
4006
John Reck59135872010-11-02 12:39:01 -07004007MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004008 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004009 Object* new_table;
4010 { MaybeObject* maybe_new_table =
4011 symbol_table()->LookupSymbol(string, &symbol);
4012 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4013 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004014 // Can't use set_symbol_table because SymbolTable::cast knows that
4015 // SymbolTable is a singleton and checks for identity.
4016 roots_[kSymbolTableRootIndex] = new_table;
4017 ASSERT(symbol != NULL);
4018 return symbol;
4019}
4020
4021
Steve Block9fac8402011-05-12 15:51:54 +01004022MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
4023 Object* symbol = NULL;
4024 Object* new_table;
4025 { MaybeObject* maybe_new_table =
4026 symbol_table()->LookupAsciiSymbol(string, &symbol);
4027 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4028 }
4029 // Can't use set_symbol_table because SymbolTable::cast knows that
4030 // SymbolTable is a singleton and checks for identity.
4031 roots_[kSymbolTableRootIndex] = new_table;
4032 ASSERT(symbol != NULL);
4033 return symbol;
4034}
4035
4036
4037MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
4038 Object* symbol = NULL;
4039 Object* new_table;
4040 { MaybeObject* maybe_new_table =
4041 symbol_table()->LookupTwoByteSymbol(string, &symbol);
4042 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4043 }
4044 // Can't use set_symbol_table because SymbolTable::cast knows that
4045 // SymbolTable is a singleton and checks for identity.
4046 roots_[kSymbolTableRootIndex] = new_table;
4047 ASSERT(symbol != NULL);
4048 return symbol;
4049}
4050
4051
John Reck59135872010-11-02 12:39:01 -07004052MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004053 if (string->IsSymbol()) return string;
4054 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004055 Object* new_table;
4056 { MaybeObject* maybe_new_table =
4057 symbol_table()->LookupString(string, &symbol);
4058 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4059 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004060 // Can't use set_symbol_table because SymbolTable::cast knows that
4061 // SymbolTable is a singleton and checks for identity.
4062 roots_[kSymbolTableRootIndex] = new_table;
4063 ASSERT(symbol != NULL);
4064 return symbol;
4065}
4066
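// The symbol lookup functions above may grow the symbol table; the (possibly
// new) table is stored straight into roots_ because SymbolTable::cast() checks
// identity against that root. A hedged usage sketch (CStrVector is assumed to
// be the usual Vector<const char> helper from utils.h):
//
//   Object* name;
//   { MaybeObject* maybe_name = Heap::LookupAsciiSymbol(CStrVector("foo"));
//     if (!maybe_name->ToObject(&name)) return maybe_name;
//   }
//   // `name` is now a symbol, so pointer identity can be used to compare it
//   // against other symbols.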
4067
4068bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4069 if (string->IsSymbol()) {
4070 *symbol = string;
4071 return true;
4072 }
4073 return symbol_table()->LookupSymbolIfExists(string, symbol);
4074}
4075
4076
4077#ifdef DEBUG
4078void Heap::ZapFromSpace() {
4079 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
4080 for (Address a = new_space_.FromSpaceLow();
4081 a < new_space_.FromSpaceHigh();
4082 a += kPointerSize) {
4083 Memory::Address_at(a) = kFromSpaceZapValue;
4084 }
4085}
4086#endif // DEBUG
4087
4088
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004089bool Heap::IteratePointersInDirtyRegion(Address start,
4090 Address end,
4091 ObjectSlotCallback copy_object_func) {
4092 Address slot_address = start;
4093 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004094
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004095 while (slot_address < end) {
4096 Object** slot = reinterpret_cast<Object**>(slot_address);
4097 if (Heap::InNewSpace(*slot)) {
4098 ASSERT((*slot)->IsHeapObject());
4099 copy_object_func(reinterpret_cast<HeapObject**>(slot));
4100 if (Heap::InNewSpace(*slot)) {
4101 ASSERT((*slot)->IsHeapObject());
4102 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004103 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004104 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004105 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004106 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004107 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004108}
4109
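// The region visitor above is the core of the write-barrier rescan: a region
// stays dirty only if, after the callback has run, some slot still points into
// new space. An illustration of why the slot is tested twice (the object
// placement is made up purely for the example):
//
//   before callback:  *slot -> object in to-space        (first check true)
//   callback copies:  object gets promoted to old space
//   after callback:   *slot -> promoted copy             (second check false)
//
// In that case the slot no longer needs a dirty mark; only slots whose target
// remains in new space keep the region dirty for the next scavenge.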
4110
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004111// Compute start address of the first map following given addr.
4112static inline Address MapStartAlign(Address addr) {
4113 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4114 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4115}
Steve Blocka7e24c12009-10-30 11:49:00 +00004116
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004117
4118// Compute end address of the first map preceding given addr.
4119static inline Address MapEndAlign(Address addr) {
4120 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4121 return page + ((addr - page) / Map::kSize * Map::kSize);
4122}
4123
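// Worked example for the two alignment helpers above (numbers are symbolic;
// only the arithmetic matters). Let the page's object area start at P and let
// S = Map::kSize:
//
//   MapStartAlign(P + 2*S + 7)  ->  P + 3*S   // first map boundary at or
//                                             // after the address
//   MapEndAlign(P + 2*S + 7)    ->  P + 2*S   // end of the last whole map at
//                                             // or before the address
//
// Map space pages contain nothing but maps laid out back to back, which is
// what lets the dirty-maps iterator below walk them in Map::kSize strides.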
4124
4125static bool IteratePointersInDirtyMaps(Address start,
4126 Address end,
4127 ObjectSlotCallback copy_object_func) {
4128 ASSERT(MapStartAlign(start) == start);
4129 ASSERT(MapEndAlign(end) == end);
4130
4131 Address map_address = start;
4132 bool pointers_to_new_space_found = false;
4133
4134 while (map_address < end) {
4135 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
4136 ASSERT(Memory::Object_at(map_address)->IsMap());
4137
4138 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4139 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4140
4141 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
4142 pointer_fields_end,
4143 copy_object_func)) {
4144 pointers_to_new_space_found = true;
4145 }
4146
4147 map_address += Map::kSize;
4148 }
4149
4150 return pointers_to_new_space_found;
4151}
4152
4153
4154bool Heap::IteratePointersInDirtyMapsRegion(
4155 Address start,
4156 Address end,
4157 ObjectSlotCallback copy_object_func) {
4158 Address map_aligned_start = MapStartAlign(start);
4159 Address map_aligned_end = MapEndAlign(end);
4160
4161 bool contains_pointers_to_new_space = false;
4162
4163 if (map_aligned_start != start) {
4164 Address prev_map = map_aligned_start - Map::kSize;
4165 ASSERT(Memory::Object_at(prev_map)->IsMap());
4166
4167 Address pointer_fields_start =
4168 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4169
4170 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004171 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004172
4173 contains_pointers_to_new_space =
4174 IteratePointersInDirtyRegion(pointer_fields_start,
4175 pointer_fields_end,
4176 copy_object_func)
4177 || contains_pointers_to_new_space;
4178 }
4179
4180 contains_pointers_to_new_space =
4181 IteratePointersInDirtyMaps(map_aligned_start,
4182 map_aligned_end,
4183 copy_object_func)
4184 || contains_pointers_to_new_space;
4185
4186 if (map_aligned_end != end) {
4187 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4188
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004189 Address pointer_fields_start =
4190 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004191
4192 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004193 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004194
4195 contains_pointers_to_new_space =
4196 IteratePointersInDirtyRegion(pointer_fields_start,
4197 pointer_fields_end,
4198 copy_object_func)
4199 || contains_pointers_to_new_space;
4200 }
4201
4202 return contains_pointers_to_new_space;
4203}
4204
4205
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004206void Heap::IterateAndMarkPointersToFromSpace(Address start,
4207 Address end,
4208 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004209 Address slot_address = start;
4210 Page* page = Page::FromAddress(start);
4211
4212 uint32_t marks = page->GetRegionMarks();
4213
4214 while (slot_address < end) {
4215 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004216 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004217 ASSERT((*slot)->IsHeapObject());
4218 callback(reinterpret_cast<HeapObject**>(slot));
4219 if (Heap::InNewSpace(*slot)) {
4220 ASSERT((*slot)->IsHeapObject());
4221 marks |= page->GetRegionMaskForAddress(slot_address);
4222 }
4223 }
4224 slot_address += kPointerSize;
4225 }
4226
4227 page->SetRegionMarks(marks);
4228}
4229
4230
4231uint32_t Heap::IterateDirtyRegions(
4232 uint32_t marks,
4233 Address area_start,
4234 Address area_end,
4235 DirtyRegionCallback visit_dirty_region,
4236 ObjectSlotCallback copy_object_func) {
4237 uint32_t newmarks = 0;
4238 uint32_t mask = 1;
4239
4240 if (area_start >= area_end) {
4241 return newmarks;
4242 }
4243
4244 Address region_start = area_start;
4245
4246 // area_start does not necessarily coincide with start of the first region.
4247 // Thus to calculate the beginning of the next region we have to align
4248 // area_start by Page::kRegionSize.
4249 Address second_region =
4250 reinterpret_cast<Address>(
4251 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4252 ~Page::kRegionAlignmentMask);
4253
4254 // Next region might be beyond area_end.
4255 Address region_end = Min(second_region, area_end);
4256
4257 if (marks & mask) {
4258 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4259 newmarks |= mask;
4260 }
4261 }
4262 mask <<= 1;
4263
 4264  // Iterate subsequent regions that lie fully inside [area_start, area_end[.
4265 region_start = region_end;
4266 region_end = region_start + Page::kRegionSize;
4267
4268 while (region_end <= area_end) {
4269 if (marks & mask) {
4270 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4271 newmarks |= mask;
4272 }
4273 }
4274
4275 region_start = region_end;
4276 region_end = region_start + Page::kRegionSize;
4277
4278 mask <<= 1;
4279 }
4280
4281 if (region_start != area_end) {
 4282    // A small piece of the area was left unvisited because area_end does
 4283    // not coincide with a region end. Check whether the region covering
 4284    // the last part of the area is dirty.
4285 if (marks & mask) {
4286 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
4287 newmarks |= mask;
4288 }
4289 }
4290 }
4291
4292 return newmarks;
4293}
4294
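// A sketch of how the marks word is consumed above, assuming the layout used
// elsewhere in this codebase: one uint32_t of dirty bits per page, with bit i
// covering the i-th Page::kRegionSize-sized region (with an 8KB page and 32
// bits that works out to 256 bytes per region, although the exact figure is
// not relied on here):
//
//   marks    = 0b...00010110   // regions 1, 2 and 4 are dirty
//   region 0 : bit clear, skipped
//   region 1 : visited; its bit is copied into newmarks only if a pointer to
//              new space survives the callback
//   ...
//
// The returned newmarks word can therefore only clear bits relative to marks,
// never set new ones.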
4295
4296
4297void Heap::IterateDirtyRegions(
4298 PagedSpace* space,
4299 DirtyRegionCallback visit_dirty_region,
4300 ObjectSlotCallback copy_object_func,
4301 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004302
4303 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004304
Steve Blocka7e24c12009-10-30 11:49:00 +00004305 while (it.has_next()) {
4306 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004307 uint32_t marks = page->GetRegionMarks();
4308
4309 if (marks != Page::kAllRegionsCleanMarks) {
4310 Address start = page->ObjectAreaStart();
4311
4312 // Do not try to visit pointers beyond page allocation watermark.
4313 // Page can contain garbage pointers there.
4314 Address end;
4315
4316 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4317 page->IsWatermarkValid()) {
4318 end = page->AllocationWatermark();
4319 } else {
4320 end = page->CachedAllocationWatermark();
4321 }
4322
4323 ASSERT(space == old_pointer_space_ ||
4324 (space == map_space_ &&
4325 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4326
4327 page->SetRegionMarks(IterateDirtyRegions(marks,
4328 start,
4329 end,
4330 visit_dirty_region,
4331 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004332 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004333
4334 // Mark page watermark as invalid to maintain watermark validity invariant.
4335 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4336 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004337 }
4338}
4339
4340
Steve Blockd0582a62009-12-15 09:54:21 +00004341void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4342 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004343 IterateWeakRoots(v, mode);
4344}
4345
4346
4347void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004348 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004349 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004350 if (mode != VISIT_ALL_IN_SCAVENGE) {
4351 // Scavenge collections have special processing for this.
4352 ExternalStringTable::Iterate(v);
4353 }
4354 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004355}
4356
4357
Steve Blockd0582a62009-12-15 09:54:21 +00004358void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004359 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004360 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004361
Iain Merrick75681382010-08-19 15:07:18 +01004362 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004363 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004364
4365 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004366 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004367 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004368 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004369 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004370 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004371
4372#ifdef ENABLE_DEBUGGER_SUPPORT
4373 Debug::Iterate(v);
4374#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004375 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004376 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004377 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004378
4379 // Iterate over local handles in handle scopes.
4380 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004381 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004382
Leon Clarkee46be812010-01-19 14:06:41 +00004383 // Iterate over the builtin code objects and code stubs in the
4384 // heap. Note that it is not necessary to iterate over code objects
4385 // on scavenge collections.
4386 if (mode != VISIT_ALL_IN_SCAVENGE) {
4387 Builtins::IterateBuiltins(v);
4388 }
Steve Blockd0582a62009-12-15 09:54:21 +00004389 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004390
4391 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004392 if (mode == VISIT_ONLY_STRONG) {
4393 GlobalHandles::IterateStrongRoots(v);
4394 } else {
4395 GlobalHandles::IterateAllRoots(v);
4396 }
4397 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004398
4399 // Iterate over pointers being held by inactive threads.
4400 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004401 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004402
4403 // Iterate over the pointers the Serialization/Deserialization code is
4404 // holding.
4405 // During garbage collection this keeps the partial snapshot cache alive.
4406 // During deserialization of the startup snapshot this creates the partial
4407 // snapshot cache and deserializes the objects it refers to. During
4408 // serialization this does nothing, since the partial snapshot cache is
4409 // empty. However the next thing we do is create the partial snapshot,
4410 // filling up the partial snapshot cache with objects it needs as we go.
4411 SerializerDeserializer::Iterate(v);
4412 // We don't do a v->Synchronize call here, because in debug mode that will
4413 // output a flag to the snapshot. However at this point the serializer and
4414 // deserializer are deliberately a little unsynchronized (see above) so the
4415 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004416}
Steve Blocka7e24c12009-10-30 11:49:00 +00004417
4418
4419// Flag is set when the heap has been configured. The heap can be repeatedly
4420// configured through the API until it is set up.
4421static bool heap_configured = false;
4422
4423// TODO(1236194): Since the heap size is configurable on the command line
4424// and through the API, we should gracefully handle the case that the heap
4425// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004426bool Heap::ConfigureHeap(int max_semispace_size,
4427 int max_old_gen_size,
4428 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004429 if (HasBeenSetup()) return false;
4430
Steve Block3ce2e202009-11-05 08:53:23 +00004431 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4432
4433 if (Snapshot::IsEnabled()) {
4434 // If we are using a snapshot we always reserve the default amount
4435 // of memory for each semispace because code in the snapshot has
4436 // write-barrier code that relies on the size and alignment of new
4437 // space. We therefore cannot use a larger max semispace size
4438 // than the default reserved semispace size.
4439 if (max_semispace_size_ > reserved_semispace_size_) {
4440 max_semispace_size_ = reserved_semispace_size_;
4441 }
4442 } else {
4443 // If we are not using snapshots we reserve space for the actual
4444 // max semispace size.
4445 reserved_semispace_size_ = max_semispace_size_;
4446 }
4447
4448 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004449 if (max_executable_size > 0) {
4450 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4451 }
4452
4453 // The max executable size must be less than or equal to the max old
4454 // generation size.
4455 if (max_executable_size_ > max_old_generation_size_) {
4456 max_executable_size_ = max_old_generation_size_;
4457 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004458
4459 // The new space size must be a power of two to support single-bit testing
4460 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004461 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4462 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4463 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4464 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004465
4466 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004467 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004468
4469 heap_configured = true;
4470 return true;
4471}
4472
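// A hedged usage sketch for the configuration function above (the sizes are
// arbitrary example values; embedders normally reach this indirectly through
// the public resource-constraints API rather than calling it themselves):
//
//   // 1 MB semispaces, 64 MB old generation, 32 MB executable limit.
//   bool ok = Heap::ConfigureHeap(1 * MB, 64 * MB, 32 * MB);
//
// The semispace size is rounded up to a power of two and, when a snapshot is
// in use, clamped to the reserved semispace size; the old generation and the
// executable limit are rounded up to Page::kPageSize, and the executable limit
// is capped at the old generation size.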
4473
4474bool Heap::ConfigureHeapDefault() {
Russell Brenner90bac252010-11-18 13:33:46 -08004475 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4476 FLAG_max_old_space_size * MB,
4477 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004478}
4479
4480
Ben Murdochbb769b22010-08-11 14:56:33 +01004481void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004482 *stats->start_marker = HeapStats::kStartMarker;
4483 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004484 *stats->new_space_size = new_space_.SizeAsInt();
4485 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004486 *stats->old_pointer_space_size = old_pointer_space_->Size();
4487 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4488 *stats->old_data_space_size = old_data_space_->Size();
4489 *stats->old_data_space_capacity = old_data_space_->Capacity();
4490 *stats->code_space_size = code_space_->Size();
4491 *stats->code_space_capacity = code_space_->Capacity();
4492 *stats->map_space_size = map_space_->Size();
4493 *stats->map_space_capacity = map_space_->Capacity();
4494 *stats->cell_space_size = cell_space_->Size();
4495 *stats->cell_space_capacity = cell_space_->Capacity();
4496 *stats->lo_space_size = lo_space_->Size();
4497 GlobalHandles::RecordStats(stats);
Ben Murdochbb769b22010-08-11 14:56:33 +01004498 *stats->memory_allocator_size = MemoryAllocator::Size();
4499 *stats->memory_allocator_capacity =
4500 MemoryAllocator::Size() + MemoryAllocator::Available();
Iain Merrick75681382010-08-19 15:07:18 +01004501 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004502 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004503 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004504 for (HeapObject* obj = iterator.next();
4505 obj != NULL;
4506 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004507 InstanceType type = obj->map()->instance_type();
4508 ASSERT(0 <= type && type <= LAST_TYPE);
4509 stats->objects_per_type[type]++;
4510 stats->size_per_type[type] += obj->Size();
4511 }
4512 }
Steve Blockd0582a62009-12-15 09:54:21 +00004513}
4514
4515
Ben Murdochf87a2032010-10-22 12:50:53 +01004516intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004517 return old_pointer_space_->Size()
4518 + old_data_space_->Size()
4519 + code_space_->Size()
4520 + map_space_->Size()
4521 + cell_space_->Size()
4522 + lo_space_->Size();
4523}
4524
4525
4526int Heap::PromotedExternalMemorySize() {
4527 if (amount_of_external_allocated_memory_
4528 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4529 return amount_of_external_allocated_memory_
4530 - amount_of_external_allocated_memory_at_last_global_gc_;
4531}
4532
4533
4534bool Heap::Setup(bool create_heap_objects) {
4535 // Initialize heap spaces and initial maps and objects. Whenever something
4536 // goes wrong, just return false. The caller should check the results and
4537 // call Heap::TearDown() to release allocated memory.
4538 //
 4539  // If the heap is not yet configured (e.g., through the API), configure it.
4540 // Configuration is based on the flags new-space-size (really the semispace
4541 // size) and old-space-size if set or the initial values of semispace_size_
4542 // and old_generation_size_ otherwise.
4543 if (!heap_configured) {
4544 if (!ConfigureHeapDefault()) return false;
4545 }
4546
Iain Merrick75681382010-08-19 15:07:18 +01004547 ScavengingVisitor::Initialize();
4548 NewSpaceScavenger::Initialize();
4549 MarkCompactCollector::Initialize();
4550
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004551 MarkMapPointersAsEncoded(false);
4552
Steve Blocka7e24c12009-10-30 11:49:00 +00004553  // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004554 // space. The chunk is double the size of the requested reserved
4555 // new space size to ensure that we can find a pair of semispaces that
4556 // are contiguous and aligned to their size.
Russell Brenner90bac252010-11-18 13:33:46 -08004557 if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004558 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00004559 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004560 if (chunk == NULL) return false;
4561
4562 // Align the pair of semispaces to their size, which must be a power
4563 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004564 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004565 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4566 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4567 return false;
4568 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004569
4570 // Initialize old pointer space.
4571 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004572 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004573 if (old_pointer_space_ == NULL) return false;
4574 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4575
4576 // Initialize old data space.
4577 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004578 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004579 if (old_data_space_ == NULL) return false;
4580 if (!old_data_space_->Setup(NULL, 0)) return false;
4581
4582 // Initialize the code space, set its maximum capacity to the old
4583 // generation size. It needs executable memory.
4584 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4585 // virtual address space, so that they can call each other with near calls.
4586 if (code_range_size_ > 0) {
4587 if (!CodeRange::Setup(code_range_size_)) {
4588 return false;
4589 }
4590 }
4591
4592 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004593 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004594 if (code_space_ == NULL) return false;
4595 if (!code_space_->Setup(NULL, 0)) return false;
4596
4597 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00004598 map_space_ = new MapSpace(FLAG_use_big_map_space
4599 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004600 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4601 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004602 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004603 if (map_space_ == NULL) return false;
4604 if (!map_space_->Setup(NULL, 0)) return false;
4605
4606 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00004607 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004608 if (cell_space_ == NULL) return false;
4609 if (!cell_space_->Setup(NULL, 0)) return false;
4610
4611 // The large object code space may contain code or data. We set the memory
4612 // to be non-executable here for safety, but this means we need to enable it
4613 // explicitly when allocating large code objects.
4614 lo_space_ = new LargeObjectSpace(LO_SPACE);
4615 if (lo_space_ == NULL) return false;
4616 if (!lo_space_->Setup()) return false;
4617
4618 if (create_heap_objects) {
4619 // Create initial maps.
4620 if (!CreateInitialMaps()) return false;
4621 if (!CreateApiObjects()) return false;
4622
4623 // Create initial objects
4624 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004625
4626 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004627 }
4628
Ben Murdochf87a2032010-10-22 12:50:53 +01004629 LOG(IntPtrTEvent("heap-capacity", Capacity()));
4630 LOG(IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004631
Steve Block3ce2e202009-11-05 08:53:23 +00004632#ifdef ENABLE_LOGGING_AND_PROFILING
4633 // This should be called only after initial objects have been created.
4634 ProducerHeapProfile::Setup();
4635#endif
4636
Steve Blocka7e24c12009-10-30 11:49:00 +00004637 return true;
4638}
4639
4640
Steve Blockd0582a62009-12-15 09:54:21 +00004641void Heap::SetStackLimits() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004642 // On 64 bit machines, pointers are generally out of range of Smis. We write
4643 // something that looks like an out of range Smi to the GC.
4644
Steve Blockd0582a62009-12-15 09:54:21 +00004645 // Set up the special root array entries containing the stack limits.
4646 // These are actually addresses, but the tag makes the GC ignore it.
Steve Blocka7e24c12009-10-30 11:49:00 +00004647 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004648 reinterpret_cast<Object*>(
4649 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
4650 roots_[kRealStackLimitRootIndex] =
4651 reinterpret_cast<Object*>(
4652 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004653}
4654
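// Why the masking above is safe: with the smi encoding used in this codebase
// (kSmiTag == 0 in the low bit), clearing the low bit of an address and or-ing
// in the tag yields a word that parses as a small integer, so root-visiting GC
// code skips it instead of treating it as a heap pointer. A sketch, assuming a
// 32-bit build where kSmiTagMask is 1:
//
//   jslimit address   0x0804f7f4
//   & ~kSmiTagMask    0x0804f7f4   // low bit was already clear
//   | kSmiTag         0x0804f7f4   // unchanged, but now read as a smi
//
// Stack checks compare against the tagged value directly; losing the low bit
// moves the limit by at most one byte, which is harmless.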
4655
4656void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004657 if (FLAG_print_cumulative_gc_stat) {
4658 PrintF("\n\n");
4659 PrintF("gc_count=%d ", gc_count_);
4660 PrintF("mark_sweep_count=%d ", ms_count_);
4661 PrintF("mark_compact_count=%d ", mc_count_);
4662 PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
4663 PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004664 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
4665 GCTracer::get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004666 PrintF("\n\n");
4667 }
4668
Steve Blocka7e24c12009-10-30 11:49:00 +00004669 GlobalHandles::TearDown();
4670
Leon Clarkee46be812010-01-19 14:06:41 +00004671 ExternalStringTable::TearDown();
4672
Steve Blocka7e24c12009-10-30 11:49:00 +00004673 new_space_.TearDown();
4674
4675 if (old_pointer_space_ != NULL) {
4676 old_pointer_space_->TearDown();
4677 delete old_pointer_space_;
4678 old_pointer_space_ = NULL;
4679 }
4680
4681 if (old_data_space_ != NULL) {
4682 old_data_space_->TearDown();
4683 delete old_data_space_;
4684 old_data_space_ = NULL;
4685 }
4686
4687 if (code_space_ != NULL) {
4688 code_space_->TearDown();
4689 delete code_space_;
4690 code_space_ = NULL;
4691 }
4692
4693 if (map_space_ != NULL) {
4694 map_space_->TearDown();
4695 delete map_space_;
4696 map_space_ = NULL;
4697 }
4698
4699 if (cell_space_ != NULL) {
4700 cell_space_->TearDown();
4701 delete cell_space_;
4702 cell_space_ = NULL;
4703 }
4704
4705 if (lo_space_ != NULL) {
4706 lo_space_->TearDown();
4707 delete lo_space_;
4708 lo_space_ = NULL;
4709 }
4710
4711 MemoryAllocator::TearDown();
4712}
4713
4714
4715void Heap::Shrink() {
4716 // Try to shrink all paged spaces.
4717 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004718 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
4719 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00004720}
4721
4722
4723#ifdef ENABLE_HEAP_PROTECTION
4724
4725void Heap::Protect() {
4726 if (HasBeenSetup()) {
4727 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004728 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4729 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004730 }
4731}
4732
4733
4734void Heap::Unprotect() {
4735 if (HasBeenSetup()) {
4736 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004737 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4738 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004739 }
4740}
4741
4742#endif
4743
4744
Steve Block6ded16b2010-05-10 14:33:55 +01004745void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
4746 ASSERT(callback != NULL);
4747 GCPrologueCallbackPair pair(callback, gc_type);
4748 ASSERT(!gc_prologue_callbacks_.Contains(pair));
4749 return gc_prologue_callbacks_.Add(pair);
4750}
4751
4752
4753void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
4754 ASSERT(callback != NULL);
4755 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
4756 if (gc_prologue_callbacks_[i].callback == callback) {
4757 gc_prologue_callbacks_.Remove(i);
4758 return;
4759 }
4760 }
4761 UNREACHABLE();
4762}
4763
4764
4765void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
4766 ASSERT(callback != NULL);
4767 GCEpilogueCallbackPair pair(callback, gc_type);
4768 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
4769 return gc_epilogue_callbacks_.Add(pair);
4770}
4771
4772
4773void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
4774 ASSERT(callback != NULL);
4775 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
4776 if (gc_epilogue_callbacks_[i].callback == callback) {
4777 gc_epilogue_callbacks_.Remove(i);
4778 return;
4779 }
4780 }
4781 UNREACHABLE();
4782}
4783
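// A hedged sketch of how the prologue/epilogue hooks above are typically used
// (the v8::V8::AddGCPrologueCallback entry point in the public API is assumed
// to forward here; the callback name is illustrative):
//
//   static void OnMarkSweep(GCType type, GCCallbackFlags flags) {
//     // Invoked only for the GC types the callback was registered for.
//   }
//
//   Heap::AddGCPrologueCallback(OnMarkSweep, kGCTypeMarkSweepCompact);
//   ...
//   Heap::RemoveGCPrologueCallback(OnMarkSweep);  // must match an earlier
//                                                 // registration, otherwise
//                                                 // UNREACHABLE() is hit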
4784
Steve Blocka7e24c12009-10-30 11:49:00 +00004785#ifdef DEBUG
4786
4787class PrintHandleVisitor: public ObjectVisitor {
4788 public:
4789 void VisitPointers(Object** start, Object** end) {
4790 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01004791 PrintF(" handle %p to %p\n",
4792 reinterpret_cast<void*>(p),
4793 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00004794 }
4795};
4796
4797void Heap::PrintHandles() {
4798 PrintF("Handles:\n");
4799 PrintHandleVisitor v;
4800 HandleScopeImplementer::Iterate(&v);
4801}
4802
4803#endif
4804
4805
4806Space* AllSpaces::next() {
4807 switch (counter_++) {
4808 case NEW_SPACE:
4809 return Heap::new_space();
4810 case OLD_POINTER_SPACE:
4811 return Heap::old_pointer_space();
4812 case OLD_DATA_SPACE:
4813 return Heap::old_data_space();
4814 case CODE_SPACE:
4815 return Heap::code_space();
4816 case MAP_SPACE:
4817 return Heap::map_space();
4818 case CELL_SPACE:
4819 return Heap::cell_space();
4820 case LO_SPACE:
4821 return Heap::lo_space();
4822 default:
4823 return NULL;
4824 }
4825}
4826
4827
4828PagedSpace* PagedSpaces::next() {
4829 switch (counter_++) {
4830 case OLD_POINTER_SPACE:
4831 return Heap::old_pointer_space();
4832 case OLD_DATA_SPACE:
4833 return Heap::old_data_space();
4834 case CODE_SPACE:
4835 return Heap::code_space();
4836 case MAP_SPACE:
4837 return Heap::map_space();
4838 case CELL_SPACE:
4839 return Heap::cell_space();
4840 default:
4841 return NULL;
4842 }
4843}
4844
4845
4846
4847OldSpace* OldSpaces::next() {
4848 switch (counter_++) {
4849 case OLD_POINTER_SPACE:
4850 return Heap::old_pointer_space();
4851 case OLD_DATA_SPACE:
4852 return Heap::old_data_space();
4853 case CODE_SPACE:
4854 return Heap::code_space();
4855 default:
4856 return NULL;
4857 }
4858}
4859
4860
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004861SpaceIterator::SpaceIterator()
4862 : current_space_(FIRST_SPACE),
4863 iterator_(NULL),
4864 size_func_(NULL) {
4865}
4866
4867
4868SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
4869 : current_space_(FIRST_SPACE),
4870 iterator_(NULL),
4871 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004872}
4873
4874
4875SpaceIterator::~SpaceIterator() {
4876 // Delete active iterator if any.
4877 delete iterator_;
4878}
4879
4880
4881bool SpaceIterator::has_next() {
4882 // Iterate until no more spaces.
4883 return current_space_ != LAST_SPACE;
4884}
4885
4886
4887ObjectIterator* SpaceIterator::next() {
4888 if (iterator_ != NULL) {
4889 delete iterator_;
4890 iterator_ = NULL;
4891 // Move to the next space
4892 current_space_++;
4893 if (current_space_ > LAST_SPACE) {
4894 return NULL;
4895 }
4896 }
4897
4898 // Return iterator for the new current space.
4899 return CreateIterator();
4900}
4901
4902
4903// Create an iterator for the space to iterate.
4904ObjectIterator* SpaceIterator::CreateIterator() {
4905 ASSERT(iterator_ == NULL);
4906
4907 switch (current_space_) {
4908 case NEW_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004909 iterator_ = new SemiSpaceIterator(Heap::new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004910 break;
4911 case OLD_POINTER_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004912 iterator_ = new HeapObjectIterator(Heap::old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004913 break;
4914 case OLD_DATA_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004915 iterator_ = new HeapObjectIterator(Heap::old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004916 break;
4917 case CODE_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004918 iterator_ = new HeapObjectIterator(Heap::code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004919 break;
4920 case MAP_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004921 iterator_ = new HeapObjectIterator(Heap::map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004922 break;
4923 case CELL_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004924 iterator_ = new HeapObjectIterator(Heap::cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004925 break;
4926 case LO_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004927 iterator_ = new LargeObjectIterator(Heap::lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004928 break;
4929 }
4930
4931 // Return the newly allocated iterator;
4932 ASSERT(iterator_ != NULL);
4933 return iterator_;
4934}
4935
4936
Ben Murdochb0fe1622011-05-05 13:52:32 +01004937class HeapObjectsFilter {
4938 public:
4939 virtual ~HeapObjectsFilter() {}
4940 virtual bool SkipObject(HeapObject* object) = 0;
4941};
4942
4943
4944class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004945 public:
4946 FreeListNodesFilter() {
4947 MarkFreeListNodes();
4948 }
4949
Ben Murdochb0fe1622011-05-05 13:52:32 +01004950 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004951 if (object->IsMarked()) {
4952 object->ClearMark();
4953 return true;
4954 } else {
4955 return false;
4956 }
4957 }
4958
4959 private:
4960 void MarkFreeListNodes() {
4961 Heap::old_pointer_space()->MarkFreeListNodes();
4962 Heap::old_data_space()->MarkFreeListNodes();
4963 MarkCodeSpaceFreeListNodes();
4964 Heap::map_space()->MarkFreeListNodes();
4965 Heap::cell_space()->MarkFreeListNodes();
4966 }
4967
4968 void MarkCodeSpaceFreeListNodes() {
4969 // For code space, using FreeListNode::IsFreeListNode is OK.
4970 HeapObjectIterator iter(Heap::code_space());
4971 for (HeapObject* obj = iter.next_object();
4972 obj != NULL;
4973 obj = iter.next_object()) {
4974 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
4975 }
4976 }
4977
4978 AssertNoAllocation no_alloc;
4979};
4980
4981
Ben Murdochb0fe1622011-05-05 13:52:32 +01004982class UnreachableObjectsFilter : public HeapObjectsFilter {
4983 public:
4984 UnreachableObjectsFilter() {
4985 MarkUnreachableObjects();
4986 }
4987
4988 bool SkipObject(HeapObject* object) {
4989 if (object->IsMarked()) {
4990 object->ClearMark();
4991 return true;
4992 } else {
4993 return false;
4994 }
4995 }
4996
4997 private:
4998 class UnmarkingVisitor : public ObjectVisitor {
4999 public:
5000 UnmarkingVisitor() : list_(10) {}
5001
5002 void VisitPointers(Object** start, Object** end) {
5003 for (Object** p = start; p < end; p++) {
5004 if (!(*p)->IsHeapObject()) continue;
5005 HeapObject* obj = HeapObject::cast(*p);
5006 if (obj->IsMarked()) {
5007 obj->ClearMark();
5008 list_.Add(obj);
5009 }
5010 }
5011 }
5012
5013 bool can_process() { return !list_.is_empty(); }
5014
5015 void ProcessNext() {
5016 HeapObject* obj = list_.RemoveLast();
5017 obj->Iterate(this);
5018 }
5019
5020 private:
5021 List<HeapObject*> list_;
5022 };
5023
5024 void MarkUnreachableObjects() {
5025 HeapIterator iterator;
5026 for (HeapObject* obj = iterator.next();
5027 obj != NULL;
5028 obj = iterator.next()) {
5029 obj->SetMark();
5030 }
5031 UnmarkingVisitor visitor;
5032 Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);
5033 while (visitor.can_process())
5034 visitor.ProcessNext();
5035 }
5036
5037 AssertNoAllocation no_alloc;
5038};
5039
5040
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005041HeapIterator::HeapIterator()
5042 : filtering_(HeapIterator::kNoFiltering),
5043 filter_(NULL) {
5044 Init();
5045}
5046
5047
Ben Murdochb0fe1622011-05-05 13:52:32 +01005048HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005049 : filtering_(filtering),
5050 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005051 Init();
5052}
5053
5054
5055HeapIterator::~HeapIterator() {
5056 Shutdown();
5057}
5058
5059
5060void HeapIterator::Init() {
5061 // Start the iteration.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005062 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5063 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5064 switch (filtering_) {
5065 case kFilterFreeListNodes:
5066 filter_ = new FreeListNodesFilter;
5067 break;
5068 case kFilterUnreachable:
5069 filter_ = new UnreachableObjectsFilter;
5070 break;
5071 default:
5072 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005073 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005074 object_iterator_ = space_iterator_->next();
5075}
5076
5077
5078void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005079#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005080 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005081 // objects. Otherwise, the heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005082 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005083 ASSERT(object_iterator_ == NULL);
5084 }
5085#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005086 // Make sure the last iterator is deallocated.
5087 delete space_iterator_;
5088 space_iterator_ = NULL;
5089 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005090 delete filter_;
5091 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005092}
5093
5094
Leon Clarked91b9f72010-01-27 17:25:45 +00005095HeapObject* HeapIterator::next() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005096 if (filter_ == NULL) return NextObject();
5097
5098 HeapObject* obj = NextObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01005099 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005100 return obj;
5101}
5102
5103
5104HeapObject* HeapIterator::NextObject() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005105 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00005106 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005107
Leon Clarked91b9f72010-01-27 17:25:45 +00005108 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005109 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00005110 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005111 } else {
5112 // Go though the spaces looking for one that has objects.
5113 while (space_iterator_->has_next()) {
5114 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00005115 if (HeapObject* obj = object_iterator_->next_object()) {
5116 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005117 }
5118 }
5119 }
5120 // Done with the last space.
5121 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00005122 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005123}
5124
5125
5126void HeapIterator::reset() {
5127 // Restart the iterator.
5128 Shutdown();
5129 Init();
5130}
5131
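// A usage sketch for the iterator machinery above, mirroring the loop already
// used in Heap::RecordStats (filtering is optional; the default constructor
// iterates everything):
//
//   HeapIterator iterator(HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // obj is reachable from the strong roots; free list nodes and
//     // unreachable objects have been skipped by the filter.
//   }
//
// Constructing a filtering iterator marks objects in the heap, so no
// allocation may happen while it is alive (hence the AssertNoAllocation
// members in the filters above).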
5132
5133#ifdef DEBUG
5134
5135static bool search_for_any_global;
5136static Object* search_target;
5137static bool found_target;
5138static List<Object*> object_stack(20);
5139
5140
5141// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
5142static const int kMarkTag = 2;
5143
5144static void MarkObjectRecursively(Object** p);
5145class MarkObjectVisitor : public ObjectVisitor {
5146 public:
5147 void VisitPointers(Object** start, Object** end) {
 5148    // Recursively mark all HeapObject pointers in [start, end).
5149 for (Object** p = start; p < end; p++) {
5150 if ((*p)->IsHeapObject())
5151 MarkObjectRecursively(p);
5152 }
5153 }
5154};
5155
5156static MarkObjectVisitor mark_visitor;
5157
5158static void MarkObjectRecursively(Object** p) {
5159 if (!(*p)->IsHeapObject()) return;
5160
5161 HeapObject* obj = HeapObject::cast(*p);
5162
5163 Object* map = obj->map();
5164
5165 if (!map->IsHeapObject()) return; // visited before
5166
5167 if (found_target) return; // stop if target found
5168 object_stack.Add(obj);
5169 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
5170 (!search_for_any_global && (obj == search_target))) {
5171 found_target = true;
5172 return;
5173 }
5174
5175 // not visited yet
5176 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5177
5178 Address map_addr = map_p->address();
5179
5180 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5181
5182 MarkObjectRecursively(&map);
5183
5184 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
5185 &mark_visitor);
5186
5187 if (!found_target) // don't pop if found the target
5188 object_stack.RemoveLast();
5189}
5190
5191
5192static void UnmarkObjectRecursively(Object** p);
5193class UnmarkObjectVisitor : public ObjectVisitor {
5194 public:
5195 void VisitPointers(Object** start, Object** end) {
5196 // Copy all HeapObject pointers in [start, end)
5197 for (Object** p = start; p < end; p++) {
5198 if ((*p)->IsHeapObject())
5199 UnmarkObjectRecursively(p);
5200 }
5201 }
5202};
5203
5204static UnmarkObjectVisitor unmark_visitor;
5205
5206static void UnmarkObjectRecursively(Object** p) {
5207 if (!(*p)->IsHeapObject()) return;
5208
5209 HeapObject* obj = HeapObject::cast(*p);
5210
5211 Object* map = obj->map();
5212
5213 if (map->IsHeapObject()) return; // unmarked already
5214
5215 Address map_addr = reinterpret_cast<Address>(map);
5216
5217 map_addr -= kMarkTag;
5218
5219 ASSERT_TAG_ALIGNED(map_addr);
5220
5221 HeapObject* map_p = HeapObject::FromAddress(map_addr);
5222
5223 obj->set_map(reinterpret_cast<Map*>(map_p));
5224
5225 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
5226
5227 obj->IterateBody(Map::cast(map_p)->instance_type(),
5228 obj->SizeFromMap(Map::cast(map_p)),
5229 &unmark_visitor);
5230}
5231
5232
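// MarkRootObjectRecursively marks from a single root, recording the chain of
// visited objects in object_stack until the target is hit, immediately
// unmarks again to restore every map word, and then prints the recorded path.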
static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("==== Path to object ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n |\n |\n V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkRootObjectRecursively(p);
    }
  }
};


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  search_target = target;
  search_for_any_global = false;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
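// A hypothetical debug-only call site, e.g. invoked from a debugger:
//   Heap::TracePathToObject(leaked_object);  // prints a path from the
//                                            // strong roots to the object
// 'leaked_object' is just an illustrative name for the object under study.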


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  search_target = NULL;
  search_for_any_global = true;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
#endif

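// Sums the free-list and wasted bytes across all old spaces; GCTracer
// reports this value as holes_size_before/holes_size_after when the
// --trace-gc-nvp flag is set.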
static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}

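// GCTracer is scoped to a single collection: the constructor samples the
// start time, heap size and hole size (only when a tracing flag is on),
// and the destructor emits the statistics once the collection finishes.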
GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = Heap::SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;

  if (last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
  }
}

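// The destructor prints one line per collection. With --trace-gc the line
// looks roughly like "Scavenge 4.1 -> 2.3 MB, 3 ms." (numbers illustrative);
// with --trace-gc-nvp it prints key=value pairs (pause, mutator, mark,
// sweep, compact, sizes before/after) intended for machine consumption.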
GCTracer::~GCTracer() {
  // Print ONE trace line iff a tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (last_gc_end_timestamp_ == 0);

  alive_after_last_gc_ = Heap::SizeOfObjects();
  last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    max_gc_pause_ = Max(max_gc_pause_, time);
    max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
    if (!first_gc) {
      min_in_mutator_ = Min(min_in_mutator_,
                            static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  Heap::PrintShortHeapStatistics();
#endif
}


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return MarkCompactCollector::HasCompacted() ? "Mark-compact"
                                                  : "Mark-sweep";
  }
  return "Unknown GC";
}

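// KeyedLookupCache maps a (map, property name) pair to the field offset
// found by the last keyed load, letting the IC skip a full descriptor
// lookup. The hash mixes the Map pointer with the symbol's hash and is
// masked to the cache capacity; only symbols are ever inserted.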
int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return -1;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (Heap::LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];


int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


DescriptorLookupCache::Key
DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];

int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];


#ifdef DEBUG
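// GarbageCollectionGreedyCheck is only reached when --gc-greedy is set
// (see the ASSERT below); it forces a new-space collection unless the VM
// is bootstrapping or allocation failure is currently disallowed.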
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif

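// Each TranscendentalCache instance caches the results of one
// transcendental function (the Type argument). Entries are keyed by the
// two 32-bit halves of the double input; slots start out holding
// 0xffffffff twice, a NaN bit pattern the FPU never produces, so an empty
// slot can never accidentally match a lookup.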
TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
    : type_(t) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}

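// CleanUp compacts both string lists in place after a GC: entries that the
// collector cleared to the null sentinel are dropped, external strings
// still in new space stay in new_space_strings_, and the rest migrate to
// old_space_strings_.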
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    if (Heap::InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


List<Object*> ExternalStringTable::new_space_strings_;
List<Object*> ExternalStringTable::old_space_strings_;

} }  // namespace v8::internal