// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "runtime-profiler.h"
#include "scanner-base.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif


namespace v8 {
namespace internal {


String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
Object* Heap::global_contexts_list_;


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;

intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
static const int default_max_semispace_size_ = 2*MB;
intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = max_old_generation_size_;
#elif defined(V8_TARGET_ARCH_X64)
static const int default_max_semispace_size_ = 16*MB;
intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
intptr_t Heap::code_range_size_ = 512*MB;
intptr_t Heap::max_executable_size_ = 256*MB;
#else
static const int default_max_semispace_size_ = 8*MB;
intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
intptr_t Heap::code_range_size_ = 0;
intptr_t Heap::max_executable_size_ = 128*MB;
#endif

// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
int Heap::max_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#else
int Heap::max_semispace_size_ = default_max_semispace_size_;
#endif

// The snapshot semispace size will be the default semispace size if
// snapshotting is used and will be the requested semispace size as
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.

// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
intptr_t Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;

GCTracer* Heap::tracer_ = NULL;

int Heap::unflattened_strings_length_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;

int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG

intptr_t GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
intptr_t GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;

intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}

intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetup()) return 0;

  return MemoryAllocator::SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}


int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(MarkCompactCollector::are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(Heap::map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


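// In rough terms (summarizing the checks below): a failed allocation in any
// old space or in the large-object space always selects MARK_COMPACTOR, while
// a failed new-space allocation selects SCAVENGER unless the promotion limit
// has been reached, old-space allocation has already failed, or too little
// unassigned memory remains to guarantee that the scavenge's promotions
// could succeed.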
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
  PrintF("New space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d"
         ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
         ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  TranscendentalCache::Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));

  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}


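// Minimal usage sketch (hypothetical call sites; the real callers live in the
// API and runtime layers):
//
//   Heap::CollectAllGarbage(true);   // full GC with compaction forced
//   Heap::CollectAllGarbage(false);  // full GC, compact only if needed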
void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  MarkCompactCollector::SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC. Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  MarkCompactCollector::SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
  if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  Heap::symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}

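// Runs targeted collections until every per-space reservation below succeeds.
// As a worked example of the slack applied to the large-object reservation:
// if large_object_size starts at 1 MB and the five paged-space sizes added to
// it below sum to 3 MB, the large-object space is asked for
// 2 * 1 MB + 3 MB = 5 MB (illustrative numbers).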
void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
      Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

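// The survival rate is a percentage of the new-space size at the start of the
// collection; e.g. if 1 MB out of a 4 MB new space survived, the rate is 25.
// The trend is classified by comparing the new rate against the previous one
// within kYoungSurvivalRateAllowedDeviation.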
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
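      // For example (illustrative numbers): with a 30 MB old generation the
      // limits computed above are 30 + max(2, 10) = 40 MB and
      // 30 + max(8, 15) = 45 MB; the doubling below raises them to 80 MB and
      // 90 MB.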
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  KeyedLookupCache::Clear();
  ContextSlotCache::Clear();
  DescriptorLookupCache::Clear();

  RuntimeProfiler::MarkCompactPrologue(is_compacting);

  CompilationCache::MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
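// The queue lives in the otherwise unused top end of to-space: front_ and
// rear_ both start at ToSpaceHigh(), insert() pushes an (object, size) pair
// by decrementing rear_, and remove() pops the oldest entry by decrementing
// front_, so the queue behaves as a FIFO that grows downward toward the
// allocation top.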
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  void remove(HeapObject** target, int* size) {
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_;
  intptr_t* rear_;
};


// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(Heap::old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  DescriptorLookupCache::Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
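  //
  // Put differently: copied-but-unswept objects lie between new_space_front
  // and the allocation top, while the promotion queue grows downward from
  // ToSpaceHigh() toward the allocation top, so the two regions never overlap
  // (see the ASSERT in PromotionQueue::insert).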
1020 Address new_space_front = new_space_.ToSpaceLow();
1021 promotion_queue.Initialize(new_space_.ToSpaceHigh());
1022
1023 ScavengeVisitor scavenge_visitor;
1024 // Copy roots.
Leon Clarkee46be812010-01-19 14:06:41 +00001025 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
Steve Blocka7e24c12009-10-30 11:49:00 +00001026
1027 // Copy objects reachable from the old generation. By definition,
1028 // there are no intergenerational pointers in code or data spaces.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001029 IterateDirtyRegions(old_pointer_space_,
1030 &IteratePointersInDirtyRegion,
1031 &ScavengePointer,
1032 WATERMARK_CAN_BE_INVALID);
1033
1034 IterateDirtyRegions(map_space_,
1035 &IteratePointersInDirtyMapsRegion,
1036 &ScavengePointer,
1037 WATERMARK_CAN_BE_INVALID);
1038
1039 lo_space_->IterateDirtyRegions(&ScavengePointer);
Steve Blocka7e24c12009-10-30 11:49:00 +00001040
1041 // Copy objects reachable from cells by scavenging cell values directly.
1042 HeapObjectIterator cell_iterator(cell_space_);
Leon Clarked91b9f72010-01-27 17:25:45 +00001043 for (HeapObject* cell = cell_iterator.next();
1044 cell != NULL; cell = cell_iterator.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001045 if (cell->IsJSGlobalPropertyCell()) {
1046 Address value_address =
1047 reinterpret_cast<Address>(cell) +
1048 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1049 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1050 }
1051 }
1052
Ben Murdochf87a2032010-10-22 12:50:53 +01001053 // Scavenge object reachable from the global contexts list directly.
1054 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1055
Ben Murdochb0fe1622011-05-05 13:52:32 +01001056 // Scavenge objects reachable from the runtime-profiler sampler
1057 // window directly.
1058 Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
1059 int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
1060 scavenge_visitor.VisitPointers(
1061 sampler_window_address,
1062 sampler_window_address + sampler_window_size);
1063
Leon Clarkee46be812010-01-19 14:06:41 +00001064 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1065
Steve Block6ded16b2010-05-10 14:33:55 +01001066 UpdateNewSpaceReferencesInExternalStringTable(
1067 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1068
Leon Clarkee46be812010-01-19 14:06:41 +00001069 ASSERT(new_space_front == new_space_.top());
1070
1071 // Set age mark.
1072 new_space_.set_age_mark(new_space_.top());
1073
1074 // Update how much has survived scavenge.
Ben Murdochf87a2032010-10-22 12:50:53 +01001075 IncrementYoungSurvivorsCounter(static_cast<int>(
1076 (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
Leon Clarkee46be812010-01-19 14:06:41 +00001077
1078 LOG(ResourceEvent("scavenge", "end"));
1079
1080 gc_state_ = NOT_IN_GC;
1081}
1082
1083
Steve Block6ded16b2010-05-10 14:33:55 +01001084String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
1085 MapWord first_word = HeapObject::cast(*p)->map_word();
1086
1087 if (!first_word.IsForwardingAddress()) {
1088 // Unreachable external string can be finalized.
1089 FinalizeExternalString(String::cast(*p));
1090 return NULL;
1091 }
1092
1093 // String is still reachable.
1094 return String::cast(first_word.ToForwardingAddress());
1095}
1096
1097
1098void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1099 ExternalStringTableUpdaterCallback updater_func) {
Leon Clarkee46be812010-01-19 14:06:41 +00001100 ExternalStringTable::Verify();
1101
1102 if (ExternalStringTable::new_space_strings_.is_empty()) return;
1103
1104 Object** start = &ExternalStringTable::new_space_strings_[0];
1105 Object** end = start + ExternalStringTable::new_space_strings_.length();
1106 Object** last = start;
1107
1108 for (Object** p = start; p < end; ++p) {
1109 ASSERT(Heap::InFromSpace(*p));
Steve Block6ded16b2010-05-10 14:33:55 +01001110 String* target = updater_func(p);
Leon Clarkee46be812010-01-19 14:06:41 +00001111
Steve Block6ded16b2010-05-10 14:33:55 +01001112 if (target == NULL) continue;
Leon Clarkee46be812010-01-19 14:06:41 +00001113
Leon Clarkee46be812010-01-19 14:06:41 +00001114 ASSERT(target->IsExternalString());
1115
1116 if (Heap::InNewSpace(target)) {
1117 // String is still in new space. Update the table entry.
1118 *last = target;
1119 ++last;
1120 } else {
1121 // String got promoted. Move it to the old string list.
1122 ExternalStringTable::AddOldString(target);
1123 }
1124 }
1125
1126 ASSERT(last <= end);
1127 ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
1128}
1129

static Object* ProcessFunctionWeakReferences(Object* function,
                                             WeakObjectRetainer* retainer) {
  Object* head = Heap::undefined_value();
  JSFunction* tail = NULL;
  Object* candidate = function;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_function;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_next_function_link(candidate_function);
      }
      // Retained function is new tail.
      tail = candidate_function;
    }
    // Move to next element in the list.
    candidate = candidate_function->next_function_link();
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_next_function_link(Heap::undefined_value());
  }

  return head;
}


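// Walks the weak list of global contexts (linked through
// Context::NEXT_CONTEXT_LINK), dropping entries the retainer no longer keeps
// alive, and for each surviving context prunes its nested weak list of
// optimized functions (linked through JSFunction::next_function_link) in the
// same way.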
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;

      // Process the weak list of optimized functions for the context.
      Object* function_list_head =
          ProcessFunctionWeakReferences(
              candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
              retainer);
      candidate_context->set_unchecked(Context::OPTIMIZED_FUNCTIONS_LIST,
                                       function_list_head,
                                       UPDATE_WRITE_BARRIER);
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  Heap::global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue.remove(&target, &size);

      // Promoted object might be already partially visited
      // during dirty regions iteration. Thus we search specifically
      // for pointers to from semispace instead of looking for pointers
      // to new space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
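    // The iteration terminates because each live object is copied or promoted
    // at most once, so both inner loops eventually run out of new work.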
  } while (new_space_front < new_space_.top());

  return new_space_front;
}
1258
1259
Iain Merrick75681382010-08-19 15:07:18 +01001260class ScavengingVisitor : public StaticVisitorBase {
1261 public:
1262 static void Initialize() {
1263 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1264 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1265 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1266 table_.Register(kVisitByteArray, &EvacuateByteArray);
1267 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
Ben Murdochf87a2032010-10-22 12:50:53 +01001268 table_.Register(kVisitGlobalContext,
1269 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1270 VisitSpecialized<Context::kSize>);
Iain Merrick75681382010-08-19 15:07:18 +01001271
1272 typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;
1273
1274 table_.Register(kVisitConsString,
1275 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1276 VisitSpecialized<ConsString::kSize>);
1277
1278 table_.Register(kVisitSharedFunctionInfo,
1279 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1280 VisitSpecialized<SharedFunctionInfo::kSize>);
1281
1282 table_.Register(kVisitJSFunction,
1283 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1284 VisitSpecialized<JSFunction::kSize>);
1285
1286 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1287 kVisitDataObject,
1288 kVisitDataObjectGeneric>();
1289
1290 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1291 kVisitJSObject,
1292 kVisitJSObjectGeneric>();
1293
1294 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1295 kVisitStruct,
1296 kVisitStructGeneric>();
1297 }
1298
1299
1300 static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
1301 table_.GetVisitor(map)(map, slot, obj);
1302 }
1303
1304
1305 private:
1306 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1307 enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1308
Steve Blocka7e24c12009-10-30 11:49:00 +00001309#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001310 static void RecordCopiedObject(HeapObject* obj) {
1311 bool should_record = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00001312#ifdef DEBUG
Iain Merrick75681382010-08-19 15:07:18 +01001313 should_record = FLAG_heap_stats;
Steve Blocka7e24c12009-10-30 11:49:00 +00001314#endif
1315#ifdef ENABLE_LOGGING_AND_PROFILING
Iain Merrick75681382010-08-19 15:07:18 +01001316 should_record = should_record || FLAG_log_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00001317#endif
Iain Merrick75681382010-08-19 15:07:18 +01001318 if (should_record) {
1319 if (Heap::new_space()->Contains(obj)) {
1320 Heap::new_space()->RecordAllocation(obj);
1321 } else {
1322 Heap::new_space()->RecordPromotion(obj);
1323 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001324 }
1325 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001326#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1327
Iain Merrick75681382010-08-19 15:07:18 +01001328 // Helper function used by the evacuation code to copy a source object to an
1329 // allocated target object and update the forwarding pointer in the source
1330 // object. Returns the target object.
1331 INLINE(static HeapObject* MigrateObject(HeapObject* source,
1332 HeapObject* target,
1333 int size)) {
1334 // Copy the content of source to target.
1335 Heap::CopyBlock(target->address(), source->address(), size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001336
Iain Merrick75681382010-08-19 15:07:18 +01001337 // Set the forwarding address.
1338 source->set_map_word(MapWord::FromForwardingAddress(target));
Steve Blocka7e24c12009-10-30 11:49:00 +00001339
1340#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
Iain Merrick75681382010-08-19 15:07:18 +01001341 // Update NewSpace stats if necessary.
1342 RecordCopiedObject(target);
Steve Blocka7e24c12009-10-30 11:49:00 +00001343#endif
Iain Merrick75681382010-08-19 15:07:18 +01001344 HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001345#if defined(ENABLE_LOGGING_AND_PROFILING)
1346 if (Logger::is_logging() || CpuProfiler::is_profiling()) {
1347 if (target->IsJSFunction()) {
1348 PROFILE(FunctionMoveEvent(source->address(), target->address()));
Ben Murdochf87a2032010-10-22 12:50:53 +01001349 PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001350 }
1351 }
1352#endif
Iain Merrick75681382010-08-19 15:07:18 +01001353 return target;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001354 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001355
1356
Iain Merrick75681382010-08-19 15:07:18 +01001357 template<ObjectContents object_contents, SizeRestriction size_restriction>
1358 static inline void EvacuateObject(Map* map,
1359 HeapObject** slot,
1360 HeapObject* object,
1361 int object_size) {
1362 ASSERT((size_restriction != SMALL) ||
1363 (object_size <= Page::kMaxHeapObjectSize));
1364 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001365
Iain Merrick75681382010-08-19 15:07:18 +01001366 if (Heap::ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001367 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001368
Iain Merrick75681382010-08-19 15:07:18 +01001369 if ((size_restriction != SMALL) &&
1370 (object_size > Page::kMaxHeapObjectSize)) {
John Reck59135872010-11-02 12:39:01 -07001371 maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001372 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001373 if (object_contents == DATA_OBJECT) {
John Reck59135872010-11-02 12:39:01 -07001374 maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001375 } else {
John Reck59135872010-11-02 12:39:01 -07001376 maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001377 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001378 }
1379
John Reck59135872010-11-02 12:39:01 -07001380 Object* result = NULL; // Initialization to please compiler.
1381 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001382 HeapObject* target = HeapObject::cast(result);
1383 *slot = MigrateObject(object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001384
Iain Merrick75681382010-08-19 15:07:18 +01001385 if (object_contents == POINTER_OBJECT) {
1386 promotion_queue.insert(target, object_size);
1387 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001388
Iain Merrick75681382010-08-19 15:07:18 +01001389 Heap::tracer()->increment_promoted_objects_size(object_size);
1390 return;
1391 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001392 }
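    // The object was not promoted (or the promotion allocation failed), so
    // copy it into to-space instead.  The unchecked allocation below relies
    // on to-space having room for everything still left in from-space.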
John Reck59135872010-11-02 12:39:01 -07001393 Object* result =
1394 Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
Iain Merrick75681382010-08-19 15:07:18 +01001395 *slot = MigrateObject(object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001396 return;
1397 }
1398
Iain Merrick75681382010-08-19 15:07:18 +01001399
1400 static inline void EvacuateFixedArray(Map* map,
1401 HeapObject** slot,
1402 HeapObject* object) {
1403 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1404 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1405 slot,
1406 object,
1407 object_size);
1408 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001409
1410
Iain Merrick75681382010-08-19 15:07:18 +01001411 static inline void EvacuateByteArray(Map* map,
1412 HeapObject** slot,
1413 HeapObject* object) {
1414 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1415 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1416 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001417
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001418
Iain Merrick75681382010-08-19 15:07:18 +01001419 static inline void EvacuateSeqAsciiString(Map* map,
1420 HeapObject** slot,
1421 HeapObject* object) {
1422 int object_size = SeqAsciiString::cast(object)->
1423 SeqAsciiStringSize(map->instance_type());
1424 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1425 }
1426
1427
1428 static inline void EvacuateSeqTwoByteString(Map* map,
1429 HeapObject** slot,
1430 HeapObject* object) {
1431 int object_size = SeqTwoByteString::cast(object)->
1432 SeqTwoByteStringSize(map->instance_type());
1433 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1434 }
1435
1436
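  // Shortcut candidates are cons strings; when the second component is the
  // empty string the scavenger can drop the cons wrapper and refer to the
  // first component directly (see EvacuateShortcutCandidate below).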
1437 static inline bool IsShortcutCandidate(int type) {
1438 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1439 }
1440
1441 static inline void EvacuateShortcutCandidate(Map* map,
1442 HeapObject** slot,
1443 HeapObject* object) {
1444 ASSERT(IsShortcutCandidate(map->instance_type()));
1445
1446 if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
1447 HeapObject* first =
1448 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1449
1450 *slot = first;
1451
1452 if (!Heap::InNewSpace(first)) {
1453 object->set_map_word(MapWord::FromForwardingAddress(first));
1454 return;
1455 }
1456
1457 MapWord first_word = first->map_word();
1458 if (first_word.IsForwardingAddress()) {
1459 HeapObject* target = first_word.ToForwardingAddress();
1460
1461 *slot = target;
1462 object->set_map_word(MapWord::FromForwardingAddress(target));
1463 return;
1464 }
1465
1466 Scavenge(first->map(), slot, first);
1467 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1468 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001469 }
Iain Merrick75681382010-08-19 15:07:18 +01001470
1471 int object_size = ConsString::kSize;
1472 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001473 }
1474
Iain Merrick75681382010-08-19 15:07:18 +01001475 template<ObjectContents object_contents>
1476 class ObjectEvacuationStrategy {
1477 public:
1478 template<int object_size>
1479 static inline void VisitSpecialized(Map* map,
1480 HeapObject** slot,
1481 HeapObject* object) {
1482 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1483 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001484
Iain Merrick75681382010-08-19 15:07:18 +01001485 static inline void Visit(Map* map,
1486 HeapObject** slot,
1487 HeapObject* object) {
1488 int object_size = map->instance_size();
1489 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1490 }
1491 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001492
Iain Merrick75681382010-08-19 15:07:18 +01001493 typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001494
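  // Dispatch table mapping visitor ids to the evacuation callbacks
  // registered in Initialize().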
Iain Merrick75681382010-08-19 15:07:18 +01001495 static VisitorDispatchTable<Callback> table_;
1496};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001497
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001498
Iain Merrick75681382010-08-19 15:07:18 +01001499VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001500
1501
1502void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1503 ASSERT(InFromSpace(object));
1504 MapWord first_word = object->map_word();
1505 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001506 Map* map = first_word.ToMap();
Iain Merrick75681382010-08-19 15:07:18 +01001507 ScavengingVisitor::Scavenge(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001508}
1509
1510
1511void Heap::ScavengePointer(HeapObject** p) {
1512 ScavengeObject(p, *p);
1513}
1514
1515
John Reck59135872010-11-02 12:39:01 -07001516MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1517 int instance_size) {
1518 Object* result;
1519 { MaybeObject* maybe_result = AllocateRawMap();
1520 if (!maybe_result->ToObject(&result)) return maybe_result;
1521 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001522
1523 // Map::cast cannot be used due to uninitialized map field.
1524 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1525 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1526 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001527 reinterpret_cast<Map*>(result)->
Iain Merrick75681382010-08-19 15:07:18 +01001528 set_visitor_id(
1529 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001530 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001531 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001532 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001533 reinterpret_cast<Map*>(result)->set_bit_field(0);
1534 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001535 return result;
1536}
1537
1538
John Reck59135872010-11-02 12:39:01 -07001539MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1540 Object* result;
1541 { MaybeObject* maybe_result = AllocateRawMap();
1542 if (!maybe_result->ToObject(&result)) return maybe_result;
1543 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001544
1545 Map* map = reinterpret_cast<Map*>(result);
1546 map->set_map(meta_map());
1547 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001548 map->set_visitor_id(
1549 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001550 map->set_prototype(null_value());
1551 map->set_constructor(null_value());
1552 map->set_instance_size(instance_size);
1553 map->set_inobject_properties(0);
1554 map->set_pre_allocated_property_fields(0);
1555 map->set_instance_descriptors(empty_descriptor_array());
1556 map->set_code_cache(empty_fixed_array());
1557 map->set_unused_property_fields(0);
1558 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001559 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001560
1561  // If the map object is aligned, fill the padding area with Smi 0 objects.
1562 if (Map::kPadStart < Map::kSize) {
1563 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1564 0,
1565 Map::kSize - Map::kPadStart);
1566 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001567 return map;
1568}
1569
1570
John Reck59135872010-11-02 12:39:01 -07001571MaybeObject* Heap::AllocateCodeCache() {
1572 Object* result;
1573 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1574 if (!maybe_result->ToObject(&result)) return maybe_result;
1575 }
Steve Block6ded16b2010-05-10 14:33:55 +01001576 CodeCache* code_cache = CodeCache::cast(result);
1577 code_cache->set_default_cache(empty_fixed_array());
1578 code_cache->set_normal_type_cache(undefined_value());
1579 return code_cache;
1580}
1581
1582
Steve Blocka7e24c12009-10-30 11:49:00 +00001583const Heap::StringTypeTable Heap::string_type_table[] = {
1584#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1585 {type, size, k##camel_name##MapRootIndex},
1586 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1587#undef STRING_TYPE_ELEMENT
1588};
1589
1590
1591const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1592#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1593 {contents, k##name##RootIndex},
1594 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1595#undef CONSTANT_SYMBOL_ELEMENT
1596};
1597
1598
1599const Heap::StructTable Heap::struct_table[] = {
1600#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1601 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1602 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1603#undef STRUCT_TABLE_ELEMENT
1604};
1605
1606
1607bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001608 Object* obj;
1609 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1610 if (!maybe_obj->ToObject(&obj)) return false;
1611 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001612 // Map::cast cannot be used due to uninitialized map field.
1613 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1614 set_meta_map(new_meta_map);
1615 new_meta_map->set_map(new_meta_map);
1616
John Reck59135872010-11-02 12:39:01 -07001617 { MaybeObject* maybe_obj =
1618 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1619 if (!maybe_obj->ToObject(&obj)) return false;
1620 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001621 set_fixed_array_map(Map::cast(obj));
1622
John Reck59135872010-11-02 12:39:01 -07001623 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1624 if (!maybe_obj->ToObject(&obj)) return false;
1625 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001626 set_oddball_map(Map::cast(obj));
1627
Steve Block6ded16b2010-05-10 14:33:55 +01001628 // Allocate the empty array.
John Reck59135872010-11-02 12:39:01 -07001629 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1630 if (!maybe_obj->ToObject(&obj)) return false;
1631 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001632 set_empty_fixed_array(FixedArray::cast(obj));
1633
John Reck59135872010-11-02 12:39:01 -07001634 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1635 if (!maybe_obj->ToObject(&obj)) return false;
1636 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001637 set_null_value(obj);
1638
1639 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001640 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1641 if (!maybe_obj->ToObject(&obj)) return false;
1642 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001643 set_empty_descriptor_array(DescriptorArray::cast(obj));
1644
1645 // Fix the instance_descriptors for the existing maps.
1646 meta_map()->set_instance_descriptors(empty_descriptor_array());
1647 meta_map()->set_code_cache(empty_fixed_array());
1648
1649 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1650 fixed_array_map()->set_code_cache(empty_fixed_array());
1651
1652 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1653 oddball_map()->set_code_cache(empty_fixed_array());
1654
1655 // Fix prototype object for existing maps.
1656 meta_map()->set_prototype(null_value());
1657 meta_map()->set_constructor(null_value());
1658
1659 fixed_array_map()->set_prototype(null_value());
1660 fixed_array_map()->set_constructor(null_value());
1661
1662 oddball_map()->set_prototype(null_value());
1663 oddball_map()->set_constructor(null_value());
1664
John Reck59135872010-11-02 12:39:01 -07001665 { MaybeObject* maybe_obj =
1666 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1667 if (!maybe_obj->ToObject(&obj)) return false;
1668 }
Iain Merrick75681382010-08-19 15:07:18 +01001669 set_fixed_cow_array_map(Map::cast(obj));
1670 ASSERT(fixed_array_map() != fixed_cow_array_map());
1671
John Reck59135872010-11-02 12:39:01 -07001672 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1673 if (!maybe_obj->ToObject(&obj)) return false;
1674 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001675 set_heap_number_map(Map::cast(obj));
1676
John Reck59135872010-11-02 12:39:01 -07001677 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1678 if (!maybe_obj->ToObject(&obj)) return false;
1679 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001680 set_proxy_map(Map::cast(obj));
1681
1682 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1683 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001684 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1685 if (!maybe_obj->ToObject(&obj)) return false;
1686 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001687 roots_[entry.index] = Map::cast(obj);
1688 }
1689
John Reck59135872010-11-02 12:39:01 -07001690 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1691 if (!maybe_obj->ToObject(&obj)) return false;
1692 }
Steve Blockd0582a62009-12-15 09:54:21 +00001693 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001694 Map::cast(obj)->set_is_undetectable();
1695
John Reck59135872010-11-02 12:39:01 -07001696 { MaybeObject* maybe_obj =
1697 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1698 if (!maybe_obj->ToObject(&obj)) return false;
1699 }
Steve Blockd0582a62009-12-15 09:54:21 +00001700 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001701 Map::cast(obj)->set_is_undetectable();
1702
John Reck59135872010-11-02 12:39:01 -07001703 { MaybeObject* maybe_obj =
1704 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1705 if (!maybe_obj->ToObject(&obj)) return false;
1706 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001707 set_byte_array_map(Map::cast(obj));
1708
Ben Murdochb0fe1622011-05-05 13:52:32 +01001709 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1710 if (!maybe_obj->ToObject(&obj)) return false;
1711 }
1712 set_empty_byte_array(ByteArray::cast(obj));
1713
John Reck59135872010-11-02 12:39:01 -07001714 { MaybeObject* maybe_obj =
1715 AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1716 if (!maybe_obj->ToObject(&obj)) return false;
1717 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001718 set_pixel_array_map(Map::cast(obj));
1719
John Reck59135872010-11-02 12:39:01 -07001720 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1721 ExternalArray::kAlignedSize);
1722 if (!maybe_obj->ToObject(&obj)) return false;
1723 }
Steve Block3ce2e202009-11-05 08:53:23 +00001724 set_external_byte_array_map(Map::cast(obj));
1725
John Reck59135872010-11-02 12:39:01 -07001726 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1727 ExternalArray::kAlignedSize);
1728 if (!maybe_obj->ToObject(&obj)) return false;
1729 }
Steve Block3ce2e202009-11-05 08:53:23 +00001730 set_external_unsigned_byte_array_map(Map::cast(obj));
1731
John Reck59135872010-11-02 12:39:01 -07001732 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1733 ExternalArray::kAlignedSize);
1734 if (!maybe_obj->ToObject(&obj)) return false;
1735 }
Steve Block3ce2e202009-11-05 08:53:23 +00001736 set_external_short_array_map(Map::cast(obj));
1737
John Reck59135872010-11-02 12:39:01 -07001738 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1739 ExternalArray::kAlignedSize);
1740 if (!maybe_obj->ToObject(&obj)) return false;
1741 }
Steve Block3ce2e202009-11-05 08:53:23 +00001742 set_external_unsigned_short_array_map(Map::cast(obj));
1743
John Reck59135872010-11-02 12:39:01 -07001744 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1745 ExternalArray::kAlignedSize);
1746 if (!maybe_obj->ToObject(&obj)) return false;
1747 }
Steve Block3ce2e202009-11-05 08:53:23 +00001748 set_external_int_array_map(Map::cast(obj));
1749
John Reck59135872010-11-02 12:39:01 -07001750 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1751 ExternalArray::kAlignedSize);
1752 if (!maybe_obj->ToObject(&obj)) return false;
1753 }
Steve Block3ce2e202009-11-05 08:53:23 +00001754 set_external_unsigned_int_array_map(Map::cast(obj));
1755
John Reck59135872010-11-02 12:39:01 -07001756 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1757 ExternalArray::kAlignedSize);
1758 if (!maybe_obj->ToObject(&obj)) return false;
1759 }
Steve Block3ce2e202009-11-05 08:53:23 +00001760 set_external_float_array_map(Map::cast(obj));
1761
John Reck59135872010-11-02 12:39:01 -07001762 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1763 if (!maybe_obj->ToObject(&obj)) return false;
1764 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001765 set_code_map(Map::cast(obj));
1766
John Reck59135872010-11-02 12:39:01 -07001767 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1768 JSGlobalPropertyCell::kSize);
1769 if (!maybe_obj->ToObject(&obj)) return false;
1770 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001771 set_global_property_cell_map(Map::cast(obj));
1772
John Reck59135872010-11-02 12:39:01 -07001773 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1774 if (!maybe_obj->ToObject(&obj)) return false;
1775 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001776 set_one_pointer_filler_map(Map::cast(obj));
1777
John Reck59135872010-11-02 12:39:01 -07001778 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1779 if (!maybe_obj->ToObject(&obj)) return false;
1780 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001781 set_two_pointer_filler_map(Map::cast(obj));
1782
1783 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1784 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001785 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1786 if (!maybe_obj->ToObject(&obj)) return false;
1787 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001788 roots_[entry.index] = Map::cast(obj);
1789 }
1790
John Reck59135872010-11-02 12:39:01 -07001791 { MaybeObject* maybe_obj =
1792 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1793 if (!maybe_obj->ToObject(&obj)) return false;
1794 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001795 set_hash_table_map(Map::cast(obj));
1796
John Reck59135872010-11-02 12:39:01 -07001797 { MaybeObject* maybe_obj =
1798 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1799 if (!maybe_obj->ToObject(&obj)) return false;
1800 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 set_context_map(Map::cast(obj));
1802
John Reck59135872010-11-02 12:39:01 -07001803 { MaybeObject* maybe_obj =
1804 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1805 if (!maybe_obj->ToObject(&obj)) return false;
1806 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001807 set_catch_context_map(Map::cast(obj));
1808
John Reck59135872010-11-02 12:39:01 -07001809 { MaybeObject* maybe_obj =
1810 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1811 if (!maybe_obj->ToObject(&obj)) return false;
1812 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001813 Map* global_context_map = Map::cast(obj);
1814 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1815 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001816
John Reck59135872010-11-02 12:39:01 -07001817 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1818 SharedFunctionInfo::kAlignedSize);
1819 if (!maybe_obj->ToObject(&obj)) return false;
1820 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001821 set_shared_function_info_map(Map::cast(obj));
1822
1823 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1824 return true;
1825}
1826
1827
John Reck59135872010-11-02 12:39:01 -07001828MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001829 // Statically ensure that it is safe to allocate heap numbers in paged
1830 // spaces.
1831 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1832 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1833
John Reck59135872010-11-02 12:39:01 -07001834 Object* result;
1835 { MaybeObject* maybe_result =
1836 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1837 if (!maybe_result->ToObject(&result)) return maybe_result;
1838 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001839
1840 HeapObject::cast(result)->set_map(heap_number_map());
1841 HeapNumber::cast(result)->set_value(value);
1842 return result;
1843}
1844
1845
John Reck59135872010-11-02 12:39:01 -07001846MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001847 // Use general version, if we're forced to always allocate.
1848 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1849
1850 // This version of AllocateHeapNumber is optimized for
1851 // allocation in new space.
1852 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1853 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001854 Object* result;
1855 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1856 if (!maybe_result->ToObject(&result)) return maybe_result;
1857 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001858 HeapObject::cast(result)->set_map(heap_number_map());
1859 HeapNumber::cast(result)->set_value(value);
1860 return result;
1861}
1862
1863
John Reck59135872010-11-02 12:39:01 -07001864MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1865 Object* result;
1866 { MaybeObject* maybe_result = AllocateRawCell();
1867 if (!maybe_result->ToObject(&result)) return maybe_result;
1868 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001869 HeapObject::cast(result)->set_map(global_property_cell_map());
1870 JSGlobalPropertyCell::cast(result)->set_value(value);
1871 return result;
1872}
1873
1874
John Reck59135872010-11-02 12:39:01 -07001875MaybeObject* Heap::CreateOddball(const char* to_string,
1876 Object* to_number) {
1877 Object* result;
1878 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1879 if (!maybe_result->ToObject(&result)) return maybe_result;
1880 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001881 return Oddball::cast(result)->Initialize(to_string, to_number);
1882}
1883
1884
1885bool Heap::CreateApiObjects() {
1886 Object* obj;
1887
John Reck59135872010-11-02 12:39:01 -07001888 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1889 if (!maybe_obj->ToObject(&obj)) return false;
1890 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001891 set_neander_map(Map::cast(obj));
1892
John Reck59135872010-11-02 12:39:01 -07001893 { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
1894 if (!maybe_obj->ToObject(&obj)) return false;
1895 }
1896 Object* elements;
1897 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1898 if (!maybe_elements->ToObject(&elements)) return false;
1899 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001900 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1901 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1902 set_message_listeners(JSObject::cast(obj));
1903
1904 return true;
1905}
1906
1907
1908void Heap::CreateCEntryStub() {
1909 CEntryStub stub(1);
1910 set_c_entry_code(*stub.GetCode());
1911}
1912
1913
Steve Block6ded16b2010-05-10 14:33:55 +01001914#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001915void Heap::CreateRegExpCEntryStub() {
1916 RegExpCEntryStub stub;
1917 set_re_c_entry_code(*stub.GetCode());
1918}
1919#endif
1920
1921
Steve Blocka7e24c12009-10-30 11:49:00 +00001922void Heap::CreateJSEntryStub() {
1923 JSEntryStub stub;
1924 set_js_entry_code(*stub.GetCode());
1925}
1926
1927
1928void Heap::CreateJSConstructEntryStub() {
1929 JSConstructEntryStub stub;
1930 set_js_construct_entry_code(*stub.GetCode());
1931}
1932
1933
1934void Heap::CreateFixedStubs() {
1935 // Here we create roots for fixed stubs. They are needed at GC
1936 // for cooking and uncooking (check out frames.cc).
1937  // This eliminates the need for doing a dictionary lookup in the
1938 // stub cache for these stubs.
1939 HandleScope scope;
1940  // gcc-4.4 has a problem generating correct code for the following snippet:
1941 // { CEntryStub stub;
1942 // c_entry_code_ = *stub.GetCode();
1943 // }
Leon Clarke4515c472010-02-03 11:58:03 +00001944 // { DebuggerStatementStub stub;
1945 // debugger_statement_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001946 // }
1947  // To work around the problem, make separate functions without inlining.
1948 Heap::CreateCEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001949 Heap::CreateJSEntryStub();
1950 Heap::CreateJSConstructEntryStub();
Steve Block6ded16b2010-05-10 14:33:55 +01001951#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001952 Heap::CreateRegExpCEntryStub();
1953#endif
1954}
1955
1956
1957bool Heap::CreateInitialObjects() {
1958 Object* obj;
1959
1960 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001961 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1962 if (!maybe_obj->ToObject(&obj)) return false;
1963 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001964 set_minus_zero_value(obj);
1965 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1966
John Reck59135872010-11-02 12:39:01 -07001967 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1968 if (!maybe_obj->ToObject(&obj)) return false;
1969 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001970 set_nan_value(obj);
1971
John Reck59135872010-11-02 12:39:01 -07001972 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1973 if (!maybe_obj->ToObject(&obj)) return false;
1974 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001975 set_undefined_value(obj);
1976 ASSERT(!InNewSpace(undefined_value()));
1977
1978 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07001979 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1980 if (!maybe_obj->ToObject(&obj)) return false;
1981 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001982 // Don't use set_symbol_table() due to asserts.
1983 roots_[kSymbolTableRootIndex] = obj;
1984
1985  // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07001986 Object* symbol;
1987 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
1988 if (!maybe_symbol->ToObject(&symbol)) return false;
1989 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001990 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1991 Oddball::cast(undefined_value())->set_to_number(nan_value());
1992
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07001994 { MaybeObject* maybe_obj =
1995 Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1996 if (!maybe_obj->ToObject(&obj)) return false;
1997 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001998
John Reck59135872010-11-02 12:39:01 -07001999 { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
2000 if (!maybe_obj->ToObject(&obj)) return false;
2001 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002002 set_true_value(obj);
2003
John Reck59135872010-11-02 12:39:01 -07002004 { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
2005 if (!maybe_obj->ToObject(&obj)) return false;
2006 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002007 set_false_value(obj);
2008
John Reck59135872010-11-02 12:39:01 -07002009 { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
2010 if (!maybe_obj->ToObject(&obj)) return false;
2011 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002012 set_the_hole_value(obj);
2013
John Reck59135872010-11-02 12:39:01 -07002014 { MaybeObject* maybe_obj =
2015 CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
2016 if (!maybe_obj->ToObject(&obj)) return false;
2017 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002018 set_no_interceptor_result_sentinel(obj);
2019
John Reck59135872010-11-02 12:39:01 -07002020 { MaybeObject* maybe_obj =
2021 CreateOddball("termination_exception", Smi::FromInt(-3));
2022 if (!maybe_obj->ToObject(&obj)) return false;
2023 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002024 set_termination_exception(obj);
2025
2026 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07002027 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2028 if (!maybe_obj->ToObject(&obj)) return false;
2029 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002030 set_empty_string(String::cast(obj));
2031
2032 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07002033 { MaybeObject* maybe_obj =
2034 LookupAsciiSymbol(constant_symbol_table[i].contents);
2035 if (!maybe_obj->ToObject(&obj)) return false;
2036 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002037 roots_[constant_symbol_table[i].index] = String::cast(obj);
2038 }
2039
2040 // Allocate the hidden symbol which is used to identify the hidden properties
2041 // in JSObjects. The hash code has a special value so that it will not match
2042 // the empty string when searching for the property. It cannot be part of the
2043 // loop above because it needs to be allocated manually with the special
2044 // hash code in place. The hash code for the hidden_symbol is zero to ensure
2045 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07002046 { MaybeObject* maybe_obj =
2047 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
2048 if (!maybe_obj->ToObject(&obj)) return false;
2049 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002050 hidden_symbol_ = String::cast(obj);
2051
2052 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07002053 { MaybeObject* maybe_obj =
2054 AllocateProxy((Address) &Accessors::ObjectPrototype);
2055 if (!maybe_obj->ToObject(&obj)) return false;
2056 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002057 set_prototype_accessors(Proxy::cast(obj));
2058
2059 // Allocate the code_stubs dictionary. The initial size is set to avoid
2060 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002061 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
2062 if (!maybe_obj->ToObject(&obj)) return false;
2063 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002064 set_code_stubs(NumberDictionary::cast(obj));
2065
2066 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
2067 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07002068 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
2069 if (!maybe_obj->ToObject(&obj)) return false;
2070 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002071 set_non_monomorphic_cache(NumberDictionary::cast(obj));
2072
Kristian Monsen25f61362010-05-21 11:50:48 +01002073 set_instanceof_cache_function(Smi::FromInt(0));
2074 set_instanceof_cache_map(Smi::FromInt(0));
2075 set_instanceof_cache_answer(Smi::FromInt(0));
2076
Steve Blocka7e24c12009-10-30 11:49:00 +00002077 CreateFixedStubs();
2078
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002079 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002080 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2081 if (!maybe_obj->ToObject(&obj)) return false;
2082 }
2083 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
2084 if (!maybe_obj->ToObject(&obj)) return false;
2085 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002086 set_intrinsic_function_names(StringDictionary::cast(obj));
2087
Leon Clarkee46be812010-01-19 14:06:41 +00002088 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002089
Steve Block6ded16b2010-05-10 14:33:55 +01002090 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002091 { MaybeObject* maybe_obj =
2092 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2093 if (!maybe_obj->ToObject(&obj)) return false;
2094 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002095 set_single_character_string_cache(FixedArray::cast(obj));
2096
2097 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002098 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2099 if (!maybe_obj->ToObject(&obj)) return false;
2100 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002101 set_natives_source_cache(FixedArray::cast(obj));
2102
2103 // Handling of script id generation is in Factory::NewScript.
2104 set_last_script_id(undefined_value());
2105
2106 // Initialize keyed lookup cache.
2107 KeyedLookupCache::Clear();
2108
2109 // Initialize context slot cache.
2110 ContextSlotCache::Clear();
2111
2112 // Initialize descriptor cache.
2113 DescriptorLookupCache::Clear();
2114
2115 // Initialize compilation cache.
2116 CompilationCache::Clear();
2117
2118 return true;
2119}
2120
2121
John Reck59135872010-11-02 12:39:01 -07002122MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002123 // Compute the size of the number string cache based on the max heap size.
2124 // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
2125 // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
2126 int number_string_cache_size = max_semispace_size_ / 512;
2127 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
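  // For example, a 2 MB semispace gives 2 MB / 512 = 4096 cache entries,
  // well inside the [32, 16K] clamp.  The backing array allocated below is
  // twice that long because each entry is a (number, string) pair.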
John Reck59135872010-11-02 12:39:01 -07002128 Object* obj;
2129 MaybeObject* maybe_obj =
2130 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2131 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2132 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002133}
2134
2135
2136void Heap::FlushNumberStringCache() {
2137 // Flush the number to string cache.
2138 int len = number_string_cache()->length();
2139 for (int i = 0; i < len; i++) {
2140 number_string_cache()->set_undefined(i);
2141 }
2142}
2143
2144
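// Hash a double by XOR-ing the upper and lower 32 bits of its bit pattern.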
Steve Blocka7e24c12009-10-30 11:49:00 +00002145static inline int double_get_hash(double d) {
2146 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002147 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002148}
2149
2150
2151static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002152 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002153}
2154
2155
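// The number-string cache is a flat FixedArray of (key, value) pairs: for a
// hash h the numeric key lives at index 2 * h and the cached string at
// index 2 * h + 1.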
Steve Blocka7e24c12009-10-30 11:49:00 +00002156Object* Heap::GetNumberStringCache(Object* number) {
2157 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002158 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002159 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002160 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002161 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002162 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002163 }
2164 Object* key = number_string_cache()->get(hash * 2);
2165 if (key == number) {
2166 return String::cast(number_string_cache()->get(hash * 2 + 1));
2167 } else if (key->IsHeapNumber() &&
2168 number->IsHeapNumber() &&
2169 key->Number() == number->Number()) {
2170 return String::cast(number_string_cache()->get(hash * 2 + 1));
2171 }
2172 return undefined_value();
2173}
2174
2175
2176void Heap::SetNumberStringCache(Object* number, String* string) {
2177 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002178 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002179 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002180 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002181 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002182 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002183 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002184 number_string_cache()->set(hash * 2, number);
2185 }
2186 number_string_cache()->set(hash * 2 + 1, string);
2187}
2188
2189
John Reck59135872010-11-02 12:39:01 -07002190MaybeObject* Heap::NumberToString(Object* number,
2191 bool check_number_string_cache) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002192 Counters::number_to_string_runtime.Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002193 if (check_number_string_cache) {
2194 Object* cached = GetNumberStringCache(number);
2195 if (cached != undefined_value()) {
2196 return cached;
2197 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002198 }
2199
2200 char arr[100];
2201 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2202 const char* str;
2203 if (number->IsSmi()) {
2204 int num = Smi::cast(number)->value();
2205 str = IntToCString(num, buffer);
2206 } else {
2207 double num = HeapNumber::cast(number)->value();
2208 str = DoubleToCString(num, buffer);
2209 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002210
John Reck59135872010-11-02 12:39:01 -07002211 Object* js_string;
2212 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2213 if (maybe_js_string->ToObject(&js_string)) {
2214 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002215 }
John Reck59135872010-11-02 12:39:01 -07002216 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002217}
2218
2219
Steve Block3ce2e202009-11-05 08:53:23 +00002220Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2221 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2222}
2223
2224
2225Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2226 ExternalArrayType array_type) {
2227 switch (array_type) {
2228 case kExternalByteArray:
2229 return kExternalByteArrayMapRootIndex;
2230 case kExternalUnsignedByteArray:
2231 return kExternalUnsignedByteArrayMapRootIndex;
2232 case kExternalShortArray:
2233 return kExternalShortArrayMapRootIndex;
2234 case kExternalUnsignedShortArray:
2235 return kExternalUnsignedShortArrayMapRootIndex;
2236 case kExternalIntArray:
2237 return kExternalIntArrayMapRootIndex;
2238 case kExternalUnsignedIntArray:
2239 return kExternalUnsignedIntArrayMapRootIndex;
2240 case kExternalFloatArray:
2241 return kExternalFloatArrayMapRootIndex;
2242 default:
2243 UNREACHABLE();
2244 return kUndefinedValueRootIndex;
2245 }
2246}
2247
2248
John Reck59135872010-11-02 12:39:01 -07002249MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002250 // We need to distinguish the minus zero value and this cannot be
2251 // done after conversion to int. Doing this by comparing bit
2252 // patterns is faster than using fpclassify() et al.
2253 static const DoubleRepresentation minus_zero(-0.0);
2254
2255 DoubleRepresentation rep(value);
2256 if (rep.bits == minus_zero.bits) {
2257 return AllocateHeapNumber(-0.0, pretenure);
2258 }
2259
2260 int int_value = FastD2I(value);
2261 if (value == int_value && Smi::IsValid(int_value)) {
2262 return Smi::FromInt(int_value);
2263 }
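  // For example, 3.0 passes the Smi check above and is returned as the
  // Smi 3, while 0.5 falls through to the HeapNumber allocation below.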
2264
2265 // Materialize the value in the heap.
2266 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002267}
2268
2269
John Reck59135872010-11-02 12:39:01 -07002270MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002271 // Statically ensure that it is safe to allocate proxies in paged spaces.
2272 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2273 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002274 Object* result;
2275 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2276 if (!maybe_result->ToObject(&result)) return maybe_result;
2277 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002278
2279 Proxy::cast(result)->set_proxy(proxy);
2280 return result;
2281}
2282
2283
John Reck59135872010-11-02 12:39:01 -07002284MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2285 Object* result;
2286 { MaybeObject* maybe_result =
2287 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2288 if (!maybe_result->ToObject(&result)) return maybe_result;
2289 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002290
2291 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2292 share->set_name(name);
2293 Code* illegal = Builtins::builtin(Builtins::Illegal);
2294 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002295 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00002296 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
2297 share->set_construct_stub(construct_stub);
2298 share->set_expected_nof_properties(0);
2299 share->set_length(0);
2300 share->set_formal_parameter_count(0);
2301 share->set_instance_class_name(Object_symbol());
2302 share->set_function_data(undefined_value());
2303 share->set_script(undefined_value());
2304 share->set_start_position_and_type(0);
2305 share->set_debug_info(undefined_value());
2306 share->set_inferred_name(empty_string());
2307 share->set_compiler_hints(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002308 share->set_deopt_counter(Smi::FromInt(FLAG_deopt_every_n_times));
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002309 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002310 share->set_this_property_assignments_count(0);
2311 share->set_this_property_assignments(undefined_value());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002312 share->set_opt_count(0);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002313 share->set_num_literals(0);
2314 share->set_end_position(0);
2315 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002316 return result;
2317}
2318
2319
Steve Blockd0582a62009-12-15 09:54:21 +00002320// Returns true for a character in a range. Both limits are inclusive.
2321static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2322  // This makes use of the unsigned wraparound.
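  // For example, Between('7', '0', '9') evaluates 7u <= 9u and is true;
  // for a character below 'from' the subtraction wraps around to a huge
  // unsigned value and the comparison fails.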
2323 return character - from <= to - from;
2324}
2325
2326
John Reck59135872010-11-02 12:39:01 -07002327MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2328 uint32_t c1,
2329 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002330 String* symbol;
2331 // Numeric strings have a different hash algorithm not known by
2332 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2333 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2334 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2335 return symbol;
2336 // Now we know the length is 2, we might as well make use of that fact
2337 // when building the new string.
2338 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2339 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002340 Object* result;
2341 { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
2342 if (!maybe_result->ToObject(&result)) return maybe_result;
2343 }
Steve Blockd0582a62009-12-15 09:54:21 +00002344 char* dest = SeqAsciiString::cast(result)->GetChars();
2345 dest[0] = c1;
2346 dest[1] = c2;
2347 return result;
2348 } else {
John Reck59135872010-11-02 12:39:01 -07002349 Object* result;
2350 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
2351 if (!maybe_result->ToObject(&result)) return maybe_result;
2352 }
Steve Blockd0582a62009-12-15 09:54:21 +00002353 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2354 dest[0] = c1;
2355 dest[1] = c2;
2356 return result;
2357 }
2358}
2359
2360
John Reck59135872010-11-02 12:39:01 -07002361MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002362 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002363 if (first_length == 0) {
2364 return second;
2365 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002366
2367 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002368 if (second_length == 0) {
2369 return first;
2370 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002371
2372 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002373
2374 // Optimization for 2-byte strings often used as keys in a decompression
2375 // dictionary. Check whether we already have the string in the symbol
2376  // table to prevent creation of many unnecessary strings.
2377 if (length == 2) {
2378 unsigned c1 = first->Get(0);
2379 unsigned c2 = second->Get(0);
2380 return MakeOrFindTwoCharacterString(c1, c2);
2381 }
2382
Steve Block6ded16b2010-05-10 14:33:55 +01002383 bool first_is_ascii = first->IsAsciiRepresentation();
2384 bool second_is_ascii = second->IsAsciiRepresentation();
2385 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002386
2387 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002388 // of the new cons string is too large.
2389 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002390 Top::context()->mark_out_of_memory();
2391 return Failure::OutOfMemoryException();
2392 }
2393
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002394 bool is_ascii_data_in_two_byte_string = false;
2395 if (!is_ascii) {
2396    // At least one of the strings uses a two-byte representation, so we
2397    // can't use the fast case code for short ASCII strings below, but
2398    // we can try to save memory if all chars actually fit in ASCII.
2399 is_ascii_data_in_two_byte_string =
2400 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2401 if (is_ascii_data_in_two_byte_string) {
2402 Counters::string_add_runtime_ext_to_ascii.Increment();
2403 }
2404 }
2405
Steve Blocka7e24c12009-10-30 11:49:00 +00002406 // If the resulting string is small make a flat string.
2407 if (length < String::kMinNonFlatLength) {
2408 ASSERT(first->IsFlat());
2409 ASSERT(second->IsFlat());
2410 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002411 Object* result;
2412 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2413 if (!maybe_result->ToObject(&result)) return maybe_result;
2414 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002415 // Copy the characters into the new object.
2416 char* dest = SeqAsciiString::cast(result)->GetChars();
2417 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002418 const char* src;
2419 if (first->IsExternalString()) {
2420 src = ExternalAsciiString::cast(first)->resource()->data();
2421 } else {
2422 src = SeqAsciiString::cast(first)->GetChars();
2423 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002424 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2425 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002426 if (second->IsExternalString()) {
2427 src = ExternalAsciiString::cast(second)->resource()->data();
2428 } else {
2429 src = SeqAsciiString::cast(second)->GetChars();
2430 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002431 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2432 return result;
2433 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002434 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002435 Object* result;
2436 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2437 if (!maybe_result->ToObject(&result)) return maybe_result;
2438 }
Steve Block6ded16b2010-05-10 14:33:55 +01002439 // Copy the characters into the new object.
2440 char* dest = SeqAsciiString::cast(result)->GetChars();
2441 String::WriteToFlat(first, dest, 0, first_length);
2442 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block6ded16b2010-05-10 14:33:55 +01002443 return result;
2444 }
2445
John Reck59135872010-11-02 12:39:01 -07002446 Object* result;
2447 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2448 if (!maybe_result->ToObject(&result)) return maybe_result;
2449 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002450 // Copy the characters into the new object.
2451 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2452 String::WriteToFlat(first, dest, 0, first_length);
2453 String::WriteToFlat(second, dest + first_length, 0, second_length);
2454 return result;
2455 }
2456 }
2457
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002458 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2459 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002460
John Reck59135872010-11-02 12:39:01 -07002461 Object* result;
2462 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2463 if (!maybe_result->ToObject(&result)) return maybe_result;
2464 }
Leon Clarke4515c472010-02-03 11:58:03 +00002465
2466 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002467 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002468 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002469 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002470 cons_string->set_hash_field(String::kEmptyHashField);
2471 cons_string->set_first(first, mode);
2472 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002473 return result;
2474}
2475
2476
John Reck59135872010-11-02 12:39:01 -07002477MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002478 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002479 int end,
2480 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002481 int length = end - start;
2482
2483 if (length == 1) {
2484 return Heap::LookupSingleCharacterStringFromCode(
2485 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002486 } else if (length == 2) {
2487 // Optimization for 2-byte strings often used as keys in a decompression
2488 // dictionary. Check whether we already have the string in the symbol
2489    // table to prevent creation of many unnecessary strings.
2490 unsigned c1 = buffer->Get(start);
2491 unsigned c2 = buffer->Get(start + 1);
2492 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002493 }
2494
2495 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002496 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002497
John Reck59135872010-11-02 12:39:01 -07002498 Object* result;
2499 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2500        ? AllocateRawAsciiString(length, pretenure)
2501 : AllocateRawTwoByteString(length, pretenure);
2502 if (!maybe_result->ToObject(&result)) return maybe_result;
2503 }
Steve Blockd0582a62009-12-15 09:54:21 +00002504 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002505 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002506 if (buffer->IsAsciiRepresentation()) {
2507 ASSERT(string_result->IsAsciiRepresentation());
2508 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2509 String::WriteToFlat(buffer, dest, start, end);
2510 } else {
2511 ASSERT(string_result->IsTwoByteRepresentation());
2512 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2513 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002514 }
Steve Blockd0582a62009-12-15 09:54:21 +00002515
Steve Blocka7e24c12009-10-30 11:49:00 +00002516 return result;
2517}
2518
2519
MaybeObject* Heap::AllocateExternalStringFromAscii(
    ExternalAsciiString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    Top::context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  Map* map = external_ascii_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}


MaybeObject* Heap::AllocateExternalStringFromTwoByte(
    ExternalTwoByteString::Resource* resource) {
  size_t length = resource->length();
  if (length > static_cast<size_t>(String::kMaxLength)) {
    Top::context()->mark_out_of_memory();
    return Failure::OutOfMemoryException();
  }

  // For small strings we check whether the resource contains only
  // ascii characters.  If yes, we use a different string map.
  bool is_ascii = true;
  if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
    is_ascii = false;
  } else {
    const uc16* data = resource->data();
    for (size_t i = 0; i < length; i++) {
      if (data[i] > String::kMaxAsciiCharCode) {
        is_ascii = false;
        break;
      }
    }
  }

  Map* map = is_ascii ?
      Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
  Object* result;
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
  external_string->set_length(static_cast<int>(length));
  external_string->set_hash_field(String::kEmptyHashField);
  external_string->set_resource(resource);

  return result;
}

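// Single-character ascii strings are cached in single_character_string_cache()
// so repeated lookups return the same symbol; only non-ascii character codes
// cause a fresh two-byte string allocation.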
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
  if (code <= String::kMaxAsciiCharCode) {
    Object* value = Heap::single_character_string_cache()->get(code);
    if (value != Heap::undefined_value()) return value;

    char buffer[1];
    buffer[0] = static_cast<char>(code);
    Object* result;
    MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));

    if (!maybe_result->ToObject(&result)) return maybe_result;
    Heap::single_character_string_cache()->set(code, result);
    return result;
  }

  Object* result;
  { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  String* answer = String::cast(result);
  answer->Set(0, code);
  return answer;
}

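// Byte arrays are raw data, so tenured allocations go to old data space (or
// large object space once they exceed the paged-space limit); the untenured
// overload below allocates in new space.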
MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  if (pretenure == NOT_TENURED) {
    return AllocateByteArray(length);
  }
  int size = ByteArray::SizeFor(length);
  Object* result;
  { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
        ? old_data_space_->AllocateRaw(size)
        : lo_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


MaybeObject* Heap::AllocateByteArray(int length) {
  if (length < 0 || length > ByteArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = ByteArray::SizeFor(length);
  AllocationSpace space =
      (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
  reinterpret_cast<ByteArray*>(result)->set_length(length);
  return result;
}


void Heap::CreateFillerObjectAt(Address addr, int size) {
  if (size == 0) return;
  HeapObject* filler = HeapObject::FromAddress(addr);
  if (size == kPointerSize) {
    filler->set_map(one_pointer_filler_map());
  } else if (size == 2 * kPointerSize) {
    filler->set_map(two_pointer_filler_map());
  } else {
    filler->set_map(byte_array_map());
    ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
  }
}


MaybeObject* Heap::AllocatePixelArray(int length,
                                      uint8_t* external_pointer,
                                      PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
  reinterpret_cast<PixelArray*>(result)->set_length(length);
  reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);

  return result;
}


MaybeObject* Heap::AllocateExternalArray(int length,
                                         ExternalArrayType array_type,
                                         void* external_pointer,
                                         PretenureFlag pretenure) {
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
                                            space,
                                            OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<ExternalArray*>(result)->set_map(
      MapForExternalArrayType(array_type));
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
      external_pointer);

  return result;
}

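// Creates a Code object from assembled code in |desc|.  The relocation info
// ByteArray is allocated first so that a failure cannot leave a partially
// initialized Code object behind, and CopyFrom() is what finally migrates the
// instructions and patches any self references.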
MaybeObject* Heap::CreateCode(const CodeDesc& desc,
                              Code::Flags flags,
                              Handle<Object> self_reference) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving uninitialized Code object (and breaking the heap).
  Object* reloc_info;
  { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
    if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
  }

  // Compute size
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
  int obj_size = Code::SizeFor(body_size);
  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
  MaybeObject* maybe_result;
  if (obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Initialize the object
  HeapObject::cast(result)->set_map(code_map());
  Code* code = Code::cast(result);
  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
  code->set_instruction_size(desc.instr_size);
  code->set_relocation_info(ByteArray::cast(reloc_info));
  code->set_flags(flags);
  code->set_deoptimization_data(empty_fixed_array());
  // Allow self references to created code object by patching the handle to
  // point to the newly allocated Code object.
  if (!self_reference.is_null()) {
    *(self_reference.location()) = code;
  }
  // Migrate generated code.
  // The generated code can contain Object** values (typically from handles)
  // that are dereferenced during the copy to point directly to the actual heap
  // objects. These pointers can include references to the code object itself,
  // through the self_reference parameter.
  code->CopyFrom(desc);

#ifdef DEBUG
  code->Verify();
#endif
  return code;
}


MaybeObject* Heap::CopyCode(Code* code) {
  // Allocate an object the same size as the code object.
  int obj_size = code->Size();
  MaybeObject* maybe_result;
  if (obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address old_addr = code->address();
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
  CopyBlock(new_addr, old_addr, obj_size);
  // Relocate the copy.
  Code* new_code = Code::cast(result);
  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
  new_code->Relocate(new_addr - old_addr);
  return new_code;
}


MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
  // Allocate ByteArray before the Code object, so that we do not risk
  // leaving uninitialized Code object (and breaking the heap).
  Object* reloc_info_array;
  { MaybeObject* maybe_reloc_info_array =
        AllocateByteArray(reloc_info.length(), TENURED);
    if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
      return maybe_reloc_info_array;
    }
  }

  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);

  int new_obj_size = Code::SizeFor(new_body_size);

  Address old_addr = code->address();

  size_t relocation_offset =
      static_cast<size_t>(code->instruction_end() - old_addr);

  MaybeObject* maybe_result;
  if (new_obj_size > MaxObjectSizeInPagedSpace()) {
    maybe_result = lo_space_->AllocateRawCode(new_obj_size);
  } else {
    maybe_result = code_space_->AllocateRaw(new_obj_size);
  }

  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy code object.
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();

  // Copy header and instructions.
  memcpy(new_addr, old_addr, relocation_offset);

  Code* new_code = Code::cast(result);
  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));

  // Copy patched rinfo.
  memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());

  // Relocate the copy.
  ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
  new_code->Relocate(new_addr - old_addr);

#ifdef DEBUG
  code->Verify();
#endif
  return new_code;
}

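// Generic map-driven allocation.  Callers throughout this file consume the
// returned MaybeObject with the same idiom; an illustrative (not literal)
// caller looks like:
//
//   Object* obj;
//   { MaybeObject* maybe_obj = Heap::Allocate(map, NEW_SPACE);
//     if (!maybe_obj->ToObject(&obj)) return maybe_obj;  // Propagate failure.
//   }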
MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
  ASSERT(gc_state_ == NOT_IN_GC);
  ASSERT(map->instance_type() != MAP_TYPE);
  // If allocation failures are disallowed, we may allocate in a different
  // space when new space is full and the object is not a large object.
  AllocationSpace retry_space =
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(map->instance_size(), space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  HeapObject::cast(result)->set_map(map);
#ifdef ENABLE_LOGGING_AND_PROFILING
  ProducerHeapProfile::RecordJSObjectAllocation(result);
#endif
  return result;
}


MaybeObject* Heap::InitializeFunction(JSFunction* function,
                                      SharedFunctionInfo* shared,
                                      Object* prototype) {
  ASSERT(!prototype->IsMap());
  function->initialize_properties();
  function->initialize_elements();
  function->set_shared(shared);
  function->set_code(shared->code());
  function->set_prototype_or_initial_map(prototype);
  function->set_context(undefined_value());
  function->set_literals(empty_fixed_array());
  function->set_next_function_link(undefined_value());
  return function;
}


MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
  // Allocate the prototype.  Make sure to use the object function
  // from the function's context, since the function can be from a
  // different context.
  JSFunction* object_function =
      function->context()->global_context()->object_function();
  Object* prototype;
  { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
    if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
  }
  // When creating the prototype for the function we must set its
  // constructor to the function.
  Object* result;
  { MaybeObject* maybe_result =
        JSObject::cast(prototype)->SetProperty(constructor_symbol(),
                                               function,
                                               DONT_ENUM);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return prototype;
}


MaybeObject* Heap::AllocateFunction(Map* function_map,
                                    SharedFunctionInfo* shared,
                                    Object* prototype,
                                    PretenureFlag pretenure) {
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  Object* result;
  { MaybeObject* maybe_result = Allocate(function_map, space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  return InitializeFunction(JSFunction::cast(result), shared, prototype);
}

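// Arguments objects are cloned from the global context's arguments
// boilerplate; since the boilerplate holds no new-space pointers the raw copy
// below needs no write barrier, and only the callee and length slots are set
// afterwards.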
MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
  // To get fast allocation and map sharing for arguments objects we
  // allocate them based on an arguments boilerplate.

  // This calls Copy directly rather than using Heap::AllocateRaw so we
  // duplicate the check here.
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);

  JSObject* boilerplate =
      Top::context()->global_context()->arguments_boilerplate();

  // Check that the size of the boilerplate matches our
  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
  // on the size being a known constant.
  ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());

  // Do the allocation.
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the content. The arguments boilerplate doesn't have any
  // fields that point to new space so it's safe to skip the write
  // barrier here.
  CopyBlock(HeapObject::cast(result)->address(),
            boilerplate->address(),
            kArgumentsObjectSize);

  // Set the two properties.
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
                                                callee);
  JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
                                                Smi::FromInt(length),
                                                SKIP_WRITE_BARRIER);

  // Check the state of the object
  ASSERT(JSObject::cast(result)->HasFastProperties());
  ASSERT(JSObject::cast(result)->HasFastElements());

  return result;
}


static bool HasDuplicates(DescriptorArray* descriptors) {
  int count = descriptors->number_of_descriptors();
  if (count > 1) {
    String* prev_key = descriptors->GetKey(0);
    for (int i = 1; i != count; i++) {
      String* current_key = descriptors->GetKey(i);
      if (prev_key == current_key) return true;
      prev_key = current_key;
    }
  }
  return false;
}

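// Builds the initial map used for objects constructed by |fun|: instance size
// and in-object property count come from the shared function info, and when
// an inline constructor is possible the simple this-property assignments are
// pre-registered as field descriptors (unless duplicate names force a
// fallback).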
MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
  ASSERT(!fun->has_initial_map());

  // First create a new map with the size and number of in-object properties
  // suggested by the function.
  int instance_size = fun->shared()->CalculateInstanceSize();
  int in_object_properties = fun->shared()->CalculateInObjectProperties();
  Object* map_obj;
  { MaybeObject* maybe_map_obj =
        Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
    if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
  }

  // Fetch or allocate prototype.
  Object* prototype;
  if (fun->has_instance_prototype()) {
    prototype = fun->instance_prototype();
  } else {
    { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
      if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
    }
  }
  Map* map = Map::cast(map_obj);
  map->set_inobject_properties(in_object_properties);
  map->set_unused_property_fields(in_object_properties);
  map->set_prototype(prototype);
  ASSERT(map->has_fast_elements());

  // If the function has only simple this property assignments add
  // field descriptors for these to the initial map as the object
  // cannot be constructed without having these properties.  Guard by
  // the inline_new flag so we only change the map if we generate a
  // specialized construct stub.
  ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
  if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
    int count = fun->shared()->this_property_assignments_count();
    if (count > in_object_properties) {
      // Inline constructor can only handle inobject properties.
      fun->shared()->ForbidInlineConstructor();
    } else {
      Object* descriptors_obj;
      { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
        if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
          return maybe_descriptors_obj;
        }
      }
      DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
      for (int i = 0; i < count; i++) {
        String* name = fun->shared()->GetThisPropertyAssignmentName(i);
        ASSERT(name->IsSymbol());
        FieldDescriptor field(name, i, NONE);
        field.SetEnumerationIndex(i);
        descriptors->Set(i, &field);
      }
      descriptors->SetNextEnumerationIndex(count);
      descriptors->SortUnchecked();

      // The descriptors may contain duplicates because the compiler does not
      // guarantee the uniqueness of property names (it would have required
      // quadratic time).  Once the descriptors are sorted we can check for
      // duplicates in linear time.
      if (HasDuplicates(descriptors)) {
        fun->shared()->ForbidInlineConstructor();
      } else {
        map->set_instance_descriptors(descriptors);
        map->set_pre_allocated_property_fields(count);
        map->set_unused_property_fields(in_object_properties - count);
      }
    }
  }

  fun->shared()->StartInobjectSlackTracking(map);

  return map;
}

void Heap::InitializeJSObjectFromMap(JSObject* obj,
                                     FixedArray* properties,
                                     Map* map) {
  obj->set_properties(properties);
  obj->initialize_elements();
  // TODO(1240798): Initialize the object's body using valid initial values
  // according to the object's initial map.  For example, if the map's
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
  // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
  // fixed array (eg, Heap::empty_fixed_array()).  Currently, the object
  // verification code has to cope with (temporarily) invalid objects.  See,
  // for example, JSArray::JSArrayVerify.
  Object* filler;
  // We cannot always fill with one_pointer_filler_map because objects
  // created from API functions expect their internal fields to be initialized
  // with undefined_value.
  if (map->constructor()->IsJSFunction() &&
      JSFunction::cast(map->constructor())->shared()->
          IsInobjectSlackTrackingInProgress()) {
    // We might want to shrink the object later.
    ASSERT(obj->GetInternalFieldCount() == 0);
    filler = Heap::one_pointer_filler_map();
  } else {
    filler = Heap::undefined_value();
  }
  obj->InitializeBody(map->instance_size(), filler);
}

MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
  // JSFunctions should be allocated using AllocateFunction to be
  // properly initialized.
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);

  // Both types of global objects should be allocated using
  // AllocateGlobalObject to be properly initialized.
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);

  // Allocate the backing storage for the properties.
  int prop_size =
      map->pre_allocated_property_fields() +
      map->unused_property_fields() -
      map->inobject_properties();
  ASSERT(prop_size >= 0);
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Allocate the JSObject.
  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
  Object* obj;
  { MaybeObject* maybe_obj = Allocate(map, space);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  // Initialize the JSObject.
  InitializeJSObjectFromMap(JSObject::cast(obj),
                            FixedArray::cast(properties),
                            map);
  ASSERT(JSObject::cast(obj)->HasFastElements());
  return obj;
}

MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
                                    PretenureFlag pretenure) {
  // Allocate the initial map if absent.
  if (!constructor->has_initial_map()) {
    Object* initial_map;
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
    }
    constructor->set_initial_map(Map::cast(initial_map));
    Map::cast(initial_map)->set_constructor(constructor);
  }
  // Allocate the object based on the constructor's initial map.
  MaybeObject* result =
      AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
#ifdef DEBUG
  // Make sure result is NOT a global object if valid.
  Object* non_failure;
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
#endif
  return result;
}

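// Global objects are allocated directly in dictionary (normalized) mode:
// accessor descriptors from the template are moved into a StringDictionary,
// each value wrapped in a JSGlobalPropertyCell, and the map is copied without
// descriptors before being installed on the new object.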
MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Make sure no field properties are described in the initial map.
  // This guarantees us that normalizing the properties does not
  // require us to change property values to JSGlobalPropertyCells.
  ASSERT(map->NextFreePropertyIndex() == 0);

  // Make sure we don't have a ton of pre-allocated slots in the
  // global objects. They will be unused once we normalize the object.
  ASSERT(map->unused_property_fields() == 0);
  ASSERT(map->inobject_properties() == 0);

  // Initial size of the backing store to avoid resize of the storage during
  // bootstrapping. The size differs between the JS global object and the
  // builtins object.
  int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;

  // Allocate a dictionary object for backing storage.
  Object* obj;
  { MaybeObject* maybe_obj =
        StringDictionary::Allocate(
            map->NumberOfDescribedProperties() * 2 + initial_size);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  StringDictionary* dictionary = StringDictionary::cast(obj);

  // The global object might be created from an object template with accessors.
  // Fill these accessors into the dictionary.
  DescriptorArray* descs = map->instance_descriptors();
  for (int i = 0; i < descs->number_of_descriptors(); i++) {
    PropertyDetails details = descs->GetDetails(i);
    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
    PropertyDetails d =
        PropertyDetails(details.attributes(), CALLBACKS, details.index());
    Object* value = descs->GetCallbacksObject(i);
    { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
      if (!maybe_value->ToObject(&value)) return maybe_value;
    }

    Object* result;
    { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
      if (!maybe_result->ToObject(&result)) return maybe_result;
    }
    dictionary = StringDictionary::cast(result);
  }

  // Allocate the global object and initialize it with the backing store.
  { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  JSObject* global = JSObject::cast(obj);
  InitializeJSObjectFromMap(global, dictionary, map);

  // Create a new map for the global object.
  { MaybeObject* maybe_obj = map->CopyDropDescriptors();
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  Map* new_map = Map::cast(obj);

  // Setup the global object as a normalized object.
  global->set_map(new_map);
  global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
  global->set_properties(dictionary);

  // Make sure result is a global object with properties in dictionary.
  ASSERT(global->IsGlobalObject());
  ASSERT(!global->HasFastProperties());
  return global;
}

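// Makes a shallow clone of |source|.  The body is copied with CopyBlock; when
// the clone had to be placed in old space (always_allocate), RecordWrites
// re-establishes the write barrier for all fields past the header.  Elements
// and properties arrays are copied separately unless the elements are
// copy-on-write.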
MaybeObject* Heap::CopyJSObject(JSObject* source) {
  // Never used to copy functions.  If functions need to be copied we
  // have to be careful to clear the literals array.
  ASSERT(!source->IsJSFunction());

  // Make the clone.
  Map* map = source->map();
  int object_size = map->instance_size();
  Object* clone;

  // If we're forced to always allocate, we use the general allocation
  // functions which may leave us with an object in old space.
  if (always_allocate()) {
    { MaybeObject* maybe_clone =
          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    Address clone_address = HeapObject::cast(clone)->address();
    CopyBlock(clone_address,
              source->address(),
              object_size);
    // Update write barrier for all fields that lie beyond the header.
    RecordWrites(clone_address,
                 JSObject::kHeaderSize,
                 (object_size - JSObject::kHeaderSize) / kPointerSize);
  } else {
    { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
    }
    ASSERT(Heap::InNewSpace(clone));
    // Since we know the clone is allocated in new space, we can copy
    // the contents without worrying about updating the write barrier.
    CopyBlock(HeapObject::cast(clone)->address(),
              source->address(),
              object_size);
  }

  FixedArray* elements = FixedArray::cast(source->elements());
  FixedArray* properties = FixedArray::cast(source->properties());
  // Update elements if necessary.
  if (elements->length() > 0) {
    Object* elem;
    { MaybeObject* maybe_elem =
          (elements->map() == fixed_cow_array_map()) ?
          elements : CopyFixedArray(elements);
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
    }
    JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
  }
  // Update properties if necessary.
  if (properties->length() > 0) {
    Object* prop;
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
    }
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
  }
  // Return the new clone.
#ifdef ENABLE_LOGGING_AND_PROFILING
  ProducerHeapProfile::RecordJSObjectAllocation(clone);
#endif
  return clone;
}

MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
                                             JSGlobalProxy* object) {
  ASSERT(constructor->has_initial_map());
  Map* map = constructor->initial_map();

  // Check that the already allocated object has the same size and type as
  // objects allocated using the constructor.
  ASSERT(map->instance_size() == object->map()->instance_size());
  ASSERT(map->instance_type() == object->map()->instance_type());

  // Allocate the backing storage for the properties.
  int prop_size = map->unused_property_fields() - map->inobject_properties();
  Object* properties;
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
  }

  // Reset the map for the object.
  object->set_map(constructor->initial_map());

  // Reinitialize the object from the constructor map.
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
  return object;
}

MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
                                           PretenureFlag pretenure) {
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRawAsciiString(string.length(), pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Copy the characters into the new object.
  SeqAsciiString* string_result = SeqAsciiString::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->SeqAsciiStringSet(i, string[i]);
  }
  return result;
}


MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> string,
                                          PretenureFlag pretenure) {
  // V8 only supports characters in the Basic Multilingual Plane.
  const uc32 kMaxSupportedChar = 0xFFFF;
  // Count the number of characters in the UTF-8 string and check if
  // it is an ASCII string.
  Access<ScannerConstants::Utf8Decoder>
      decoder(ScannerConstants::utf8_decoder());
  decoder->Reset(string.start(), string.length());
  int chars = 0;
  bool is_ascii = true;
  while (decoder->has_more()) {
    uc32 r = decoder->GetNext();
    if (r > String::kMaxAsciiCharCode) is_ascii = false;
    chars++;
  }

  // If the string is ascii, we do not need to convert the characters
  // since UTF8 is backwards compatible with ascii.
  if (is_ascii) return AllocateStringFromAscii(string, pretenure);

  Object* result;
  { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Convert and copy the characters into the new object.
  String* string_result = String::cast(result);
  decoder->Reset(string.start(), string.length());
  for (int i = 0; i < chars; i++) {
    uc32 r = decoder->GetNext();
    if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
    string_result->Set(i, r);
  }
  return result;
}

MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
                                             PretenureFlag pretenure) {
  // Check if the string is an ASCII string.
  int i = 0;
  while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;

  MaybeObject* maybe_result;
  if (i == string.length()) {  // It's an ASCII string.
    maybe_result = AllocateRawAsciiString(string.length(), pretenure);
  } else {  // It's not an ASCII string.
    maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
  }
  Object* result;
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Copy the characters into the new object, which may be either ASCII or
  // UTF-16.
  String* string_result = String::cast(result);
  for (int i = 0; i < string.length(); i++) {
    string_result->Set(i, string[i]);
  }
  return result;
}


Map* Heap::SymbolMapForString(String* string) {
  // If the string is in new space it cannot be used as a symbol.
  if (InNewSpace(string)) return NULL;

  // Find the corresponding symbol map for strings.
  Map* map = string->map();
  if (map == ascii_string_map()) return ascii_symbol_map();
  if (map == string_map()) return symbol_map();
  if (map == cons_string_map()) return cons_symbol_map();
  if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
  if (map == external_string_map()) return external_symbol_map();
  if (map == external_ascii_string_map()) return external_ascii_symbol_map();
  if (map == external_string_with_ascii_data_map()) {
    return external_symbol_with_ascii_data_map();
  }

  // No match found.
  return NULL;
}

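// Symbols are allocated directly in old data space (or large object space for
// oversized ones) and are filled from the character stream only after the
// map, length and hash field have been set.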
MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
                                          int chars,
                                          uint32_t hash_field) {
  ASSERT(chars >= 0);
  // Ensure the chars matches the number of characters in the buffer.
  ASSERT(static_cast<unsigned>(chars) == buffer->Length());
  // Determine whether the string is ascii.
  bool is_ascii = true;
  while (buffer->has_more()) {
    if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
      is_ascii = false;
      break;
    }
  }
  buffer->Rewind();

  // Compute map and object size.
  int size;
  Map* map;

  if (is_ascii) {
    if (chars > SeqAsciiString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = ascii_symbol_map();
    size = SeqAsciiString::SizeFor(chars);
  } else {
    if (chars > SeqTwoByteString::kMaxLength) {
      return Failure::OutOfMemoryException();
    }
    map = symbol_map();
    size = SeqTwoByteString::SizeFor(chars);
  }

  // Allocate string.
  Object* result;
  { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
        ? lo_space_->AllocateRaw(size)
        : old_data_space_->AllocateRaw(size);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  reinterpret_cast<HeapObject*>(result)->set_map(map);
  // Set length and hash fields of the allocated string.
  String* answer = String::cast(result);
  answer->set_length(chars);
  answer->set_hash_field(hash_field);

  ASSERT_EQ(size, answer->Size());

  // Fill in the characters.
  for (int i = 0; i < chars; i++) {
    answer->Set(i, buffer->GetNext());
  }
  return answer;
}

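// The raw string allocators below only set up the map, length and empty hash
// field; callers fill in the characters.  Space selection follows the same
// logic in both variants: new space by default, old data space when tenured,
// and large object space once the size exceeds the paged-space limit.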
MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
  if (length < 0 || length > SeqAsciiString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  int size = SeqAsciiString::SizeFor(length);
  ASSERT(size <= SeqAsciiString::kMaxSize);

  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(ascii_string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}


MaybeObject* Heap::AllocateRawTwoByteString(int length,
                                            PretenureFlag pretenure) {
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  int size = SeqTwoByteString::SizeFor(length);
  ASSERT(size <= SeqTwoByteString::kMaxSize);
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
  AllocationSpace retry_space = OLD_DATA_SPACE;

  if (space == NEW_SPACE) {
    if (size > kMaxObjectSizeInNewSpace) {
      // Allocate in large object space, retry space will be ignored.
      space = LO_SPACE;
    } else if (size > MaxObjectSizeInPagedSpace()) {
      // Allocate in new space, retry in large object space.
      retry_space = LO_SPACE;
    }
  } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
    space = LO_SPACE;
  }
  Object* result;
  { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  // Partially initialize the object.
  HeapObject::cast(result)->set_map(string_map());
  String::cast(result)->set_length(length);
  String::cast(result)->set_hash_field(String::kEmptyHashField);
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
  return result;
}

MaybeObject* Heap::AllocateEmptyFixedArray() {
  int size = FixedArray::SizeFor(0);
  Object* result;
  { MaybeObject* maybe_result =
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize the object.
  reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
  reinterpret_cast<FixedArray*>(result)->set_length(0);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }
  ASSERT(length > 0);
  // Use the general function if we're forced to always allocate.
  if (always_allocate()) return AllocateFixedArray(length, TENURED);
  // Allocate the raw data for a fixed array.
  int size = FixedArray::SizeFor(length);
  return size <= kMaxObjectSizeInNewSpace
      ? new_space_.AllocateRaw(size)
      : lo_space_->AllocateRawFixedArray(size);
}


MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
  int len = src->length();
  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }
  if (Heap::InNewSpace(obj)) {
    HeapObject* dst = HeapObject::cast(obj);
    dst->set_map(map);
    CopyBlock(dst->address() + kPointerSize,
              src->address() + kPointerSize,
              FixedArray::SizeFor(len) - kPointerSize);
    return obj;
  }
  HeapObject::cast(obj)->set_map(map);
  FixedArray* result = FixedArray::cast(obj);
  result->set_length(len);

  // Copy the content
  AssertNoAllocation no_gc;
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
  return result;
}

MaybeObject* Heap::AllocateFixedArray(int length) {
  ASSERT(length >= 0);
  if (length == 0) return empty_fixed_array();
  Object* result;
  { MaybeObject* maybe_result = AllocateRawFixedArray(length);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }
  // Initialize header.
  FixedArray* array = reinterpret_cast<FixedArray*>(result);
  array->set_map(fixed_array_map());
  array->set_length(length);
  // Initialize body.
  ASSERT(!Heap::InNewSpace(undefined_value()));
  MemsetPointer(array->data_start(), undefined_value(), length);
  return result;
}


MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
  if (length < 0 || length > FixedArray::kMaxLength) {
    return Failure::OutOfMemoryException();
  }

  AllocationSpace space =
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
  int size = FixedArray::SizeFor(length);
  if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
    // Too big for new space.
    space = LO_SPACE;
  } else if (space == OLD_POINTER_SPACE &&
             size > MaxObjectSizeInPagedSpace()) {
    // Too big for old pointer space.
    space = LO_SPACE;
  }

  AllocationSpace retry_space =
      (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;

  return AllocateRaw(size, space, retry_space);
}

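// Shared helper for the pretenurable FixedArray allocators: allocates the
// backing store and memsets every element to |filler| (undefined or the hole
// in the two public variants below).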
MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
    int length,
    PretenureFlag pretenure,
    Object* filler) {
  ASSERT(length >= 0);
  ASSERT(Heap::empty_fixed_array()->IsFixedArray());
  if (length == 0) return Heap::empty_fixed_array();

  ASSERT(!Heap::InNewSpace(filler));
  Object* result;
  { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
    if (!maybe_result->ToObject(&result)) return maybe_result;
  }

  HeapObject::cast(result)->set_map(Heap::fixed_array_map());
  FixedArray* array = FixedArray::cast(result);
  array->set_length(length);
  MemsetPointer(array->data_start(), filler, length);
  return array;
}


MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
}


MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
                                               PretenureFlag pretenure) {
  return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
}


MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
  if (length == 0) return empty_fixed_array();

  Object* obj;
  { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
  }

  reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
  FixedArray::cast(obj)->set_length(length);
  return obj;
}

John Reck59135872010-11-02 12:39:01 -07003667MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3668 Object* result;
3669 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
3670 if (!maybe_result->ToObject(&result)) return maybe_result;
3671 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003672 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003673 ASSERT(result->IsHashTable());
3674 return result;
3675}
3676
3677
John Reck59135872010-11-02 12:39:01 -07003678MaybeObject* Heap::AllocateGlobalContext() {
3679 Object* result;
3680 { MaybeObject* maybe_result =
3681 Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3682 if (!maybe_result->ToObject(&result)) return maybe_result;
3683 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003684 Context* context = reinterpret_cast<Context*>(result);
3685 context->set_map(global_context_map());
3686 ASSERT(context->IsGlobalContext());
3687 ASSERT(result->IsContext());
3688 return result;
3689}
3690
3691
John Reck59135872010-11-02 12:39:01 -07003692MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003693 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003694 Object* result;
3695 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
3696 if (!maybe_result->ToObject(&result)) return maybe_result;
3697 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003698 Context* context = reinterpret_cast<Context*>(result);
3699 context->set_map(context_map());
3700 context->set_closure(function);
3701 context->set_fcontext(context);
3702 context->set_previous(NULL);
3703 context->set_extension(NULL);
3704 context->set_global(function->context()->global());
3705 ASSERT(!context->IsGlobalContext());
3706 ASSERT(context->is_function_context());
3707 ASSERT(result->IsContext());
3708 return result;
3709}
3710
3711
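// Allocates a 'with' context (or a 'catch' context when is_catch_context is
// true): a minimal-size context chained to the given previous context and
// holding the given extension object.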
John Reck59135872010-11-02 12:39:01 -07003712MaybeObject* Heap::AllocateWithContext(Context* previous,
3713 JSObject* extension,
3714 bool is_catch_context) {
3715 Object* result;
3716 { MaybeObject* maybe_result =
3717 Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3718 if (!maybe_result->ToObject(&result)) return maybe_result;
3719 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003720 Context* context = reinterpret_cast<Context*>(result);
3721 context->set_map(is_catch_context ? catch_context_map() : context_map());
3722 context->set_closure(previous->closure());
3723 context->set_fcontext(previous->fcontext());
3724 context->set_previous(previous);
3725 context->set_extension(extension);
3726 context->set_global(previous->global());
3727 ASSERT(!context->IsGlobalContext());
3728 ASSERT(!context->is_function_context());
3729 ASSERT(result->IsContext());
3730 return result;
3731}
3732
3733
John Reck59135872010-11-02 12:39:01 -07003734MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003735 Map* map;
3736 switch (type) {
3737#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3738STRUCT_LIST(MAKE_CASE)
3739#undef MAKE_CASE
3740 default:
3741 UNREACHABLE();
3742 return Failure::InternalError();
3743 }
3744 int size = map->instance_size();
3745 AllocationSpace space =
3746 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003747 Object* result;
3748 { MaybeObject* maybe_result = Heap::Allocate(map, space);
3749 if (!maybe_result->ToObject(&result)) return maybe_result;
3750 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003751 Struct::cast(result)->InitializeBody(size);
3752 return result;
3753}
3754
3755
3756bool Heap::IdleNotification() {
3757 static const int kIdlesBeforeScavenge = 4;
3758 static const int kIdlesBeforeMarkSweep = 7;
3759 static const int kIdlesBeforeMarkCompact = 8;
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003760 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
3761 static const int kGCsBetweenCleanup = 4;
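  // The idle heuristic escalates with consecutive notifications: after
  // kIdlesBeforeScavenge idles a scavenge (or a context-disposal full GC)
  // runs, after kIdlesBeforeMarkSweep the compilation cache is cleared and a
  // non-compacting full GC runs, and after kIdlesBeforeMarkCompact a
  // compacting full GC finishes the cycle.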
Steve Blocka7e24c12009-10-30 11:49:00 +00003762 static int number_idle_notifications = 0;
3763 static int last_gc_count = gc_count_;
3764
Steve Block6ded16b2010-05-10 14:33:55 +01003765 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003766 bool finished = false;
3767
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003768 // Reset the number of idle notifications received when a number of
3769 // GCs have taken place. This allows another round of cleanup based
3770 // on idle notifications if enough work has been carried out to
3771 // provoke a number of garbage collections.
3772 if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
3773 number_idle_notifications =
3774 Min(number_idle_notifications + 1, kMaxIdleCount);
Steve Blocka7e24c12009-10-30 11:49:00 +00003775 } else {
3776 number_idle_notifications = 0;
3777 last_gc_count = gc_count_;
3778 }
3779
3780 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003781 if (contexts_disposed_ > 0) {
3782 HistogramTimerScope scope(&Counters::gc_context);
3783 CollectAllGarbage(false);
3784 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003785 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003786 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003787 new_space_.Shrink();
3788 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003789 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003790 // Before doing the mark-sweep collections we clear the
3791 // compilation cache to avoid hanging on to source code and
3792 // generated code for cached functions.
3793 CompilationCache::Clear();
3794
Steve Blocka7e24c12009-10-30 11:49:00 +00003795 CollectAllGarbage(false);
3796 new_space_.Shrink();
3797 last_gc_count = gc_count_;
3798
3799 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3800 CollectAllGarbage(true);
3801 new_space_.Shrink();
3802 last_gc_count = gc_count_;
Steve Blocka7e24c12009-10-30 11:49:00 +00003803 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003804
3805 } else if (contexts_disposed_ > 0) {
3806 if (FLAG_expose_gc) {
3807 contexts_disposed_ = 0;
3808 } else {
3809 HistogramTimerScope scope(&Counters::gc_context);
3810 CollectAllGarbage(false);
3811 last_gc_count = gc_count_;
3812 }
3813 // If this is the first idle notification, we reset the
3814 // notification count to avoid letting idle notifications for
3815 // context disposal garbage collections start a potentially too
3816 // aggressive idle GC cycle.
3817 if (number_idle_notifications <= 1) {
3818 number_idle_notifications = 0;
3819 uncommit = false;
3820 }
Ben Murdochdb5a90a2011-01-06 18:27:03 +00003821 } else if (number_idle_notifications > kIdlesBeforeMarkCompact) {
3822 // If we have received more than kIdlesBeforeMarkCompact idle
3823 // notifications we do not perform any cleanup because we don't
3824 // expect to gain much by doing so.
3825 finished = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003826 }
3827
Steve Block6ded16b2010-05-10 14:33:55 +01003828 // Make sure that we have no pending context disposals and
3829 // conditionally uncommit from space.
3830 ASSERT(contexts_disposed_ == 0);
3831 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003832 return finished;
3833}
3834
3835
3836#ifdef DEBUG
3837
3838void Heap::Print() {
3839 if (!HasBeenSetup()) return;
3840 Top::PrintStack();
3841 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003842 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3843 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003844}
3845
3846
3847void Heap::ReportCodeStatistics(const char* title) {
3848 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3849 PagedSpace::ResetCodeStatistics();
3850 // We do not look for code in new space, map space, or old space. If code
3851 // somehow ends up in those spaces, we would miss it here.
3852 code_space_->CollectCodeStatistics();
3853 lo_space_->CollectCodeStatistics();
3854 PagedSpace::ReportCodeStatistics();
3855}
3856
3857
3858// This function expects that NewSpace's allocated objects histogram is
3859// populated (via a call to CollectStatistics or else as a side effect of a
3860// just-completed scavenge collection).
3861void Heap::ReportHeapStatistics(const char* title) {
3862 USE(title);
3863 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3864 title, gc_count_);
3865 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003866 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3867 old_gen_promotion_limit_);
3868 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3869 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003870
3871 PrintF("\n");
3872 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3873 GlobalHandles::PrintStats();
3874 PrintF("\n");
3875
3876 PrintF("Heap statistics : ");
3877 MemoryAllocator::ReportStatistics();
3878 PrintF("To space : ");
3879 new_space_.ReportStatistics();
3880 PrintF("Old pointer space : ");
3881 old_pointer_space_->ReportStatistics();
3882 PrintF("Old data space : ");
3883 old_data_space_->ReportStatistics();
3884 PrintF("Code space : ");
3885 code_space_->ReportStatistics();
3886 PrintF("Map space : ");
3887 map_space_->ReportStatistics();
3888 PrintF("Cell space : ");
3889 cell_space_->ReportStatistics();
3890 PrintF("Large object space : ");
3891 lo_space_->ReportStatistics();
3892 PrintF(">>>>>> ========================================= >>>>>>\n");
3893}
3894
3895#endif // DEBUG
3896
3897bool Heap::Contains(HeapObject* value) {
3898 return Contains(value->address());
3899}
3900
3901
3902bool Heap::Contains(Address addr) {
3903 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3904 return HasBeenSetup() &&
3905 (new_space_.ToSpaceContains(addr) ||
3906 old_pointer_space_->Contains(addr) ||
3907 old_data_space_->Contains(addr) ||
3908 code_space_->Contains(addr) ||
3909 map_space_->Contains(addr) ||
3910 cell_space_->Contains(addr) ||
3911 lo_space_->SlowContains(addr));
3912}
3913
3914
3915bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3916 return InSpace(value->address(), space);
3917}
3918
3919
3920bool Heap::InSpace(Address addr, AllocationSpace space) {
3921 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3922 if (!HasBeenSetup()) return false;
3923
3924 switch (space) {
3925 case NEW_SPACE:
3926 return new_space_.ToSpaceContains(addr);
3927 case OLD_POINTER_SPACE:
3928 return old_pointer_space_->Contains(addr);
3929 case OLD_DATA_SPACE:
3930 return old_data_space_->Contains(addr);
3931 case CODE_SPACE:
3932 return code_space_->Contains(addr);
3933 case MAP_SPACE:
3934 return map_space_->Contains(addr);
3935 case CELL_SPACE:
3936 return cell_space_->Contains(addr);
3937 case LO_SPACE:
3938 return lo_space_->SlowContains(addr);
3939 }
3940
3941 return false;
3942}
3943
3944
3945#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003946static void DummyScavengePointer(HeapObject** p) {
3947}
3948
3949
3950static void VerifyPointersUnderWatermark(
3951 PagedSpace* space,
3952 DirtyRegionCallback visit_dirty_region) {
3953 PageIterator it(space, PageIterator::PAGES_IN_USE);
3954
3955 while (it.has_next()) {
3956 Page* page = it.next();
3957 Address start = page->ObjectAreaStart();
3958 Address end = page->AllocationWatermark();
3959
3960 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3961 start,
3962 end,
3963 visit_dirty_region,
3964 &DummyScavengePointer);
3965 }
3966}
3967
3968
3969static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3970 LargeObjectIterator it(space);
3971 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3972 if (object->IsFixedArray()) {
3973 Address slot_address = object->address();
3974 Address end = object->address() + object->Size();
3975
3976 while (slot_address < end) {
3977 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
 3978 // When we are not in GC, the Heap::InNewSpace() predicate
 3979 // checks that pointers which satisfy it point into
 3980 // the active semispace.
3981 Heap::InNewSpace(*slot);
3982 slot_address += kPointerSize;
3983 }
3984 }
3985 }
3986}
3987
3988
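// Debug-only heap verification: checks the strong roots, each space's
// internal invariants, and that pointers below the page allocation
// watermarks are consistent with the dirty-region bookkeeping.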
Steve Blocka7e24c12009-10-30 11:49:00 +00003989void Heap::Verify() {
3990 ASSERT(HasBeenSetup());
3991
3992 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003993 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003994
3995 new_space_.Verify();
3996
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003997 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3998 old_pointer_space_->Verify(&dirty_regions_visitor);
3999 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004000
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004001 VerifyPointersUnderWatermark(old_pointer_space_,
4002 &IteratePointersInDirtyRegion);
4003 VerifyPointersUnderWatermark(map_space_,
4004 &IteratePointersInDirtyMapsRegion);
4005 VerifyPointersUnderWatermark(lo_space_);
4006
4007 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4008 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4009
4010 VerifyPointersVisitor no_dirty_regions_visitor;
4011 old_data_space_->Verify(&no_dirty_regions_visitor);
4012 code_space_->Verify(&no_dirty_regions_visitor);
4013 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00004014
4015 lo_space_->Verify();
4016}
4017#endif // DEBUG
4018
4019
John Reck59135872010-11-02 12:39:01 -07004020MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004021 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004022 Object* new_table;
4023 { MaybeObject* maybe_new_table =
4024 symbol_table()->LookupSymbol(string, &symbol);
4025 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4026 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004027 // Can't use set_symbol_table because SymbolTable::cast knows that
4028 // SymbolTable is a singleton and checks for identity.
4029 roots_[kSymbolTableRootIndex] = new_table;
4030 ASSERT(symbol != NULL);
4031 return symbol;
4032}
4033
4034
John Reck59135872010-11-02 12:39:01 -07004035MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004036 if (string->IsSymbol()) return string;
4037 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07004038 Object* new_table;
4039 { MaybeObject* maybe_new_table =
4040 symbol_table()->LookupString(string, &symbol);
4041 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4042 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004043 // Can't use set_symbol_table because SymbolTable::cast knows that
4044 // SymbolTable is a singleton and checks for identity.
4045 roots_[kSymbolTableRootIndex] = new_table;
4046 ASSERT(symbol != NULL);
4047 return symbol;
4048}
4049
4050
4051bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
4052 if (string->IsSymbol()) {
4053 *symbol = string;
4054 return true;
4055 }
4056 return symbol_table()->LookupSymbolIfExists(string, symbol);
4057}
4058
4059
4060#ifdef DEBUG
4061void Heap::ZapFromSpace() {
4062 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
4063 for (Address a = new_space_.FromSpaceLow();
4064 a < new_space_.FromSpaceHigh();
4065 a += kPointerSize) {
4066 Memory::Address_at(a) = kFromSpaceZapValue;
4067 }
4068}
4069#endif // DEBUG
4070
4071
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004072bool Heap::IteratePointersInDirtyRegion(Address start,
4073 Address end,
4074 ObjectSlotCallback copy_object_func) {
4075 Address slot_address = start;
4076 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004077
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004078 while (slot_address < end) {
4079 Object** slot = reinterpret_cast<Object**>(slot_address);
4080 if (Heap::InNewSpace(*slot)) {
4081 ASSERT((*slot)->IsHeapObject());
4082 copy_object_func(reinterpret_cast<HeapObject**>(slot));
4083 if (Heap::InNewSpace(*slot)) {
4084 ASSERT((*slot)->IsHeapObject());
4085 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00004086 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004087 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004088 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00004089 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004090 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00004091}
4092
4093
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004094// Compute start address of the first map following given addr.
4095static inline Address MapStartAlign(Address addr) {
4096 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4097 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4098}
Steve Blocka7e24c12009-10-30 11:49:00 +00004099
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004100
4101// Compute end address of the first map preceding given addr.
4102static inline Address MapEndAlign(Address addr) {
4103 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4104 return page + ((addr - page) / Map::kSize * Map::kSize);
4105}
4106
4107
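// Visits the pointer fields of every map in the map-aligned range
// [start, end). Map space pages hold Map objects back to back, so only the
// [kPointerFieldsBeginOffset, kPointerFieldsEndOffset) slice of each map is
// scanned for pointers into new space.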
4108static bool IteratePointersInDirtyMaps(Address start,
4109 Address end,
4110 ObjectSlotCallback copy_object_func) {
4111 ASSERT(MapStartAlign(start) == start);
4112 ASSERT(MapEndAlign(end) == end);
4113
4114 Address map_address = start;
4115 bool pointers_to_new_space_found = false;
4116
4117 while (map_address < end) {
4118 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
4119 ASSERT(Memory::Object_at(map_address)->IsMap());
4120
4121 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4122 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4123
4124 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
4125 pointer_fields_end,
4126 copy_object_func)) {
4127 pointers_to_new_space_found = true;
4128 }
4129
4130 map_address += Map::kSize;
4131 }
4132
4133 return pointers_to_new_space_found;
4134}
4135
4136
4137bool Heap::IteratePointersInDirtyMapsRegion(
4138 Address start,
4139 Address end,
4140 ObjectSlotCallback copy_object_func) {
4141 Address map_aligned_start = MapStartAlign(start);
4142 Address map_aligned_end = MapEndAlign(end);
4143
4144 bool contains_pointers_to_new_space = false;
4145
4146 if (map_aligned_start != start) {
4147 Address prev_map = map_aligned_start - Map::kSize;
4148 ASSERT(Memory::Object_at(prev_map)->IsMap());
4149
4150 Address pointer_fields_start =
4151 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4152
4153 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004154 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004155
4156 contains_pointers_to_new_space =
4157 IteratePointersInDirtyRegion(pointer_fields_start,
4158 pointer_fields_end,
4159 copy_object_func)
4160 || contains_pointers_to_new_space;
4161 }
4162
4163 contains_pointers_to_new_space =
4164 IteratePointersInDirtyMaps(map_aligned_start,
4165 map_aligned_end,
4166 copy_object_func)
4167 || contains_pointers_to_new_space;
4168
4169 if (map_aligned_end != end) {
4170 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4171
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004172 Address pointer_fields_start =
4173 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004174
4175 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004176 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004177
4178 contains_pointers_to_new_space =
4179 IteratePointersInDirtyRegion(pointer_fields_start,
4180 pointer_fields_end,
4181 copy_object_func)
4182 || contains_pointers_to_new_space;
4183 }
4184
4185 return contains_pointers_to_new_space;
4186}
4187
4188
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004189void Heap::IterateAndMarkPointersToFromSpace(Address start,
4190 Address end,
4191 ObjectSlotCallback callback) {
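  // Visit every slot in [start, end); slots that point into from-space are
  // passed to the callback (which typically copies the object), and the
  // slot's region is marked dirty only if the updated slot still points into
  // new space.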
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004192 Address slot_address = start;
4193 Page* page = Page::FromAddress(start);
4194
4195 uint32_t marks = page->GetRegionMarks();
4196
4197 while (slot_address < end) {
4198 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004199 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004200 ASSERT((*slot)->IsHeapObject());
4201 callback(reinterpret_cast<HeapObject**>(slot));
4202 if (Heap::InNewSpace(*slot)) {
4203 ASSERT((*slot)->IsHeapObject());
4204 marks |= page->GetRegionMaskForAddress(slot_address);
4205 }
4206 }
4207 slot_address += kPointerSize;
4208 }
4209
4210 page->SetRegionMarks(marks);
4211}
4212
4213
4214uint32_t Heap::IterateDirtyRegions(
4215 uint32_t marks,
4216 Address area_start,
4217 Address area_end,
4218 DirtyRegionCallback visit_dirty_region,
4219 ObjectSlotCallback copy_object_func) {
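  // The area is walked in three steps: the (possibly partial) first region,
  // then the whole regions lying entirely inside the area, and finally the
  // trailing partial region, shifting the dirty-bit mask by one region each
  // time. Only regions whose bit is set in 'marks' are visited, and a bit is
  // kept in 'newmarks' only if the region still contains new-space pointers.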
4220 uint32_t newmarks = 0;
4221 uint32_t mask = 1;
4222
4223 if (area_start >= area_end) {
4224 return newmarks;
4225 }
4226
4227 Address region_start = area_start;
4228
 4229 // area_start does not necessarily coincide with the start of the first region.
 4230 // Thus, to calculate the beginning of the next region we have to align
4231 // area_start by Page::kRegionSize.
4232 Address second_region =
4233 reinterpret_cast<Address>(
4234 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4235 ~Page::kRegionAlignmentMask);
4236
4237 // Next region might be beyond area_end.
4238 Address region_end = Min(second_region, area_end);
4239
4240 if (marks & mask) {
4241 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4242 newmarks |= mask;
4243 }
4244 }
4245 mask <<= 1;
4246
 4247 // Iterate subsequent regions which lie fully inside [area_start, area_end).
4248 region_start = region_end;
4249 region_end = region_start + Page::kRegionSize;
4250
4251 while (region_end <= area_end) {
4252 if (marks & mask) {
4253 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4254 newmarks |= mask;
4255 }
4256 }
4257
4258 region_start = region_end;
4259 region_end = region_start + Page::kRegionSize;
4260
4261 mask <<= 1;
4262 }
4263
4264 if (region_start != area_end) {
 4265 // A small piece of the area was left un-iterated because area_end does not
 4266 // coincide with a region end. Check whether the region covering the last
 4267 // part of the area is dirty.
4268 if (marks & mask) {
4269 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
4270 newmarks |= mask;
4271 }
4272 }
4273 }
4274
4275 return newmarks;
4276}
4277
4278
4279
4280void Heap::IterateDirtyRegions(
4281 PagedSpace* space,
4282 DirtyRegionCallback visit_dirty_region,
4283 ObjectSlotCallback copy_object_func,
4284 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004285
4286 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004287
Steve Blocka7e24c12009-10-30 11:49:00 +00004288 while (it.has_next()) {
4289 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004290 uint32_t marks = page->GetRegionMarks();
4291
4292 if (marks != Page::kAllRegionsCleanMarks) {
4293 Address start = page->ObjectAreaStart();
4294
 4295 // Do not try to visit pointers beyond the page allocation watermark.
 4296 // The page can contain garbage pointers there.
4297 Address end;
4298
4299 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4300 page->IsWatermarkValid()) {
4301 end = page->AllocationWatermark();
4302 } else {
4303 end = page->CachedAllocationWatermark();
4304 }
4305
4306 ASSERT(space == old_pointer_space_ ||
4307 (space == map_space_ &&
4308 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4309
4310 page->SetRegionMarks(IterateDirtyRegions(marks,
4311 start,
4312 end,
4313 visit_dirty_region,
4314 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004315 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004316
4317 // Mark page watermark as invalid to maintain watermark validity invariant.
4318 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4319 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004320 }
4321}
4322
4323
Steve Blockd0582a62009-12-15 09:54:21 +00004324void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4325 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004326 IterateWeakRoots(v, mode);
4327}
4328
4329
4330void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004331 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004332 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004333 if (mode != VISIT_ALL_IN_SCAVENGE) {
4334 // Scavenge collections have special processing for this.
4335 ExternalStringTable::Iterate(v);
4336 }
4337 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004338}
4339
4340
Steve Blockd0582a62009-12-15 09:54:21 +00004341void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004342 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004343 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004344
Iain Merrick75681382010-08-19 15:07:18 +01004345 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004346 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004347
4348 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004349 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004350 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004351 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004352 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004353 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004354
4355#ifdef ENABLE_DEBUGGER_SUPPORT
4356 Debug::Iterate(v);
4357#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004358 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004359 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004360 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004361
4362 // Iterate over local handles in handle scopes.
4363 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004364 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004365
Leon Clarkee46be812010-01-19 14:06:41 +00004366 // Iterate over the builtin code objects and code stubs in the
4367 // heap. Note that it is not necessary to iterate over code objects
4368 // on scavenge collections.
4369 if (mode != VISIT_ALL_IN_SCAVENGE) {
4370 Builtins::IterateBuiltins(v);
4371 }
Steve Blockd0582a62009-12-15 09:54:21 +00004372 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004373
4374 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004375 if (mode == VISIT_ONLY_STRONG) {
4376 GlobalHandles::IterateStrongRoots(v);
4377 } else {
4378 GlobalHandles::IterateAllRoots(v);
4379 }
4380 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004381
4382 // Iterate over pointers being held by inactive threads.
4383 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004384 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004385
4386 // Iterate over the pointers the Serialization/Deserialization code is
4387 // holding.
4388 // During garbage collection this keeps the partial snapshot cache alive.
4389 // During deserialization of the startup snapshot this creates the partial
4390 // snapshot cache and deserializes the objects it refers to. During
4391 // serialization this does nothing, since the partial snapshot cache is
4392 // empty. However the next thing we do is create the partial snapshot,
4393 // filling up the partial snapshot cache with objects it needs as we go.
4394 SerializerDeserializer::Iterate(v);
4395 // We don't do a v->Synchronize call here, because in debug mode that will
4396 // output a flag to the snapshot. However at this point the serializer and
4397 // deserializer are deliberately a little unsynchronized (see above) so the
4398 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004399}
Steve Blocka7e24c12009-10-30 11:49:00 +00004400
4401
4402// Flag is set when the heap has been configured. The heap can be repeatedly
4403// configured through the API until it is set up.
4404static bool heap_configured = false;
4405
4406// TODO(1236194): Since the heap size is configurable on the command line
4407// and through the API, we should gracefully handle the case that the heap
4408// size is not big enough to fit all the initial objects.
Russell Brenner90bac252010-11-18 13:33:46 -08004409bool Heap::ConfigureHeap(int max_semispace_size,
4410 int max_old_gen_size,
4411 int max_executable_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004412 if (HasBeenSetup()) return false;
4413
Steve Block3ce2e202009-11-05 08:53:23 +00004414 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4415
4416 if (Snapshot::IsEnabled()) {
4417 // If we are using a snapshot we always reserve the default amount
4418 // of memory for each semispace because code in the snapshot has
4419 // write-barrier code that relies on the size and alignment of new
4420 // space. We therefore cannot use a larger max semispace size
4421 // than the default reserved semispace size.
4422 if (max_semispace_size_ > reserved_semispace_size_) {
4423 max_semispace_size_ = reserved_semispace_size_;
4424 }
4425 } else {
4426 // If we are not using snapshots we reserve space for the actual
4427 // max semispace size.
4428 reserved_semispace_size_ = max_semispace_size_;
4429 }
4430
4431 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Russell Brenner90bac252010-11-18 13:33:46 -08004432 if (max_executable_size > 0) {
4433 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
4434 }
4435
4436 // The max executable size must be less than or equal to the max old
4437 // generation size.
4438 if (max_executable_size_ > max_old_generation_size_) {
4439 max_executable_size_ = max_old_generation_size_;
4440 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004441
4442 // The new space size must be a power of two to support single-bit testing
4443 // for containment.
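  // (A power-of-two size allows containment to be decided with a single mask
  //  test, roughly (addr & ~(size - 1)) == space_start; this is a sketch of
  //  the idea, not the exact expression used by NewSpace.)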
Steve Block3ce2e202009-11-05 08:53:23 +00004444 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4445 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4446 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4447 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004448
4449 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004450 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004451
4452 heap_configured = true;
4453 return true;
4454}
4455
4456
4457bool Heap::ConfigureHeapDefault() {
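  // FLAG_max_new_space_size counts both semispaces in KB, so it is halved and
  // converted to bytes to give the per-semispace maximum passed below.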
Russell Brenner90bac252010-11-18 13:33:46 -08004458 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB,
4459 FLAG_max_old_space_size * MB,
4460 FLAG_max_executable_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004461}
4462
4463
Ben Murdochbb769b22010-08-11 14:56:33 +01004464void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004465 *stats->start_marker = HeapStats::kStartMarker;
4466 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004467 *stats->new_space_size = new_space_.SizeAsInt();
4468 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004469 *stats->old_pointer_space_size = old_pointer_space_->Size();
4470 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4471 *stats->old_data_space_size = old_data_space_->Size();
4472 *stats->old_data_space_capacity = old_data_space_->Capacity();
4473 *stats->code_space_size = code_space_->Size();
4474 *stats->code_space_capacity = code_space_->Capacity();
4475 *stats->map_space_size = map_space_->Size();
4476 *stats->map_space_capacity = map_space_->Capacity();
4477 *stats->cell_space_size = cell_space_->Size();
4478 *stats->cell_space_capacity = cell_space_->Capacity();
4479 *stats->lo_space_size = lo_space_->Size();
4480 GlobalHandles::RecordStats(stats);
Ben Murdochbb769b22010-08-11 14:56:33 +01004481 *stats->memory_allocator_size = MemoryAllocator::Size();
4482 *stats->memory_allocator_capacity =
4483 MemoryAllocator::Size() + MemoryAllocator::Available();
Iain Merrick75681382010-08-19 15:07:18 +01004484 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004485 if (take_snapshot) {
Ben Murdochb0fe1622011-05-05 13:52:32 +01004486 HeapIterator iterator(HeapIterator::kFilterFreeListNodes);
Ben Murdochbb769b22010-08-11 14:56:33 +01004487 for (HeapObject* obj = iterator.next();
4488 obj != NULL;
4489 obj = iterator.next()) {
Ben Murdochbb769b22010-08-11 14:56:33 +01004490 InstanceType type = obj->map()->instance_type();
4491 ASSERT(0 <= type && type <= LAST_TYPE);
4492 stats->objects_per_type[type]++;
4493 stats->size_per_type[type] += obj->Size();
4494 }
4495 }
Steve Blockd0582a62009-12-15 09:54:21 +00004496}
4497
4498
Ben Murdochf87a2032010-10-22 12:50:53 +01004499intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004500 return old_pointer_space_->Size()
4501 + old_data_space_->Size()
4502 + code_space_->Size()
4503 + map_space_->Size()
4504 + cell_space_->Size()
4505 + lo_space_->Size();
4506}
4507
4508
4509int Heap::PromotedExternalMemorySize() {
4510 if (amount_of_external_allocated_memory_
4511 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4512 return amount_of_external_allocated_memory_
4513 - amount_of_external_allocated_memory_at_last_global_gc_;
4514}
4515
4516
4517bool Heap::Setup(bool create_heap_objects) {
4518 // Initialize heap spaces and initial maps and objects. Whenever something
4519 // goes wrong, just return false. The caller should check the results and
4520 // call Heap::TearDown() to release allocated memory.
4521 //
 4522 // If the heap is not yet configured (e.g., through the API), configure it.
4523 // Configuration is based on the flags new-space-size (really the semispace
4524 // size) and old-space-size if set or the initial values of semispace_size_
4525 // and old_generation_size_ otherwise.
4526 if (!heap_configured) {
4527 if (!ConfigureHeapDefault()) return false;
4528 }
4529
Iain Merrick75681382010-08-19 15:07:18 +01004530 ScavengingVisitor::Initialize();
4531 NewSpaceScavenger::Initialize();
4532 MarkCompactCollector::Initialize();
4533
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004534 MarkMapPointersAsEncoded(false);
4535
Steve Blocka7e24c12009-10-30 11:49:00 +00004536 // Set up the memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004537 // space. The chunk is double the size of the requested reserved
4538 // new space size to ensure that we can find a pair of semispaces that
4539 // are contiguous and aligned to their size.
Russell Brenner90bac252010-11-18 13:33:46 -08004540 if (!MemoryAllocator::Setup(MaxReserved(), MaxExecutableSize())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004541 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00004542 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004543 if (chunk == NULL) return false;
4544
4545 // Align the pair of semispaces to their size, which must be a power
4546 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004547 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004548 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4549 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4550 return false;
4551 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004552
4553 // Initialize old pointer space.
4554 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004555 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004556 if (old_pointer_space_ == NULL) return false;
4557 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4558
4559 // Initialize old data space.
4560 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004561 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004562 if (old_data_space_ == NULL) return false;
4563 if (!old_data_space_->Setup(NULL, 0)) return false;
4564
4565 // Initialize the code space, set its maximum capacity to the old
4566 // generation size. It needs executable memory.
4567 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4568 // virtual address space, so that they can call each other with near calls.
4569 if (code_range_size_ > 0) {
4570 if (!CodeRange::Setup(code_range_size_)) {
4571 return false;
4572 }
4573 }
4574
4575 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004576 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004577 if (code_space_ == NULL) return false;
4578 if (!code_space_->Setup(NULL, 0)) return false;
4579
4580 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00004581 map_space_ = new MapSpace(FLAG_use_big_map_space
4582 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004583 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4584 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004585 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004586 if (map_space_ == NULL) return false;
4587 if (!map_space_->Setup(NULL, 0)) return false;
4588
4589 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00004590 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004591 if (cell_space_ == NULL) return false;
4592 if (!cell_space_->Setup(NULL, 0)) return false;
4593
 4594 // The large object space may contain code or data. We set the memory
4595 // to be non-executable here for safety, but this means we need to enable it
4596 // explicitly when allocating large code objects.
4597 lo_space_ = new LargeObjectSpace(LO_SPACE);
4598 if (lo_space_ == NULL) return false;
4599 if (!lo_space_->Setup()) return false;
4600
4601 if (create_heap_objects) {
4602 // Create initial maps.
4603 if (!CreateInitialMaps()) return false;
4604 if (!CreateApiObjects()) return false;
4605
4606 // Create initial objects
4607 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004608
4609 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004610 }
4611
Ben Murdochf87a2032010-10-22 12:50:53 +01004612 LOG(IntPtrTEvent("heap-capacity", Capacity()));
4613 LOG(IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004614
Steve Block3ce2e202009-11-05 08:53:23 +00004615#ifdef ENABLE_LOGGING_AND_PROFILING
4616 // This should be called only after initial objects have been created.
4617 ProducerHeapProfile::Setup();
4618#endif
4619
Steve Blocka7e24c12009-10-30 11:49:00 +00004620 return true;
4621}
4622
4623
Steve Blockd0582a62009-12-15 09:54:21 +00004624void Heap::SetStackLimits() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004625 // On 64-bit machines, pointers are generally out of range of Smis. We write
 4626 // something that looks like an out-of-range Smi to the GC.
4627
Steve Blockd0582a62009-12-15 09:54:21 +00004628 // Set up the special root array entries containing the stack limits.
 4629 // These are actually addresses, but the tag makes the GC ignore them.
Steve Blocka7e24c12009-10-30 11:49:00 +00004630 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004631 reinterpret_cast<Object*>(
4632 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
4633 roots_[kRealStackLimitRootIndex] =
4634 reinterpret_cast<Object*>(
4635 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004636}
4637
4638
4639void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004640 if (FLAG_print_cumulative_gc_stat) {
4641 PrintF("\n\n");
4642 PrintF("gc_count=%d ", gc_count_);
4643 PrintF("mark_sweep_count=%d ", ms_count_);
4644 PrintF("mark_compact_count=%d ", mc_count_);
4645 PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
4646 PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004647 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
4648 GCTracer::get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004649 PrintF("\n\n");
4650 }
4651
Steve Blocka7e24c12009-10-30 11:49:00 +00004652 GlobalHandles::TearDown();
4653
Leon Clarkee46be812010-01-19 14:06:41 +00004654 ExternalStringTable::TearDown();
4655
Steve Blocka7e24c12009-10-30 11:49:00 +00004656 new_space_.TearDown();
4657
4658 if (old_pointer_space_ != NULL) {
4659 old_pointer_space_->TearDown();
4660 delete old_pointer_space_;
4661 old_pointer_space_ = NULL;
4662 }
4663
4664 if (old_data_space_ != NULL) {
4665 old_data_space_->TearDown();
4666 delete old_data_space_;
4667 old_data_space_ = NULL;
4668 }
4669
4670 if (code_space_ != NULL) {
4671 code_space_->TearDown();
4672 delete code_space_;
4673 code_space_ = NULL;
4674 }
4675
4676 if (map_space_ != NULL) {
4677 map_space_->TearDown();
4678 delete map_space_;
4679 map_space_ = NULL;
4680 }
4681
4682 if (cell_space_ != NULL) {
4683 cell_space_->TearDown();
4684 delete cell_space_;
4685 cell_space_ = NULL;
4686 }
4687
4688 if (lo_space_ != NULL) {
4689 lo_space_->TearDown();
4690 delete lo_space_;
4691 lo_space_ = NULL;
4692 }
4693
4694 MemoryAllocator::TearDown();
4695}
4696
4697
4698void Heap::Shrink() {
4699 // Try to shrink all paged spaces.
4700 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004701 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
4702 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00004703}
4704
4705
4706#ifdef ENABLE_HEAP_PROTECTION
4707
4708void Heap::Protect() {
4709 if (HasBeenSetup()) {
4710 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004711 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4712 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004713 }
4714}
4715
4716
4717void Heap::Unprotect() {
4718 if (HasBeenSetup()) {
4719 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004720 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4721 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004722 }
4723}
4724
4725#endif
4726
4727
Steve Block6ded16b2010-05-10 14:33:55 +01004728void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
4729 ASSERT(callback != NULL);
4730 GCPrologueCallbackPair pair(callback, gc_type);
4731 ASSERT(!gc_prologue_callbacks_.Contains(pair));
4732 return gc_prologue_callbacks_.Add(pair);
4733}
4734
4735
4736void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
4737 ASSERT(callback != NULL);
4738 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
4739 if (gc_prologue_callbacks_[i].callback == callback) {
4740 gc_prologue_callbacks_.Remove(i);
4741 return;
4742 }
4743 }
4744 UNREACHABLE();
4745}
4746
4747
4748void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
4749 ASSERT(callback != NULL);
4750 GCEpilogueCallbackPair pair(callback, gc_type);
4751 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
4752 return gc_epilogue_callbacks_.Add(pair);
4753}
4754
4755
4756void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
4757 ASSERT(callback != NULL);
4758 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
4759 if (gc_epilogue_callbacks_[i].callback == callback) {
4760 gc_epilogue_callbacks_.Remove(i);
4761 return;
4762 }
4763 }
4764 UNREACHABLE();
4765}
4766
4767
Steve Blocka7e24c12009-10-30 11:49:00 +00004768#ifdef DEBUG
4769
4770class PrintHandleVisitor: public ObjectVisitor {
4771 public:
4772 void VisitPointers(Object** start, Object** end) {
4773 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01004774 PrintF(" handle %p to %p\n",
4775 reinterpret_cast<void*>(p),
4776 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00004777 }
4778};
4779
4780void Heap::PrintHandles() {
4781 PrintF("Handles:\n");
4782 PrintHandleVisitor v;
4783 HandleScopeImplementer::Iterate(&v);
4784}
4785
4786#endif
4787
4788
4789Space* AllSpaces::next() {
4790 switch (counter_++) {
4791 case NEW_SPACE:
4792 return Heap::new_space();
4793 case OLD_POINTER_SPACE:
4794 return Heap::old_pointer_space();
4795 case OLD_DATA_SPACE:
4796 return Heap::old_data_space();
4797 case CODE_SPACE:
4798 return Heap::code_space();
4799 case MAP_SPACE:
4800 return Heap::map_space();
4801 case CELL_SPACE:
4802 return Heap::cell_space();
4803 case LO_SPACE:
4804 return Heap::lo_space();
4805 default:
4806 return NULL;
4807 }
4808}
4809
4810
4811PagedSpace* PagedSpaces::next() {
4812 switch (counter_++) {
4813 case OLD_POINTER_SPACE:
4814 return Heap::old_pointer_space();
4815 case OLD_DATA_SPACE:
4816 return Heap::old_data_space();
4817 case CODE_SPACE:
4818 return Heap::code_space();
4819 case MAP_SPACE:
4820 return Heap::map_space();
4821 case CELL_SPACE:
4822 return Heap::cell_space();
4823 default:
4824 return NULL;
4825 }
4826}
4827
4828
4829
4830OldSpace* OldSpaces::next() {
4831 switch (counter_++) {
4832 case OLD_POINTER_SPACE:
4833 return Heap::old_pointer_space();
4834 case OLD_DATA_SPACE:
4835 return Heap::old_data_space();
4836 case CODE_SPACE:
4837 return Heap::code_space();
4838 default:
4839 return NULL;
4840 }
4841}
4842
4843
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004844SpaceIterator::SpaceIterator()
4845 : current_space_(FIRST_SPACE),
4846 iterator_(NULL),
4847 size_func_(NULL) {
4848}
4849
4850
4851SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
4852 : current_space_(FIRST_SPACE),
4853 iterator_(NULL),
4854 size_func_(size_func) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004855}
4856
4857
4858SpaceIterator::~SpaceIterator() {
4859 // Delete active iterator if any.
4860 delete iterator_;
4861}
4862
4863
4864bool SpaceIterator::has_next() {
4865 // Iterate until no more spaces.
4866 return current_space_ != LAST_SPACE;
4867}
4868
4869
4870ObjectIterator* SpaceIterator::next() {
4871 if (iterator_ != NULL) {
4872 delete iterator_;
4873 iterator_ = NULL;
4874 // Move to the next space
4875 current_space_++;
4876 if (current_space_ > LAST_SPACE) {
4877 return NULL;
4878 }
4879 }
4880
4881 // Return iterator for the new current space.
4882 return CreateIterator();
4883}
4884
4885
4886// Create an iterator for the current space to iterate over.
4887ObjectIterator* SpaceIterator::CreateIterator() {
4888 ASSERT(iterator_ == NULL);
4889
4890 switch (current_space_) {
4891 case NEW_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004892 iterator_ = new SemiSpaceIterator(Heap::new_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004893 break;
4894 case OLD_POINTER_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004895 iterator_ = new HeapObjectIterator(Heap::old_pointer_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004896 break;
4897 case OLD_DATA_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004898 iterator_ = new HeapObjectIterator(Heap::old_data_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004899 break;
4900 case CODE_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004901 iterator_ = new HeapObjectIterator(Heap::code_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004902 break;
4903 case MAP_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004904 iterator_ = new HeapObjectIterator(Heap::map_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004905 break;
4906 case CELL_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004907 iterator_ = new HeapObjectIterator(Heap::cell_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004908 break;
4909 case LO_SPACE:
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004910 iterator_ = new LargeObjectIterator(Heap::lo_space(), size_func_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004911 break;
4912 }
4913
 4914 // Return the newly allocated iterator.
4915 ASSERT(iterator_ != NULL);
4916 return iterator_;
4917}
4918
4919
Ben Murdochb0fe1622011-05-05 13:52:32 +01004920class HeapObjectsFilter {
4921 public:
4922 virtual ~HeapObjectsFilter() {}
4923 virtual bool SkipObject(HeapObject* object) = 0;
4924};
4925
4926
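// Both filters below work by pre-marking the objects that should be skipped
// (free-list nodes, or objects unreachable from the strong roots) and then
// clearing each mark again as HeapIterator consults SkipObject(), so a
// complete iteration leaves no marks behind.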
4927class FreeListNodesFilter : public HeapObjectsFilter {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004928 public:
4929 FreeListNodesFilter() {
4930 MarkFreeListNodes();
4931 }
4932
Ben Murdochb0fe1622011-05-05 13:52:32 +01004933 bool SkipObject(HeapObject* object) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08004934 if (object->IsMarked()) {
4935 object->ClearMark();
4936 return true;
4937 } else {
4938 return false;
4939 }
4940 }
4941
4942 private:
4943 void MarkFreeListNodes() {
4944 Heap::old_pointer_space()->MarkFreeListNodes();
4945 Heap::old_data_space()->MarkFreeListNodes();
4946 MarkCodeSpaceFreeListNodes();
4947 Heap::map_space()->MarkFreeListNodes();
4948 Heap::cell_space()->MarkFreeListNodes();
4949 }
4950
4951 void MarkCodeSpaceFreeListNodes() {
4952 // For code space, using FreeListNode::IsFreeListNode is OK.
4953 HeapObjectIterator iter(Heap::code_space());
4954 for (HeapObject* obj = iter.next_object();
4955 obj != NULL;
4956 obj = iter.next_object()) {
4957 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
4958 }
4959 }
4960
4961 AssertNoAllocation no_alloc;
4962};
4963
4964
Ben Murdochb0fe1622011-05-05 13:52:32 +01004965class UnreachableObjectsFilter : public HeapObjectsFilter {
4966 public:
4967 UnreachableObjectsFilter() {
4968 MarkUnreachableObjects();
4969 }
4970
4971 bool SkipObject(HeapObject* object) {
4972 if (object->IsMarked()) {
4973 object->ClearMark();
4974 return true;
4975 } else {
4976 return false;
4977 }
4978 }
4979
4980 private:
4981 class UnmarkingVisitor : public ObjectVisitor {
4982 public:
4983 UnmarkingVisitor() : list_(10) {}
4984
4985 void VisitPointers(Object** start, Object** end) {
4986 for (Object** p = start; p < end; p++) {
4987 if (!(*p)->IsHeapObject()) continue;
4988 HeapObject* obj = HeapObject::cast(*p);
4989 if (obj->IsMarked()) {
4990 obj->ClearMark();
4991 list_.Add(obj);
4992 }
4993 }
4994 }
4995
4996 bool can_process() { return !list_.is_empty(); }
4997
4998 void ProcessNext() {
4999 HeapObject* obj = list_.RemoveLast();
5000 obj->Iterate(this);
5001 }
5002
5003 private:
5004 List<HeapObject*> list_;
5005 };
5006
5007 void MarkUnreachableObjects() {
5008 HeapIterator iterator;
5009 for (HeapObject* obj = iterator.next();
5010 obj != NULL;
5011 obj = iterator.next()) {
5012 obj->SetMark();
5013 }
5014 UnmarkingVisitor visitor;
5015 Heap::IterateRoots(&visitor, VISIT_ONLY_STRONG);
5016 while (visitor.can_process())
5017 visitor.ProcessNext();
5018 }
5019
5020 AssertNoAllocation no_alloc;
5021};
5022
5023
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005024HeapIterator::HeapIterator()
5025 : filtering_(HeapIterator::kNoFiltering),
5026 filter_(NULL) {
5027 Init();
5028}
5029
5030
Ben Murdochb0fe1622011-05-05 13:52:32 +01005031HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005032 : filtering_(filtering),
5033 filter_(NULL) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005034 Init();
5035}
5036
5037
5038HeapIterator::~HeapIterator() {
5039 Shutdown();
5040}
5041
5042
5043void HeapIterator::Init() {
5044 // Start the iteration.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005045 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5046 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject);
5047 switch (filtering_) {
5048 case kFilterFreeListNodes:
5049 filter_ = new FreeListNodesFilter;
5050 break;
5051 case kFilterUnreachable:
5052 filter_ = new UnreachableObjectsFilter;
5053 break;
5054 default:
5055 break;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005056 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005057 object_iterator_ = space_iterator_->next();
5058}
5059
5060
5061void HeapIterator::Shutdown() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005062#ifdef DEBUG
Ben Murdochb0fe1622011-05-05 13:52:32 +01005063 // Assert that in filtering mode we have iterated through all
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005064 // objects. Otherwise, the heap will be left in an inconsistent state.
Ben Murdochb0fe1622011-05-05 13:52:32 +01005065 if (filtering_ != kNoFiltering) {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005066 ASSERT(object_iterator_ == NULL);
5067 }
5068#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00005069 // Make sure the last iterator is deallocated.
5070 delete space_iterator_;
5071 space_iterator_ = NULL;
5072 object_iterator_ = NULL;
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005073 delete filter_;
5074 filter_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005075}
5076
5077
Leon Clarked91b9f72010-01-27 17:25:45 +00005078HeapObject* HeapIterator::next() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005079 if (filter_ == NULL) return NextObject();
5080
5081 HeapObject* obj = NextObject();
Ben Murdochb0fe1622011-05-05 13:52:32 +01005082 while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08005083 return obj;
5084}
5085
5086
5087HeapObject* HeapIterator::NextObject() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005088 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00005089 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005090
Leon Clarked91b9f72010-01-27 17:25:45 +00005091 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005092 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00005093 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005094 } else {
 5095 // Go through the spaces looking for one that has objects.
5096 while (space_iterator_->has_next()) {
5097 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00005098 if (HeapObject* obj = object_iterator_->next_object()) {
5099 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00005100 }
5101 }
5102 }
5103 // Done with the last space.
5104 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00005105 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00005106}
5107
5108
5109void HeapIterator::reset() {
5110 // Restart the iterator.
5111 Shutdown();
5112 Init();
5113}
5114
5115
5116#ifdef DEBUG
5117
5118static bool search_for_any_global;
5119static Object* search_target;
5120static bool found_target;
5121static List<Object*> object_stack(20);
5122
5123
5124// Tags 0, 1, and 3 are used. Use 2 for marking a visited HeapObject.
5125static const int kMarkTag = 2;
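// An object is marked by replacing its map word with the map's address plus
// kMarkTag; the result no longer looks like a valid HeapObject pointer, which
// is how MarkObjectRecursively recognizes already-visited objects, and
// UnmarkObjectRecursively subtracts the tag to restore the original map.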
5126
5127static void MarkObjectRecursively(Object** p);
5128class MarkObjectVisitor : public ObjectVisitor {
5129 public:
5130 void VisitPointers(Object** start, Object** end) {
 5131 // Recursively mark all HeapObjects pointed to from [start, end)
5132 for (Object** p = start; p < end; p++) {
5133 if ((*p)->IsHeapObject())
5134 MarkObjectRecursively(p);
5135 }
5136 }
5137};
5138
5139static MarkObjectVisitor mark_visitor;
5140
5141static void MarkObjectRecursively(Object** p) {
5142 if (!(*p)->IsHeapObject()) return;
5143
5144 HeapObject* obj = HeapObject::cast(*p);
5145
5146 Object* map = obj->map();
5147
5148 if (!map->IsHeapObject()) return; // visited before
5149
5150 if (found_target) return; // stop if target found
5151 object_stack.Add(obj);
5152 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
5153 (!search_for_any_global && (obj == search_target))) {
5154 found_target = true;
5155 return;
5156 }
5157
5158 // not visited yet
5159 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5160
5161 Address map_addr = map_p->address();
5162
5163 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5164
5165 MarkObjectRecursively(&map);
5166
5167 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
5168 &mark_visitor);
5169
5170 if (!found_target) // don't pop if found the target
5171 object_stack.RemoveLast();
5172}
5173
5174
5175static void UnmarkObjectRecursively(Object** p);
5176class UnmarkObjectVisitor : public ObjectVisitor {
5177 public:
5178 void VisitPointers(Object** start, Object** end) {
 5179 // Recursively unmark all HeapObjects pointed to from [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        UnmarkObjectRecursively(p);
    }
  }
};

static UnmarkObjectVisitor unmark_visitor;

static void UnmarkObjectRecursively(Object** p) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map(reinterpret_cast<Map*>(map_p));

  UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   &unmark_visitor);
}


static void MarkRootObjectRecursively(Object** root) {
  if (search_for_any_global) {
    ASSERT(search_target == NULL);
  } else {
    ASSERT(search_target->IsHeapObject());
  }
  found_target = false;
  object_stack.Clear();

  MarkObjectRecursively(root);
  UnmarkObjectRecursively(root);

  if (found_target) {
    PrintF("=====================================\n");
    PrintF("====    Path to object    ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack.is_empty());
    for (int i = 0; i < object_stack.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}
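
// Rough shape of the output when a path is found (illustrative; each entry
// is whatever Object::Print() emits for that object):
//
//   =====================================
//   ====    Path to object    ====
//   =====================================
//
//   <root object on the path>
//
//        |
//        |
//        V
//
//   <next object on the path>
//   ...
//   =====================================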


// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        MarkRootObjectRecursively(p);
    }
  }
};


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  search_target = target;
  search_for_any_global = false;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  search_target = NULL;
  search_for_any_global = true;

  MarkRootVisitor root_visitor;
  IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
}
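
// Illustrative usage (editorial, not original commentary): these helpers are
// meant to be invoked manually in a debug build, e.g. from a debugger prompt
// or a temporary instrumentation site.  "suspect" is a hypothetical name:
//
//   Heap::TracePathToObject(suspect);  // path from a strong root to suspect
//   Heap::TracePathToGlobal();         // path to any JSGlobalObject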
#endif


static intptr_t CountTotalHolesSize() {
  intptr_t holes_size = 0;
  OldSpaces spaces;
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->AvailableFree();
  }
  return holes_size;
}
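
// Editorial note: the "holes" counted here are bytes of free-list space plus
// allocation waste across the old spaces.  GCTracer samples this value
// before and after a collection for the holes_size_before/holes_size_after
// fields of the --trace-gc-nvp output below.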


GCTracer::GCTracer()
    : start_time_(0.0),
      start_size_(0),
      gc_count_(0),
      full_gc_count_(0),
      is_compacting_(false),
      marked_count_(0),
      allocated_since_last_gc_(0),
      spent_in_mutator_(0),
      promoted_objects_size_(0) {
  // These two fields reflect the state of the previous full collection.
  // Set them before they are changed by the collector.
  previous_has_compacted_ = MarkCompactCollector::HasCompacted();
  previous_marked_count_ = MarkCompactCollector::previous_marked_count();
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
  start_time_ = OS::TimeCurrentMillis();
  start_size_ = Heap::SizeOfObjects();

  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
    scopes_[i] = 0;
  }

  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();

  allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;

  if (last_gc_end_timestamp_ > 0) {
    spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
  }
}


GCTracer::~GCTracer() {
  // Print ONE line iff one of the tracing flags is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (last_gc_end_timestamp_ == 0);

  alive_after_last_gc_ = Heap::SizeOfObjects();
  last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    max_gc_pause_ = Max(max_gc_pause_, time);
    max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
    if (!first_gc) {
      min_in_mutator_ = Min(min_in_mutator_,
                            static_cast<int>(spent_in_mutator_));
    }
  }

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    PrintF("%s %.1f -> %.1f MB, ",
           CollectorString(),
           static_cast<double>(start_size_) / MB,
           SizeOfHeapObjects());

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%d ms.\n", time);
  } else {
    PrintF("pause=%d ", time);
    PrintF("mutator=%d ",
           static_cast<int>(spent_in_mutator_));

    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
    PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
    PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
    PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);

    PrintF("\n");
  }

#if defined(ENABLE_LOGGING_AND_PROFILING)
  Heap::PrintShortHeapStatistics();
#endif
}
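
// Example trace lines this destructor can produce (numbers invented for
// illustration; the nvp variant is emitted as a single line and is wrapped
// here for readability):
//
//   With --trace-gc:
//     Scavenge 12.3 -> 8.1 MB, 4 ms.
//     Mark-sweep 46.5 -> 30.2 MB, 12 / 120 ms.
//
//   With --trace-gc-nvp:
//     pause=4 mutator=210 gc=s external=0 mark=0 sweep=0 sweepns=0 compact=0
//     total_size_before=12910592 total_size_after=8519680
//     holes_size_before=131072 holes_size_after=65536
//     allocated=4390912 promoted=262144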


const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return MarkCompactCollector::HasCompacted() ? "Mark-compact"
                                                  : "Mark-sweep";
  }
  return "Unknown GC";
}


int KeyedLookupCache::Hash(Map* map, String* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, String* name) {
  int index = Hash(map, name);
  Key& key = keys_[index];
  if ((key.map == map) && key.name->Equals(name)) {
    return field_offsets_[index];
  }
  return -1;
}


void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
  String* symbol;
  if (Heap::LookupSymbolIfExists(name, &symbol)) {
    int index = Hash(map, symbol);
    Key& key = keys_[index];
    key.map = map;
    key.name = symbol;
    field_offsets_[index] = field_offset;
  }
}
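
// Sketch of the intended protocol (editorial, not original documentation):
// a caller resolving a keyed property load tries the cache first and fills
// it once the field offset has been found the slow way.  "ResolveOffset" is
// a hypothetical stand-in for that slow path:
//
//   int offset = KeyedLookupCache::Lookup(map, name);
//   if (offset == -1) {
//     offset = ResolveOffset(map, name);  // hypothetical slow lookup
//     KeyedLookupCache::Update(map, name, offset);
//   }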


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];


int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}


DescriptorLookupCache::Key
DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];

int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (Bootstrapper::IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif
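
// Editorial note: with --gc-greedy set in a debug build, this check forces a
// new-space collection whenever it is called (except while bootstrapping or
// when allocation failure is disallowed).  Sprinkling such collections
// through the allocation paths helps flush out code that is not safe
// against objects moving during GC.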


TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
    : type_(t) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}
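
// Editorial note: each cache element keys on the two 32-bit halves of the
// double-precision input.  Initializing both halves to 0xffffffff encodes a
// NaN that the FPU never produces, so an empty slot can never collide with a
// genuine lookup key, and its output pointer stays NULL until the slot is
// filled.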


TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    if (Heap::InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
    ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  Verify();
}
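
// Editorial note: CleanUp() compacts both lists in place.  Entries that have
// been overwritten with the null sentinel (typically external strings that
// did not survive the last collection) are dropped, entries whose string was
// promoted out of new space migrate from new_space_strings_ to
// old_space_strings_, and Rewind() truncates each list to the surviving
// prefix.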


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


List<Object*> ExternalStringTable::new_space_strings_;
List<Object*> ExternalStringTable::old_space_strings_;

} }  // namespace v8::internal