// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "compilation-cache.h"
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "scanner.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "v8threads.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif


namespace v8 {
namespace internal {


String* Heap::hidden_symbol_;
Object* Heap::roots_[Heap::kRootListLength];
Object* Heap::global_contexts_list_;


NewSpace Heap::new_space_;
OldSpace* Heap::old_pointer_space_ = NULL;
OldSpace* Heap::old_data_space_ = NULL;
OldSpace* Heap::code_space_ = NULL;
MapSpace* Heap::map_space_ = NULL;
CellSpace* Heap::cell_space_ = NULL;
LargeObjectSpace* Heap::lo_space_ = NULL;

static const intptr_t kMinimumPromotionLimit = 2 * MB;
static const intptr_t kMinimumAllocationLimit = 8 * MB;

intptr_t Heap::old_gen_promotion_limit_ = kMinimumPromotionLimit;
intptr_t Heap::old_gen_allocation_limit_ = kMinimumAllocationLimit;

int Heap::old_gen_exhausted_ = false;

int Heap::amount_of_external_allocated_memory_ = 0;
int Heap::amount_of_external_allocated_memory_at_last_global_gc_ = 0;

// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
#if defined(ANDROID)
int Heap::max_semispace_size_ = 2*MB;
intptr_t Heap::max_old_generation_size_ = 192*MB;
int Heap::initial_semispace_size_ = 128*KB;
intptr_t Heap::code_range_size_ = 0;
#elif defined(V8_TARGET_ARCH_X64)
int Heap::max_semispace_size_ = 16*MB;
intptr_t Heap::max_old_generation_size_ = 1*GB;
int Heap::initial_semispace_size_ = 1*MB;
intptr_t Heap::code_range_size_ = 512*MB;
#else
int Heap::max_semispace_size_ = 8*MB;
intptr_t Heap::max_old_generation_size_ = 512*MB;
int Heap::initial_semispace_size_ = 512*KB;
intptr_t Heap::code_range_size_ = 0;
#endif

// The snapshot semispace size will be the default semispace size if
// snapshotting is used and will be the requested semispace size as
// set up by ConfigureHeap otherwise.
int Heap::reserved_semispace_size_ = Heap::max_semispace_size_;

List<Heap::GCPrologueCallbackPair> Heap::gc_prologue_callbacks_;
List<Heap::GCEpilogueCallbackPair> Heap::gc_epilogue_callbacks_;

GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;

// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.

// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
int Heap::survived_since_last_expansion_ = 0;
intptr_t Heap::external_allocation_limit_ = 0;

Heap::HeapState Heap::gc_state_ = NOT_IN_GC;

int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;

GCTracer* Heap::tracer_ = NULL;

int Heap::unflattened_strings_length_ = 0;

int Heap::always_allocate_scope_depth_ = 0;
int Heap::linear_allocation_scope_depth_ = 0;
int Heap::contexts_disposed_ = 0;

int Heap::young_survivors_after_last_gc_ = 0;
int Heap::high_survival_rate_period_length_ = 0;
double Heap::survival_rate_ = 0;
Heap::SurvivalRateTrend Heap::previous_survival_rate_trend_ = Heap::STABLE;
Heap::SurvivalRateTrend Heap::survival_rate_trend_ = Heap::STABLE;

#ifdef DEBUG
bool Heap::allocation_allowed_ = true;

int Heap::allocation_timeout_ = 0;
bool Heap::disallow_allocation_failure_ = false;
#endif  // DEBUG

intptr_t GCTracer::alive_after_last_gc_ = 0;
double GCTracer::last_gc_end_timestamp_ = 0.0;
int GCTracer::max_gc_pause_ = 0;
intptr_t GCTracer::max_alive_after_gc_ = 0;
int GCTracer::min_in_mutator_ = kMaxInt;

intptr_t Heap::Capacity() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetup()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      lo_space_->Size();
}


intptr_t Heap::Available() {
  if (!HasBeenSetup()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available();
}


bool Heap::HasBeenSetup() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return object->SizeFromMap(map_word.ToMap());
}

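// Note on the lookup below: while map pointers are encoded during a
// compacting collection, regions freed by the collector are tagged with the
// collector's free-space markers instead of a real map.  A block tagged
// kSingleFreeEncoding reports kIntSize, a block tagged kMultiFreeEncoding
// reads its size from the word that follows the marker; everything else is
// sized via its (decoded) map.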
int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
  ASSERT(!Heap::InNewSpace(object));  // Code only works for old objects.
  ASSERT(MarkCompactCollector::are_map_pointers_encoded());
  uint32_t marker = Memory::uint32_at(object->address());
  if (marker == MarkCompactCollector::kSingleFreeEncoding) {
    return kIntSize;
  } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
    return Memory::int_at(object->address() + kIntSize);
  } else {
    MapWord map_word = object->map_word();
    Address map_address = map_word.DecodeMapAddress(Heap::map_space());
    Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
    return object->SizeFromMap(map);
  }
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
  // Is global GC requested?
  if (space != NEW_SPACE || FLAG_gc_global) {
    Counters::gc_compactor_caused_by_request.Increment();
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationPromotionLimitReached()) {
    Counters::gc_compactor_caused_by_promoted_data.Increment();
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (MemoryAllocator::MaxAvailable() <= new_space_.Size()) {
    Counters::gc_compactor_caused_by_oldspace_exhaustion.Increment();
    return MARK_COMPACTOR;
  }

  // Default
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled with ENABLE_LOGGING_AND_PROFILING and --log-gc is set.  The
  // following logic is used to avoid double logging.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#elif defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("Before GC");
    new_space_.ClearHistograms();
  }
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif
}


#if defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintF("Memory allocator,   used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         MemoryAllocator::Size(),
         MemoryAllocator::Available());
  PrintF("New space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         Heap::new_space_.Size(),
         new_space_.Available());
  PrintF("Old pointers,       used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_pointer_space_->Size(),
         old_pointer_space_->Available(),
         old_pointer_space_->Waste());
  PrintF("Old data space,     used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         old_data_space_->Size(),
         old_data_space_->Available(),
         old_data_space_->Waste());
  PrintF("Code space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         code_space_->Size(),
         code_space_->Available(),
         code_space_->Waste());
  PrintF("Map space,          used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         map_space_->Size(),
         map_space_->Available(),
         map_space_->Waste());
  PrintF("Cell space,         used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d"
             ", waste: %8" V8_PTR_PREFIX "d\n",
         cell_space_->Size(),
         cell_space_->Available(),
         cell_space_->Waste());
  PrintF("Large object space, used: %8" V8_PTR_PREFIX "d"
             ", available: %8" V8_PTR_PREFIX "d\n",
         lo_space_->Size(),
         lo_space_->Available());
}
#endif


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG) && defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#elif defined(DEBUG)
  if (FLAG_heap_stats) ReportHeapStatistics("After GC");
#elif defined(ENABLE_LOGGING_AND_PROFILING)
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif
}
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)


void Heap::GarbageCollectionPrologue() {
  TranscendentalCache::Clear();
  ClearJSFunctionResultCaches();
  gc_count_++;
  unflattened_strings_length_ = 0;
#ifdef DEBUG
  ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
  allow_allocation(false);

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_gc_verbose) Print();
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsBeforeGC();
#endif
}

intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces;
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->Size();
  }
  return total;
}

void Heap::GarbageCollectionEpilogue() {
#ifdef DEBUG
  allow_allocation(true);
  ZapFromSpace();

  if (FLAG_verify_heap) {
    Verify();
  }

  if (FLAG_print_global_handles) GlobalHandles::Print();
  if (FLAG_print_handles) PrintHandles();
  if (FLAG_gc_verbose) Print();
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
#endif

  Counters::alive_after_last_gc.Set(static_cast<int>(SizeOfObjects()));

  Counters::symbol_table_capacity.Set(symbol_table()->Capacity());
  Counters::number_of_symbols.Set(symbol_table()->NumberOfElements());
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  ReportStatisticsAfterGC();
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
  Debug::AfterGarbageCollection();
#endif
}


void Heap::CollectAllGarbage(bool force_compaction) {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(force_compaction);
  CollectGarbage(OLD_POINTER_SPACE);
  MarkCompactCollector::SetForceCompaction(false);
}


void Heap::CollectAllAvailableGarbage() {
  // Since we are ignoring the return value, the exact choice of space does
  // not matter, so long as we do not specify NEW_SPACE, which would not
  // cause a full GC.
  MarkCompactCollector::SetForceCompaction(true);

  // Major GC would invoke weak handle callbacks on weakly reachable
  // handles, but won't collect weakly reachable objects until next
  // major GC.  Therefore if we collect aggressively and weak handle callback
  // has been invoked, we rerun major GC to release objects which become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot
  // hope that eventually there will be no weak callbacks invocations.
  // Therefore stop recollecting after several attempts.
  const int kMaxNumberOfAttempts = 7;
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
      break;
    }
  }
  MarkCompactCollector::SetForceCompaction(false);
}


bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
  // The VM is in the GC state until exiting this function.
  VMState state(GC);

#ifdef DEBUG
  // Reset the allocation timeout to the GC interval, but make sure to
  // allow at least a few allocations after a collection. The reason
  // for this is that we have a lot of allocation sequences and we
  // assume that a garbage collection will allow the subsequent
  // allocation attempts to go through.
  allocation_timeout_ = Max(6, FLAG_gc_interval);
#endif

  bool next_gc_likely_to_collect_more = false;

  { GCTracer tracer;
    GarbageCollectionPrologue();
    // The GC count was incremented in the prologue.  Tell the tracer about
    // it.
    tracer.set_gc_count(gc_count_);

    // Tell the tracer which collector we've selected.
    tracer.set_collector(collector);

    HistogramTimer* rate = (collector == SCAVENGER)
        ? &Counters::gc_scavenger
        : &Counters::gc_compactor;
    rate->Start();
    next_gc_likely_to_collect_more =
        PerformGarbageCollection(collector, &tracer);
    rate->Stop();

    GarbageCollectionEpilogue();
  }


#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_gc) HeapProfiler::WriteSample();
  if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif

  return next_gc_likely_to_collect_more;
}


void Heap::PerformScavenge() {
  GCTracer tracer;
  PerformGarbageCollection(SCAVENGER, &tracer);
}


#ifdef DEBUG
// Helper class for verifying the symbol table.
class SymbolTableVerifier : public ObjectVisitor {
 public:
  SymbolTableVerifier() { }
  void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject()) {
        // Check that the symbol is actually a symbol.
        ASSERT((*p)->IsNull() || (*p)->IsUndefined() || (*p)->IsSymbol());
      }
    }
  }
};
#endif  // DEBUG


static void VerifySymbolTable() {
#ifdef DEBUG
  SymbolTableVerifier verifier;
  Heap::symbol_table()->IterateElements(&verifier);
#endif  // DEBUG
}


void Heap::ReserveSpace(
    int new_space_size,
    int pointer_space_size,
    int data_space_size,
    int code_space_size,
    int map_space_size,
    int cell_space_size,
    int large_object_size) {
  NewSpace* new_space = Heap::new_space();
  PagedSpace* old_pointer_space = Heap::old_pointer_space();
  PagedSpace* old_data_space = Heap::old_data_space();
  PagedSpace* code_space = Heap::code_space();
  PagedSpace* map_space = Heap::map_space();
  PagedSpace* cell_space = Heap::cell_space();
  LargeObjectSpace* lo_space = Heap::lo_space();
  bool gc_performed = true;
  while (gc_performed) {
    gc_performed = false;
    if (!new_space->ReserveSpace(new_space_size)) {
      Heap::CollectGarbage(NEW_SPACE);
      gc_performed = true;
    }
    if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
      Heap::CollectGarbage(OLD_POINTER_SPACE);
      gc_performed = true;
    }
    if (!(old_data_space->ReserveSpace(data_space_size))) {
      Heap::CollectGarbage(OLD_DATA_SPACE);
      gc_performed = true;
    }
    if (!(code_space->ReserveSpace(code_space_size))) {
      Heap::CollectGarbage(CODE_SPACE);
      gc_performed = true;
    }
    if (!(map_space->ReserveSpace(map_space_size))) {
      Heap::CollectGarbage(MAP_SPACE);
      gc_performed = true;
    }
    if (!(cell_space->ReserveSpace(cell_space_size))) {
      Heap::CollectGarbage(CELL_SPACE);
      gc_performed = true;
    }
    // We add a slack-factor of 2 in order to have space for a series of
    // large-object allocations that are only just larger than the page size.
    large_object_size *= 2;
    // The ReserveSpace method on the large object space checks how much
    // we can expand the old generation.  This includes expansion caused by
    // allocation in the other spaces.
    large_object_size += cell_space_size + map_space_size + code_space_size +
        data_space_size + pointer_space_size;
    if (!(lo_space->ReserveSpace(large_object_size))) {
      Heap::CollectGarbage(LO_SPACE);
      gc_performed = true;
    }
  }
}


void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Try shrinking and try again.
  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->RelinkPageListInChunkOrder(true);
  }

  Shrink();
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed again.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context:
    FixedArray* caches =
        Context::cast(context)->jsfunction_result_caches();
    // Clear the caches:
    int length = caches->length();
    for (int i = 0; i < length; i++) {
      JSFunctionResultCache::cast(caches->get(i))->Clear();
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (Bootstrapper::IsActive()) return;

  Object* context = global_contexts_list_;
  while (!context->IsUndefined()) {
    Context::cast(context)->normalized_map_cache()->Clear();
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


#ifdef DEBUG

enum PageWatermarkValidity {
  ALL_VALID,
  ALL_INVALID
};

static void VerifyPageWatermarkValidity(PagedSpace* space,
                                        PageWatermarkValidity validity) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  bool expected_value = (validity == ALL_VALID);
  while (it.has_next()) {
    Page* page = it.next();
    ASSERT(page->IsWatermarkValid() == expected_value);
  }
}
#endif

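// Recomputes the young-generation survival rate after a collection:
// young_survivors_after_last_gc_ is expressed as a percentage of the new
// space size at the start of the GC, and the result is classified as
// INCREASING, DECREASING or STABLE relative to the previous rate.
// PerformGarbageCollection uses the trend to decide how aggressively the
// old generation limits should be raised after a mark-compact.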
void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
                                    GCTracer* tracer) {
  bool next_gc_likely_to_collect_more = false;

  if (collector != SCAVENGER) {
    PROFILE(CodeMovingGCEvent());
  }

  VerifySymbolTable();
  if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_prologue_callback_();
  }

  GCType gc_type =
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;

  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
    }
  }

  EnsureFromSpaceIsCommitted();

  int start_new_space_size = Heap::new_space()->SizeAsInt();

  if (collector == MARK_COMPACTOR) {
    // Perform mark-sweep with optional compaction.
    MarkCompact(tracer);

    bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
        IsStableOrIncreasingSurvivalTrend();

    UpdateSurvivalRateTrend(start_new_space_size);

    intptr_t old_gen_size = PromotedSpaceSize();
    old_gen_promotion_limit_ =
        old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
    old_gen_allocation_limit_ =
        old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);

    if (high_survival_rate_during_scavenges &&
        IsStableOrIncreasingSurvivalTrend()) {
      // Stable high survival rates of young objects both during partial and
      // full collection indicate that mutator is either building or modifying
      // a structure with a long lifetime.
      // In this case we aggressively raise old generation memory limits to
      // postpone subsequent mark-sweep collection and thus trade memory
      // space for the mutation speed.
      old_gen_promotion_limit_ *= 2;
      old_gen_allocation_limit_ *= 2;
    }

    old_gen_exhausted_ = false;
  } else {
    tracer_ = tracer;
    Scavenge();
    tracer_ = NULL;

    UpdateSurvivalRateTrend(start_new_space_size);
  }

  Counters::objs_since_last_young.Set(0);

  if (collector == MARK_COMPACTOR) {
    DisableAssertNoAllocation allow_allocation;
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    next_gc_likely_to_collect_more =
        GlobalHandles::PostGarbageCollectionProcessing();
  }

  // Update relocatables.
  Relocatable::PostGarbageCollectionProcessing();

  if (collector == MARK_COMPACTOR) {
    // Register the amount of external allocated memory.
    amount_of_external_allocated_memory_at_last_global_gc_ =
        amount_of_external_allocated_memory_;
  }

  GCCallbackFlags callback_flags = tracer->is_compacting()
      ? kGCCallbackFlagCompacted
      : kNoGCCallbackFlags;
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
    }
  }

  if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
    ASSERT(!allocation_allowed_);
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
    global_gc_epilogue_callback_();
  }
  VerifySymbolTable();

  return next_gc_likely_to_collect_more;
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(ResourceEvent("markcompact", "begin"));

  MarkCompactCollector::Prepare(tracer);

  bool is_compacting = MarkCompactCollector::IsCompacting();

  if (is_compacting) {
    mc_count_++;
  } else {
    ms_count_++;
  }
  tracer->set_full_gc_count(mc_count_ + ms_count_);

  MarkCompactPrologue(is_compacting);

  MarkCompactCollector::CollectGarbage();

  LOG(ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  Shrink();

  Counters::objs_since_last_full.Set(0);

  contexts_disposed_ = 0;
}


void Heap::MarkCompactPrologue(bool is_compacting) {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  KeyedLookupCache::Clear();
  ContextSlotCache::Clear();
  DescriptorLookupCache::Clear();

  CompilationCache::MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  if (is_compacting) FlushNumberStringCache();

  ClearNormalizedMapCaches();
}


Object* Heap::FindCodeObject(Address a) {
  Object* obj = NULL;  // Initialization to please compiler.
  { MaybeObject* maybe_obj = code_space_->FindObject(a);
    if (!maybe_obj->ToObject(&obj)) {
      obj = lo_space_->FindObject(a)->ToObjectUnchecked();
    }
  }
  return obj;
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
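// The queue lives in the otherwise unused top of to space (Scavenge
// initializes it with new_space_.ToSpaceHigh()): insert() writes entries
// downwards in memory, and the asserts below guard against the queue
// colliding with objects already copied into to space.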
class PromotionQueue {
 public:
  void Initialize(Address start_address) {
    front_ = rear_ = reinterpret_cast<intptr_t*>(start_address);
  }

  bool is_empty() { return front_ <= rear_; }

  void insert(HeapObject* target, int size) {
    *(--rear_) = reinterpret_cast<intptr_t>(target);
    *(--rear_) = size;
    // Assert no overflow into live objects.
    ASSERT(reinterpret_cast<Address>(rear_) >= Heap::new_space()->top());
  }

  void remove(HeapObject** target, int* size) {
    *target = reinterpret_cast<HeapObject*>(*(--front_));
    *size = static_cast<int>(*(--front_));
    // Assert no underflow.
    ASSERT(front_ >= rear_);
  }

 private:
  // The front of the queue is higher in memory than the rear.
  intptr_t* front_;
  intptr_t* rear_;
};


// Shared state read by the scavenge collector and set by ScavengeObject.
static PromotionQueue promotion_queue;


#ifdef DEBUG
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        ASSERT(!Heap::InNewSpace(HeapObject::cast(*current)));
      }
    }
  }
};


static void VerifyNonPointerSpacePointers() {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v;
  HeapObjectIterator code_it(Heap::code_space());
  for (HeapObject* object = code_it.next();
       object != NULL; object = code_it.next())
    object->Iterate(&v);

  HeapObjectIterator data_it(Heap::old_data_space());
  for (HeapObject* object = data_it.next();
       object != NULL; object = data_it.next())
    object->Iterate(&v);
}
#endif


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity()) {
    // Grow the size of new space if there is room to grow and enough
    // data has survived scavenge since the last expansion.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


void Heap::Scavenge() {
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif

  gc_state_ = SCAVENGE;

  Page::FlipMeaningOfInvalidatedWatermarkFlag();
#ifdef DEBUG
  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
#endif

  // We do not update an allocation watermark of the top page during linear
  // allocation to avoid overhead. So to maintain the watermark invariant
  // we have to manually cache the watermark and mark the top page as having an
  // invalid watermark. This guarantees that dirty regions iteration will use a
  // correct watermark even if a linear allocation happens.
  old_pointer_space_->FlushTopPageWatermark();
  map_space_->FlushTopPageWatermark();

  // Implements Cheney's copying algorithm
  LOG(ResourceEvent("scavenge", "begin"));

  // Clear descriptor cache.
  DescriptorLookupCache::Clear();

  // Used for updating survived_since_last_expansion_ at function end.
  intptr_t survived_watermark = PromotedSpaceSize();

  CheckNewSpaceExpansionCriteria();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space_.Flip();
  new_space_.ResetAllocationInfo();

  // We need to sweep newly copied objects which can be either in the
  // to space or promoted to the old generation.  For to-space
  // objects, we treat the bottom of the to space as a queue.  Newly
  // copied and unswept objects lie between a 'front' mark and the
  // allocation pointer.
  //
  // Promoted objects can go into various old-generation spaces, and
  // can be allocated internally in the spaces (from the free list).
  // We treat the top of the to space as a queue of addresses of
  // promoted objects.  The addresses of newly promoted and unswept
  // objects lie between a 'front' mark and a 'rear' mark that is
  // updated as a side effect of promoting an object.
  //
  // There is guaranteed to be enough room at the top of the to space
  // for the addresses of promoted objects: every object promoted
  // frees up its size in bytes from the top of the new space, and
  // objects are at least one pointer in size.
  Address new_space_front = new_space_.ToSpaceLow();
  promotion_queue.Initialize(new_space_.ToSpaceHigh());

  ScavengeVisitor scavenge_visitor;
  // Copy roots.
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

  // Copy objects reachable from the old generation.  By definition,
  // there are no intergenerational pointers in code or data spaces.
  IterateDirtyRegions(old_pointer_space_,
                      &IteratePointersInDirtyRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  IterateDirtyRegions(map_space_,
                      &IteratePointersInDirtyMapsRegion,
                      &ScavengePointer,
                      WATERMARK_CAN_BE_INVALID);

  lo_space_->IterateDirtyRegions(&ScavengePointer);

  // Copy objects reachable from cells by scavenging cell values directly.
  HeapObjectIterator cell_iterator(cell_space_);
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL; cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Scavenge objects reachable from the global contexts list directly.
  scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

  UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  ASSERT(new_space_front == new_space_.top());

  // Set age mark.
  new_space_.set_age_mark(new_space_.top());

  // Update how much has survived scavenge.
  IncrementYoungSurvivorsCounter(static_cast<int>(
      (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));

  LOG(ResourceEvent("scavenge", "end"));

  gc_state_ = NOT_IN_GC;
}


String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  MapWord first_word = HeapObject::cast(*p)->map_word();

  if (!first_word.IsForwardingAddress()) {
    // Unreachable external string can be finalized.
    FinalizeExternalString(String::cast(*p));
    return NULL;
  }

  // String is still reachable.
  return String::cast(first_word.ToForwardingAddress());
}

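// Walks the new-space part of the external string table after a scavenge:
// entries whose strings were not copied are dropped (the updater returns
// NULL after finalizing them), promoted strings are moved to the old string
// list, and strings still in new space keep their entries, updated to the
// forwarding address.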
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
    ExternalStringTableUpdaterCallback updater_func) {
  ExternalStringTable::Verify();

  if (ExternalStringTable::new_space_strings_.is_empty()) return;

  Object** start = &ExternalStringTable::new_space_strings_[0];
  Object** end = start + ExternalStringTable::new_space_strings_.length();
  Object** last = start;

  for (Object** p = start; p < end; ++p) {
    ASSERT(Heap::InFromSpace(*p));
    String* target = updater_func(p);

    if (target == NULL) continue;

    ASSERT(target->IsExternalString());

    if (Heap::InNewSpace(target)) {
      // String is still in new space.  Update the table entry.
      *last = target;
      ++last;
    } else {
      // String got promoted.  Move it to the old string list.
      ExternalStringTable::AddOldString(target);
    }
  }

  ASSERT(last <= end);
  ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}

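// Rebuilds the singly linked list of global contexts after marking: contexts
// that the retainer keeps are relinked through their NEXT_CONTEXT_LINK slot,
// everything else is dropped from the list, and global_contexts_list_ is
// reset to the new head.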
void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  Object* head = undefined_value();
  Context* tail = NULL;
  Object* candidate = global_contexts_list_;
  while (!candidate->IsUndefined()) {
    // Check whether to keep the candidate in the list.
    Context* candidate_context = reinterpret_cast<Context*>(candidate);
    Object* retain = retainer->RetainAs(candidate);
    if (retain != NULL) {
      if (head->IsUndefined()) {
        // First element in the list.
        head = candidate_context;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                            candidate_context,
                            UPDATE_WRITE_BARRIER);
      }
      // Retained context is new tail.
      tail = candidate_context;
    }
    // Move to next element in the list.
    candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    tail->set_unchecked(Context::NEXT_CONTEXT_LINK,
                        Heap::undefined_value(),
                        UPDATE_WRITE_BARRIER);
  }

  // Update the head of the list of contexts.
  Heap::global_contexts_list_ = head;
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    ASSERT(new_space_front <= new_space_.top());

    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front < new_space_.top()) {
      HeapObject* object = HeapObject::FromAddress(new_space_front);
      new_space_front += NewSpaceScavenger::IterateBody(object->map(), object);
    }

    // Promote and process all the to-be-promoted objects.
    while (!promotion_queue.is_empty()) {
      HeapObject* target;
      int size;
      promotion_queue.remove(&target, &size);

      // Promoted objects might already have been partially visited during
      // dirty regions iteration.  Thus we search specifically for pointers
      // into the from semispace instead of looking for pointers into new
      // space.
      ASSERT(!target->IsMap());
      IterateAndMarkPointersToFromSpace(target->address(),
                                        target->address() + size,
                                        &ScavengePointer);
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front < new_space_.top());

  return new_space_front;
}


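// Dispatch-table based scavenger: Initialize() registers a specialized
// evacuation routine for each static visitor id, and Scavenge() looks the
// routine up through the object's map, so the common copy paths avoid
// re-deciding the object kind on every visit.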
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitGlobalContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<Context::kSize>);

    typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject;

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSFunction,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        VisitSpecialized<JSFunction::kSize>);

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }


  static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) {
    table_.GetVisitor(map)(map, slot, obj);
  }


 private:
  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
  enum SizeRestriction { SMALL, UNKNOWN_SIZE };

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  static void RecordCopiedObject(HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
    should_record = should_record || FLAG_log_gc;
#endif
    if (should_record) {
      if (Heap::new_space()->Contains(obj)) {
        Heap::new_space()->RecordAllocation(obj);
      } else {
        Heap::new_space()->RecordPromotion(obj);
      }
    }
  }
#endif  // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object.  Returns the target object.
  INLINE(static HeapObject* MigrateObject(HeapObject* source,
                                          HeapObject* target,
                                          int size)) {
    // Copy the content of source to target.
    Heap::CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
    // Update NewSpace stats if necessary.
    RecordCopiedObject(target);
#endif
    HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
    if (Logger::is_logging() || CpuProfiler::is_profiling()) {
      if (target->IsJSFunction()) {
        PROFILE(FunctionMoveEvent(source->address(), target->address()));
        PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
      }
    }
#endif
    return target;
  }

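  // Shared evacuation path: if the object should be promoted, try to allocate
  // it in old data or old pointer space (or in the large object space when
  // the size restriction allows oversized objects); promoted pointer-carrying
  // objects are queued for later rescanning.  If promotion is not wanted or
  // the old-space allocation fails, the object is copied into to space, which
  // is assumed to succeed here (see ToObjectUnchecked).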
Iain Merrick75681382010-08-19 15:07:18 +01001284 template<ObjectContents object_contents, SizeRestriction size_restriction>
1285 static inline void EvacuateObject(Map* map,
1286 HeapObject** slot,
1287 HeapObject* object,
1288 int object_size) {
1289 ASSERT((size_restriction != SMALL) ||
1290 (object_size <= Page::kMaxHeapObjectSize));
1291 ASSERT(object->Size() == object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001292
Iain Merrick75681382010-08-19 15:07:18 +01001293 if (Heap::ShouldBePromoted(object->address(), object_size)) {
John Reck59135872010-11-02 12:39:01 -07001294 MaybeObject* maybe_result;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001295
Iain Merrick75681382010-08-19 15:07:18 +01001296 if ((size_restriction != SMALL) &&
1297 (object_size > Page::kMaxHeapObjectSize)) {
John Reck59135872010-11-02 12:39:01 -07001298 maybe_result = Heap::lo_space()->AllocateRawFixedArray(object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001299 } else {
Iain Merrick75681382010-08-19 15:07:18 +01001300 if (object_contents == DATA_OBJECT) {
John Reck59135872010-11-02 12:39:01 -07001301 maybe_result = Heap::old_data_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001302 } else {
John Reck59135872010-11-02 12:39:01 -07001303 maybe_result = Heap::old_pointer_space()->AllocateRaw(object_size);
Iain Merrick75681382010-08-19 15:07:18 +01001304 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001305 }
1306
John Reck59135872010-11-02 12:39:01 -07001307 Object* result = NULL; // Initialization to please compiler.
1308 if (maybe_result->ToObject(&result)) {
Iain Merrick75681382010-08-19 15:07:18 +01001309 HeapObject* target = HeapObject::cast(result);
1310 *slot = MigrateObject(object, target, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001311
Iain Merrick75681382010-08-19 15:07:18 +01001312 if (object_contents == POINTER_OBJECT) {
1313 promotion_queue.insert(target, object_size);
1314 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001315
Iain Merrick75681382010-08-19 15:07:18 +01001316 Heap::tracer()->increment_promoted_objects_size(object_size);
1317 return;
1318 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001319 }
John Reck59135872010-11-02 12:39:01 -07001320 Object* result =
1321 Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
Iain Merrick75681382010-08-19 15:07:18 +01001322 *slot = MigrateObject(object, HeapObject::cast(result), object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001323 return;
1324 }
1325
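  // Taken together, the policy above is: objects old enough to be promoted
  // go to old space (the large object space when they exceed
  // Page::kMaxHeapObjectSize, otherwise the old data or old pointer space
  // depending on whether they may hold pointers). Pointer-carrying promotees
  // are queued for later rescanning, and anything else, or any object whose
  // promotion attempt fails, is simply copied within new space.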
Iain Merrick75681382010-08-19 15:07:18 +01001326
1327 static inline void EvacuateFixedArray(Map* map,
1328 HeapObject** slot,
1329 HeapObject* object) {
1330 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1331 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1332 slot,
1333 object,
1334 object_size);
1335 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001336
1337
Iain Merrick75681382010-08-19 15:07:18 +01001338 static inline void EvacuateByteArray(Map* map,
1339 HeapObject** slot,
1340 HeapObject* object) {
1341 int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
1342 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1343 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001344
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001345
Iain Merrick75681382010-08-19 15:07:18 +01001346 static inline void EvacuateSeqAsciiString(Map* map,
1347 HeapObject** slot,
1348 HeapObject* object) {
1349 int object_size = SeqAsciiString::cast(object)->
1350 SeqAsciiStringSize(map->instance_type());
1351 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1352 }
1353
1354
1355 static inline void EvacuateSeqTwoByteString(Map* map,
1356 HeapObject** slot,
1357 HeapObject* object) {
1358 int object_size = SeqTwoByteString::cast(object)->
1359 SeqTwoByteStringSize(map->instance_type());
1360 EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE>(map, slot, object, object_size);
1361 }
1362
1363
1364 static inline bool IsShortcutCandidate(int type) {
1365 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1366 }
1367
1368 static inline void EvacuateShortcutCandidate(Map* map,
1369 HeapObject** slot,
1370 HeapObject* object) {
1371 ASSERT(IsShortcutCandidate(map->instance_type()));
1372
1373 if (ConsString::cast(object)->unchecked_second() == Heap::empty_string()) {
1374 HeapObject* first =
1375 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1376
1377 *slot = first;
1378
1379 if (!Heap::InNewSpace(first)) {
1380 object->set_map_word(MapWord::FromForwardingAddress(first));
1381 return;
1382 }
1383
1384 MapWord first_word = first->map_word();
1385 if (first_word.IsForwardingAddress()) {
1386 HeapObject* target = first_word.ToForwardingAddress();
1387
1388 *slot = target;
1389 object->set_map_word(MapWord::FromForwardingAddress(target));
1390 return;
1391 }
1392
1393 Scavenge(first->map(), slot, first);
1394 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1395 return;
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001396 }
Iain Merrick75681382010-08-19 15:07:18 +01001397
1398 int object_size = ConsString::kSize;
1399 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001400 }
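
  // In other words, a cons string whose second part is the empty string is
  // shortcut: the referring slot is redirected to the first part and the
  // cons cell itself survives only as a forwarding pointer, so no copy of it
  // is allocated. Only when the shortcut does not apply is the cons cell
  // evacuated as a small pointer object.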
1401
Iain Merrick75681382010-08-19 15:07:18 +01001402 template<ObjectContents object_contents>
1403 class ObjectEvacuationStrategy {
1404 public:
1405 template<int object_size>
1406 static inline void VisitSpecialized(Map* map,
1407 HeapObject** slot,
1408 HeapObject* object) {
1409 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1410 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001411
Iain Merrick75681382010-08-19 15:07:18 +01001412 static inline void Visit(Map* map,
1413 HeapObject** slot,
1414 HeapObject* object) {
1415 int object_size = map->instance_size();
1416 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1417 }
1418 };
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001419
Iain Merrick75681382010-08-19 15:07:18 +01001420 typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001421
Iain Merrick75681382010-08-19 15:07:18 +01001422 static VisitorDispatchTable<Callback> table_;
1423};
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001424
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001425
Iain Merrick75681382010-08-19 15:07:18 +01001426VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001427
1428
1429void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1430 ASSERT(InFromSpace(object));
1431 MapWord first_word = object->map_word();
1432 ASSERT(!first_word.IsForwardingAddress());
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001433 Map* map = first_word.ToMap();
Iain Merrick75681382010-08-19 15:07:18 +01001434 ScavengingVisitor::Scavenge(map, p, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001435}
1436
1437
1438void Heap::ScavengePointer(HeapObject** p) {
1439 ScavengeObject(p, *p);
1440}
1441
1442
John Reck59135872010-11-02 12:39:01 -07001443MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1444 int instance_size) {
1445 Object* result;
1446 { MaybeObject* maybe_result = AllocateRawMap();
1447 if (!maybe_result->ToObject(&result)) return maybe_result;
1448 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001449
1450 // Map::cast cannot be used due to uninitialized map field.
1451 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1452 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1453 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01001454 reinterpret_cast<Map*>(result)->
Iain Merrick75681382010-08-19 15:07:18 +01001455 set_visitor_id(
1456 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001457 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001458 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001459 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
Leon Clarke4515c472010-02-03 11:58:03 +00001460 reinterpret_cast<Map*>(result)->set_bit_field(0);
1461 reinterpret_cast<Map*>(result)->set_bit_field2(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001462 return result;
1463}
1464
1465
John Reck59135872010-11-02 12:39:01 -07001466MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
1467 Object* result;
1468 { MaybeObject* maybe_result = AllocateRawMap();
1469 if (!maybe_result->ToObject(&result)) return maybe_result;
1470 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001471
1472 Map* map = reinterpret_cast<Map*>(result);
1473 map->set_map(meta_map());
1474 map->set_instance_type(instance_type);
Iain Merrick75681382010-08-19 15:07:18 +01001475 map->set_visitor_id(
1476 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
Steve Blocka7e24c12009-10-30 11:49:00 +00001477 map->set_prototype(null_value());
1478 map->set_constructor(null_value());
1479 map->set_instance_size(instance_size);
1480 map->set_inobject_properties(0);
1481 map->set_pre_allocated_property_fields(0);
1482 map->set_instance_descriptors(empty_descriptor_array());
1483 map->set_code_cache(empty_fixed_array());
1484 map->set_unused_property_fields(0);
1485 map->set_bit_field(0);
Steve Block8defd9f2010-07-08 12:39:36 +01001486 map->set_bit_field2((1 << Map::kIsExtensible) | (1 << Map::kHasFastElements));
Leon Clarkee46be812010-01-19 14:06:41 +00001487
1488  // If the map object is aligned, fill the padding area with Smi 0 objects.
1489 if (Map::kPadStart < Map::kSize) {
1490 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1491 0,
1492 Map::kSize - Map::kPadStart);
1493 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001494 return map;
1495}
1496
1497
John Reck59135872010-11-02 12:39:01 -07001498MaybeObject* Heap::AllocateCodeCache() {
1499 Object* result;
1500 { MaybeObject* maybe_result = AllocateStruct(CODE_CACHE_TYPE);
1501 if (!maybe_result->ToObject(&result)) return maybe_result;
1502 }
Steve Block6ded16b2010-05-10 14:33:55 +01001503 CodeCache* code_cache = CodeCache::cast(result);
1504 code_cache->set_default_cache(empty_fixed_array());
1505 code_cache->set_normal_type_cache(undefined_value());
1506 return code_cache;
1507}
1508
1509
Steve Blocka7e24c12009-10-30 11:49:00 +00001510const Heap::StringTypeTable Heap::string_type_table[] = {
1511#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
1512 {type, size, k##camel_name##MapRootIndex},
1513 STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
1514#undef STRING_TYPE_ELEMENT
1515};
1516
1517
1518const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
1519#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
1520 {contents, k##name##RootIndex},
1521 SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
1522#undef CONSTANT_SYMBOL_ELEMENT
1523};
1524
1525
1526const Heap::StructTable Heap::struct_table[] = {
1527#define STRUCT_TABLE_ELEMENT(NAME, Name, name) \
1528 { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
1529 STRUCT_LIST(STRUCT_TABLE_ELEMENT)
1530#undef STRUCT_TABLE_ELEMENT
1531};
1532
1533
1534bool Heap::CreateInitialMaps() {
John Reck59135872010-11-02 12:39:01 -07001535 Object* obj;
1536 { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
1537 if (!maybe_obj->ToObject(&obj)) return false;
1538 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001539 // Map::cast cannot be used due to uninitialized map field.
1540 Map* new_meta_map = reinterpret_cast<Map*>(obj);
1541 set_meta_map(new_meta_map);
1542 new_meta_map->set_map(new_meta_map);
1543
John Reck59135872010-11-02 12:39:01 -07001544 { MaybeObject* maybe_obj =
1545 AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1546 if (!maybe_obj->ToObject(&obj)) return false;
1547 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001548 set_fixed_array_map(Map::cast(obj));
1549
John Reck59135872010-11-02 12:39:01 -07001550 { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
1551 if (!maybe_obj->ToObject(&obj)) return false;
1552 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001553 set_oddball_map(Map::cast(obj));
1554
Steve Block6ded16b2010-05-10 14:33:55 +01001555  // Allocate the empty fixed array.
John Reck59135872010-11-02 12:39:01 -07001556 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1557 if (!maybe_obj->ToObject(&obj)) return false;
1558 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001559 set_empty_fixed_array(FixedArray::cast(obj));
1560
John Reck59135872010-11-02 12:39:01 -07001561 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1562 if (!maybe_obj->ToObject(&obj)) return false;
1563 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001564 set_null_value(obj);
1565
1566 // Allocate the empty descriptor array.
John Reck59135872010-11-02 12:39:01 -07001567 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1568 if (!maybe_obj->ToObject(&obj)) return false;
1569 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001570 set_empty_descriptor_array(DescriptorArray::cast(obj));
1571
1572 // Fix the instance_descriptors for the existing maps.
1573 meta_map()->set_instance_descriptors(empty_descriptor_array());
1574 meta_map()->set_code_cache(empty_fixed_array());
1575
1576 fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
1577 fixed_array_map()->set_code_cache(empty_fixed_array());
1578
1579 oddball_map()->set_instance_descriptors(empty_descriptor_array());
1580 oddball_map()->set_code_cache(empty_fixed_array());
1581
1582 // Fix prototype object for existing maps.
1583 meta_map()->set_prototype(null_value());
1584 meta_map()->set_constructor(null_value());
1585
1586 fixed_array_map()->set_prototype(null_value());
1587 fixed_array_map()->set_constructor(null_value());
1588
1589 oddball_map()->set_prototype(null_value());
1590 oddball_map()->set_constructor(null_value());
1591
John Reck59135872010-11-02 12:39:01 -07001592 { MaybeObject* maybe_obj =
1593 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1594 if (!maybe_obj->ToObject(&obj)) return false;
1595 }
Iain Merrick75681382010-08-19 15:07:18 +01001596 set_fixed_cow_array_map(Map::cast(obj));
1597 ASSERT(fixed_array_map() != fixed_cow_array_map());
1598
John Reck59135872010-11-02 12:39:01 -07001599 { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
1600 if (!maybe_obj->ToObject(&obj)) return false;
1601 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001602 set_heap_number_map(Map::cast(obj));
1603
John Reck59135872010-11-02 12:39:01 -07001604 { MaybeObject* maybe_obj = AllocateMap(PROXY_TYPE, Proxy::kSize);
1605 if (!maybe_obj->ToObject(&obj)) return false;
1606 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001607 set_proxy_map(Map::cast(obj));
1608
1609 for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
1610 const StringTypeTable& entry = string_type_table[i];
John Reck59135872010-11-02 12:39:01 -07001611 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1612 if (!maybe_obj->ToObject(&obj)) return false;
1613 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001614 roots_[entry.index] = Map::cast(obj);
1615 }
1616
John Reck59135872010-11-02 12:39:01 -07001617 { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
1618 if (!maybe_obj->ToObject(&obj)) return false;
1619 }
Steve Blockd0582a62009-12-15 09:54:21 +00001620 set_undetectable_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001621 Map::cast(obj)->set_is_undetectable();
1622
John Reck59135872010-11-02 12:39:01 -07001623 { MaybeObject* maybe_obj =
1624 AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
1625 if (!maybe_obj->ToObject(&obj)) return false;
1626 }
Steve Blockd0582a62009-12-15 09:54:21 +00001627 set_undetectable_ascii_string_map(Map::cast(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00001628 Map::cast(obj)->set_is_undetectable();
1629
John Reck59135872010-11-02 12:39:01 -07001630 { MaybeObject* maybe_obj =
1631 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1632 if (!maybe_obj->ToObject(&obj)) return false;
1633 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001634 set_byte_array_map(Map::cast(obj));
1635
John Reck59135872010-11-02 12:39:01 -07001636 { MaybeObject* maybe_obj =
1637 AllocateMap(PIXEL_ARRAY_TYPE, PixelArray::kAlignedSize);
1638 if (!maybe_obj->ToObject(&obj)) return false;
1639 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001640 set_pixel_array_map(Map::cast(obj));
1641
John Reck59135872010-11-02 12:39:01 -07001642 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
1643 ExternalArray::kAlignedSize);
1644 if (!maybe_obj->ToObject(&obj)) return false;
1645 }
Steve Block3ce2e202009-11-05 08:53:23 +00001646 set_external_byte_array_map(Map::cast(obj));
1647
John Reck59135872010-11-02 12:39:01 -07001648 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
1649 ExternalArray::kAlignedSize);
1650 if (!maybe_obj->ToObject(&obj)) return false;
1651 }
Steve Block3ce2e202009-11-05 08:53:23 +00001652 set_external_unsigned_byte_array_map(Map::cast(obj));
1653
John Reck59135872010-11-02 12:39:01 -07001654 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
1655 ExternalArray::kAlignedSize);
1656 if (!maybe_obj->ToObject(&obj)) return false;
1657 }
Steve Block3ce2e202009-11-05 08:53:23 +00001658 set_external_short_array_map(Map::cast(obj));
1659
John Reck59135872010-11-02 12:39:01 -07001660 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
1661 ExternalArray::kAlignedSize);
1662 if (!maybe_obj->ToObject(&obj)) return false;
1663 }
Steve Block3ce2e202009-11-05 08:53:23 +00001664 set_external_unsigned_short_array_map(Map::cast(obj));
1665
John Reck59135872010-11-02 12:39:01 -07001666 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
1667 ExternalArray::kAlignedSize);
1668 if (!maybe_obj->ToObject(&obj)) return false;
1669 }
Steve Block3ce2e202009-11-05 08:53:23 +00001670 set_external_int_array_map(Map::cast(obj));
1671
John Reck59135872010-11-02 12:39:01 -07001672 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
1673 ExternalArray::kAlignedSize);
1674 if (!maybe_obj->ToObject(&obj)) return false;
1675 }
Steve Block3ce2e202009-11-05 08:53:23 +00001676 set_external_unsigned_int_array_map(Map::cast(obj));
1677
John Reck59135872010-11-02 12:39:01 -07001678 { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
1679 ExternalArray::kAlignedSize);
1680 if (!maybe_obj->ToObject(&obj)) return false;
1681 }
Steve Block3ce2e202009-11-05 08:53:23 +00001682 set_external_float_array_map(Map::cast(obj));
1683
John Reck59135872010-11-02 12:39:01 -07001684 { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
1685 if (!maybe_obj->ToObject(&obj)) return false;
1686 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001687 set_code_map(Map::cast(obj));
1688
John Reck59135872010-11-02 12:39:01 -07001689 { MaybeObject* maybe_obj = AllocateMap(JS_GLOBAL_PROPERTY_CELL_TYPE,
1690 JSGlobalPropertyCell::kSize);
1691 if (!maybe_obj->ToObject(&obj)) return false;
1692 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001693 set_global_property_cell_map(Map::cast(obj));
1694
John Reck59135872010-11-02 12:39:01 -07001695 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
1696 if (!maybe_obj->ToObject(&obj)) return false;
1697 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001698 set_one_pointer_filler_map(Map::cast(obj));
1699
John Reck59135872010-11-02 12:39:01 -07001700 { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
1701 if (!maybe_obj->ToObject(&obj)) return false;
1702 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001703 set_two_pointer_filler_map(Map::cast(obj));
1704
1705 for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
1706 const StructTable& entry = struct_table[i];
John Reck59135872010-11-02 12:39:01 -07001707 { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
1708 if (!maybe_obj->ToObject(&obj)) return false;
1709 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001710 roots_[entry.index] = Map::cast(obj);
1711 }
1712
John Reck59135872010-11-02 12:39:01 -07001713 { MaybeObject* maybe_obj =
1714 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1715 if (!maybe_obj->ToObject(&obj)) return false;
1716 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001717 set_hash_table_map(Map::cast(obj));
1718
John Reck59135872010-11-02 12:39:01 -07001719 { MaybeObject* maybe_obj =
1720 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1721 if (!maybe_obj->ToObject(&obj)) return false;
1722 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001723 set_context_map(Map::cast(obj));
1724
John Reck59135872010-11-02 12:39:01 -07001725 { MaybeObject* maybe_obj =
1726 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1727 if (!maybe_obj->ToObject(&obj)) return false;
1728 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001729 set_catch_context_map(Map::cast(obj));
1730
John Reck59135872010-11-02 12:39:01 -07001731 { MaybeObject* maybe_obj =
1732 AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
1733 if (!maybe_obj->ToObject(&obj)) return false;
1734 }
Ben Murdochf87a2032010-10-22 12:50:53 +01001735 Map* global_context_map = Map::cast(obj);
1736 global_context_map->set_visitor_id(StaticVisitorBase::kVisitGlobalContext);
1737 set_global_context_map(global_context_map);
Steve Blocka7e24c12009-10-30 11:49:00 +00001738
John Reck59135872010-11-02 12:39:01 -07001739 { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
1740 SharedFunctionInfo::kAlignedSize);
1741 if (!maybe_obj->ToObject(&obj)) return false;
1742 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001743 set_shared_function_info_map(Map::cast(obj));
1744
1745 ASSERT(!Heap::InNewSpace(Heap::empty_fixed_array()));
1746 return true;
1747}
1748
1749
John Reck59135872010-11-02 12:39:01 -07001750MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001751 // Statically ensure that it is safe to allocate heap numbers in paged
1752 // spaces.
1753 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1754 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
1755
John Reck59135872010-11-02 12:39:01 -07001756 Object* result;
1757 { MaybeObject* maybe_result =
1758 AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
1759 if (!maybe_result->ToObject(&result)) return maybe_result;
1760 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001761
1762 HeapObject::cast(result)->set_map(heap_number_map());
1763 HeapNumber::cast(result)->set_value(value);
1764 return result;
1765}
1766
1767
John Reck59135872010-11-02 12:39:01 -07001768MaybeObject* Heap::AllocateHeapNumber(double value) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001769 // Use general version, if we're forced to always allocate.
1770 if (always_allocate()) return AllocateHeapNumber(value, TENURED);
1771
1772 // This version of AllocateHeapNumber is optimized for
1773 // allocation in new space.
1774 STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
1775 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
John Reck59135872010-11-02 12:39:01 -07001776 Object* result;
1777 { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
1778 if (!maybe_result->ToObject(&result)) return maybe_result;
1779 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001780 HeapObject::cast(result)->set_map(heap_number_map());
1781 HeapNumber::cast(result)->set_value(value);
1782 return result;
1783}
1784
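// A caller-side sketch (illustrative only, mirroring the MaybeObject pattern
// used throughout this file) unwraps the result and propagates failure:
//   Object* number;
//   { MaybeObject* maybe_number = Heap::AllocateHeapNumber(0.5);
//     if (!maybe_number->ToObject(&number)) return maybe_number;
//   }
//   double value = HeapNumber::cast(number)->value();  // value == 0.5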
1785
John Reck59135872010-11-02 12:39:01 -07001786MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
1787 Object* result;
1788 { MaybeObject* maybe_result = AllocateRawCell();
1789 if (!maybe_result->ToObject(&result)) return maybe_result;
1790 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001791 HeapObject::cast(result)->set_map(global_property_cell_map());
1792 JSGlobalPropertyCell::cast(result)->set_value(value);
1793 return result;
1794}
1795
1796
John Reck59135872010-11-02 12:39:01 -07001797MaybeObject* Heap::CreateOddball(const char* to_string,
1798 Object* to_number) {
1799 Object* result;
1800 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE);
1801 if (!maybe_result->ToObject(&result)) return maybe_result;
1802 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001803 return Oddball::cast(result)->Initialize(to_string, to_number);
1804}
1805
1806
1807bool Heap::CreateApiObjects() {
1808 Object* obj;
1809
John Reck59135872010-11-02 12:39:01 -07001810 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
1811 if (!maybe_obj->ToObject(&obj)) return false;
1812 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001813 set_neander_map(Map::cast(obj));
1814
John Reck59135872010-11-02 12:39:01 -07001815 { MaybeObject* maybe_obj = Heap::AllocateJSObjectFromMap(neander_map());
1816 if (!maybe_obj->ToObject(&obj)) return false;
1817 }
1818 Object* elements;
1819 { MaybeObject* maybe_elements = AllocateFixedArray(2);
1820 if (!maybe_elements->ToObject(&elements)) return false;
1821 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001822 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
1823 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
1824 set_message_listeners(JSObject::cast(obj));
1825
1826 return true;
1827}
1828
1829
1830void Heap::CreateCEntryStub() {
1831 CEntryStub stub(1);
1832 set_c_entry_code(*stub.GetCode());
1833}
1834
1835
Steve Block6ded16b2010-05-10 14:33:55 +01001836#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001837void Heap::CreateRegExpCEntryStub() {
1838 RegExpCEntryStub stub;
1839 set_re_c_entry_code(*stub.GetCode());
1840}
1841#endif
1842
1843
Steve Blocka7e24c12009-10-30 11:49:00 +00001844void Heap::CreateJSEntryStub() {
1845 JSEntryStub stub;
1846 set_js_entry_code(*stub.GetCode());
1847}
1848
1849
1850void Heap::CreateJSConstructEntryStub() {
1851 JSConstructEntryStub stub;
1852 set_js_construct_entry_code(*stub.GetCode());
1853}
1854
1855
1856void Heap::CreateFixedStubs() {
1857 // Here we create roots for fixed stubs. They are needed at GC
1858 // for cooking and uncooking (check out frames.cc).
1859  // This eliminates the need for doing dictionary lookup in the
1860 // stub cache for these stubs.
1861 HandleScope scope;
1862  // gcc-4.4 has a problem generating correct code for the following snippet:
1863 // { CEntryStub stub;
1864 // c_entry_code_ = *stub.GetCode();
1865 // }
Leon Clarke4515c472010-02-03 11:58:03 +00001866 // { DebuggerStatementStub stub;
1867 // debugger_statement_code_ = *stub.GetCode();
Steve Blocka7e24c12009-10-30 11:49:00 +00001868 // }
1869  // To work around the problem, make separate functions without inlining.
1870 Heap::CreateCEntryStub();
Steve Blocka7e24c12009-10-30 11:49:00 +00001871 Heap::CreateJSEntryStub();
1872 Heap::CreateJSConstructEntryStub();
Steve Block6ded16b2010-05-10 14:33:55 +01001873#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
Steve Blocka7e24c12009-10-30 11:49:00 +00001874 Heap::CreateRegExpCEntryStub();
1875#endif
1876}
1877
1878
1879bool Heap::CreateInitialObjects() {
1880 Object* obj;
1881
1882 // The -0 value must be set before NumberFromDouble works.
John Reck59135872010-11-02 12:39:01 -07001883 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
1884 if (!maybe_obj->ToObject(&obj)) return false;
1885 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001886 set_minus_zero_value(obj);
1887 ASSERT(signbit(minus_zero_value()->Number()) != 0);
1888
John Reck59135872010-11-02 12:39:01 -07001889 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
1890 if (!maybe_obj->ToObject(&obj)) return false;
1891 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001892 set_nan_value(obj);
1893
John Reck59135872010-11-02 12:39:01 -07001894 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE);
1895 if (!maybe_obj->ToObject(&obj)) return false;
1896 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001897 set_undefined_value(obj);
1898 ASSERT(!InNewSpace(undefined_value()));
1899
1900 // Allocate initial symbol table.
John Reck59135872010-11-02 12:39:01 -07001901 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
1902 if (!maybe_obj->ToObject(&obj)) return false;
1903 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001904 // Don't use set_symbol_table() due to asserts.
1905 roots_[kSymbolTableRootIndex] = obj;
1906
1907  // Assign the print strings for oddballs after creating the symbol table.
John Reck59135872010-11-02 12:39:01 -07001908 Object* symbol;
1909 { MaybeObject* maybe_symbol = LookupAsciiSymbol("undefined");
1910 if (!maybe_symbol->ToObject(&symbol)) return false;
1911 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001912 Oddball::cast(undefined_value())->set_to_string(String::cast(symbol));
1913 Oddball::cast(undefined_value())->set_to_number(nan_value());
1914
Steve Blocka7e24c12009-10-30 11:49:00 +00001915 // Allocate the null_value
John Reck59135872010-11-02 12:39:01 -07001916 { MaybeObject* maybe_obj =
1917 Oddball::cast(null_value())->Initialize("null", Smi::FromInt(0));
1918 if (!maybe_obj->ToObject(&obj)) return false;
1919 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001920
John Reck59135872010-11-02 12:39:01 -07001921 { MaybeObject* maybe_obj = CreateOddball("true", Smi::FromInt(1));
1922 if (!maybe_obj->ToObject(&obj)) return false;
1923 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001924 set_true_value(obj);
1925
John Reck59135872010-11-02 12:39:01 -07001926 { MaybeObject* maybe_obj = CreateOddball("false", Smi::FromInt(0));
1927 if (!maybe_obj->ToObject(&obj)) return false;
1928 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001929 set_false_value(obj);
1930
John Reck59135872010-11-02 12:39:01 -07001931 { MaybeObject* maybe_obj = CreateOddball("hole", Smi::FromInt(-1));
1932 if (!maybe_obj->ToObject(&obj)) return false;
1933 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001934 set_the_hole_value(obj);
1935
John Reck59135872010-11-02 12:39:01 -07001936 { MaybeObject* maybe_obj =
1937 CreateOddball("no_interceptor_result_sentinel", Smi::FromInt(-2));
1938 if (!maybe_obj->ToObject(&obj)) return false;
1939 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001940 set_no_interceptor_result_sentinel(obj);
1941
John Reck59135872010-11-02 12:39:01 -07001942 { MaybeObject* maybe_obj =
1943 CreateOddball("termination_exception", Smi::FromInt(-3));
1944 if (!maybe_obj->ToObject(&obj)) return false;
1945 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001946 set_termination_exception(obj);
1947
1948 // Allocate the empty string.
John Reck59135872010-11-02 12:39:01 -07001949 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
1950 if (!maybe_obj->ToObject(&obj)) return false;
1951 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001952 set_empty_string(String::cast(obj));
1953
1954 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
John Reck59135872010-11-02 12:39:01 -07001955 { MaybeObject* maybe_obj =
1956 LookupAsciiSymbol(constant_symbol_table[i].contents);
1957 if (!maybe_obj->ToObject(&obj)) return false;
1958 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001959 roots_[constant_symbol_table[i].index] = String::cast(obj);
1960 }
1961
1962 // Allocate the hidden symbol which is used to identify the hidden properties
1963 // in JSObjects. The hash code has a special value so that it will not match
1964 // the empty string when searching for the property. It cannot be part of the
1965 // loop above because it needs to be allocated manually with the special
1966 // hash code in place. The hash code for the hidden_symbol is zero to ensure
1967 // that it will always be at the first entry in property descriptors.
John Reck59135872010-11-02 12:39:01 -07001968 { MaybeObject* maybe_obj =
1969 AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
1970 if (!maybe_obj->ToObject(&obj)) return false;
1971 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001972 hidden_symbol_ = String::cast(obj);
1973
1974 // Allocate the proxy for __proto__.
John Reck59135872010-11-02 12:39:01 -07001975 { MaybeObject* maybe_obj =
1976 AllocateProxy((Address) &Accessors::ObjectPrototype);
1977 if (!maybe_obj->ToObject(&obj)) return false;
1978 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001979 set_prototype_accessors(Proxy::cast(obj));
1980
1981 // Allocate the code_stubs dictionary. The initial size is set to avoid
1982 // expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07001983 { MaybeObject* maybe_obj = NumberDictionary::Allocate(128);
1984 if (!maybe_obj->ToObject(&obj)) return false;
1985 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001986 set_code_stubs(NumberDictionary::cast(obj));
1987
1988 // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
1989 // is set to avoid expanding the dictionary during bootstrapping.
John Reck59135872010-11-02 12:39:01 -07001990 { MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
1991 if (!maybe_obj->ToObject(&obj)) return false;
1992 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 set_non_monomorphic_cache(NumberDictionary::cast(obj));
1994
Kristian Monsen25f61362010-05-21 11:50:48 +01001995 set_instanceof_cache_function(Smi::FromInt(0));
1996 set_instanceof_cache_map(Smi::FromInt(0));
1997 set_instanceof_cache_answer(Smi::FromInt(0));
1998
Steve Blocka7e24c12009-10-30 11:49:00 +00001999 CreateFixedStubs();
2000
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002001 // Allocate the dictionary of intrinsic function names.
John Reck59135872010-11-02 12:39:01 -07002002 { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions);
2003 if (!maybe_obj->ToObject(&obj)) return false;
2004 }
2005 { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(obj);
2006 if (!maybe_obj->ToObject(&obj)) return false;
2007 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002008 set_intrinsic_function_names(StringDictionary::cast(obj));
2009
Leon Clarkee46be812010-01-19 14:06:41 +00002010 if (InitializeNumberStringCache()->IsFailure()) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00002011
Steve Block6ded16b2010-05-10 14:33:55 +01002012 // Allocate cache for single character ASCII strings.
John Reck59135872010-11-02 12:39:01 -07002013 { MaybeObject* maybe_obj =
2014 AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
2015 if (!maybe_obj->ToObject(&obj)) return false;
2016 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002017 set_single_character_string_cache(FixedArray::cast(obj));
2018
2019 // Allocate cache for external strings pointing to native source code.
John Reck59135872010-11-02 12:39:01 -07002020 { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
2021 if (!maybe_obj->ToObject(&obj)) return false;
2022 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002023 set_natives_source_cache(FixedArray::cast(obj));
2024
2025 // Handling of script id generation is in Factory::NewScript.
2026 set_last_script_id(undefined_value());
2027
2028 // Initialize keyed lookup cache.
2029 KeyedLookupCache::Clear();
2030
2031 // Initialize context slot cache.
2032 ContextSlotCache::Clear();
2033
2034 // Initialize descriptor cache.
2035 DescriptorLookupCache::Clear();
2036
2037 // Initialize compilation cache.
2038 CompilationCache::Clear();
2039
2040 return true;
2041}
2042
2043
John Reck59135872010-11-02 12:39:01 -07002044MaybeObject* Heap::InitializeNumberStringCache() {
Leon Clarkee46be812010-01-19 14:06:41 +00002045 // Compute the size of the number string cache based on the max heap size.
2046  // max_semispace_size_ == 512 KB => number_string_cache_size = 1024.
2047  // max_semispace_size_ == 8 MB => number_string_cache_size = 16K.
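  // Worked example of the computation below (illustrative): a 2 MB semispace
  // yields 2 MB / 512 == 4096 entries, which already lies in the [32, 16K]
  // clamp range; the backing fixed array is then allocated with
  // 2 * 4096 == 8192 slots, two per entry.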
2048 int number_string_cache_size = max_semispace_size_ / 512;
2049 number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
John Reck59135872010-11-02 12:39:01 -07002050 Object* obj;
2051 MaybeObject* maybe_obj =
2052 AllocateFixedArray(number_string_cache_size * 2, TENURED);
2053 if (maybe_obj->ToObject(&obj)) set_number_string_cache(FixedArray::cast(obj));
2054 return maybe_obj;
Leon Clarkee46be812010-01-19 14:06:41 +00002055}
2056
2057
2058void Heap::FlushNumberStringCache() {
2059 // Flush the number to string cache.
2060 int len = number_string_cache()->length();
2061 for (int i = 0; i < len; i++) {
2062 number_string_cache()->set_undefined(i);
2063 }
2064}
2065
2066
Steve Blocka7e24c12009-10-30 11:49:00 +00002067static inline int double_get_hash(double d) {
2068 DoubleRepresentation rep(d);
Leon Clarkee46be812010-01-19 14:06:41 +00002069 return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
Steve Blocka7e24c12009-10-30 11:49:00 +00002070}
2071
2072
2073static inline int smi_get_hash(Smi* smi) {
Leon Clarkee46be812010-01-19 14:06:41 +00002074 return smi->value();
Steve Blocka7e24c12009-10-30 11:49:00 +00002075}
2076
2077
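// Note on the cache layout used below: number_string_cache() is a flat
// FixedArray of (key, value) pairs, so the entry hashed to index i lives in
// slots 2 * i (the number) and 2 * i + 1 (its cached string).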
Steve Blocka7e24c12009-10-30 11:49:00 +00002078Object* Heap::GetNumberStringCache(Object* number) {
2079 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002080 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002081 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002082 hash = smi_get_hash(Smi::cast(number)) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002083 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002084 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002085 }
2086 Object* key = number_string_cache()->get(hash * 2);
2087 if (key == number) {
2088 return String::cast(number_string_cache()->get(hash * 2 + 1));
2089 } else if (key->IsHeapNumber() &&
2090 number->IsHeapNumber() &&
2091 key->Number() == number->Number()) {
2092 return String::cast(number_string_cache()->get(hash * 2 + 1));
2093 }
2094 return undefined_value();
2095}
2096
2097
2098void Heap::SetNumberStringCache(Object* number, String* string) {
2099 int hash;
Leon Clarkee46be812010-01-19 14:06:41 +00002100 int mask = (number_string_cache()->length() >> 1) - 1;
Steve Blocka7e24c12009-10-30 11:49:00 +00002101 if (number->IsSmi()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002102 hash = smi_get_hash(Smi::cast(number)) & mask;
Leon Clarke4515c472010-02-03 11:58:03 +00002103 number_string_cache()->set(hash * 2, Smi::cast(number));
Steve Blocka7e24c12009-10-30 11:49:00 +00002104 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00002105 hash = double_get_hash(number->Number()) & mask;
Steve Blocka7e24c12009-10-30 11:49:00 +00002106 number_string_cache()->set(hash * 2, number);
2107 }
2108 number_string_cache()->set(hash * 2 + 1, string);
2109}
2110
2111
John Reck59135872010-11-02 12:39:01 -07002112MaybeObject* Heap::NumberToString(Object* number,
2113 bool check_number_string_cache) {
Andrei Popescu402d9372010-02-26 13:31:12 +00002114 Counters::number_to_string_runtime.Increment();
Steve Block6ded16b2010-05-10 14:33:55 +01002115 if (check_number_string_cache) {
2116 Object* cached = GetNumberStringCache(number);
2117 if (cached != undefined_value()) {
2118 return cached;
2119 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002120 }
2121
2122 char arr[100];
2123 Vector<char> buffer(arr, ARRAY_SIZE(arr));
2124 const char* str;
2125 if (number->IsSmi()) {
2126 int num = Smi::cast(number)->value();
2127 str = IntToCString(num, buffer);
2128 } else {
2129 double num = HeapNumber::cast(number)->value();
2130 str = DoubleToCString(num, buffer);
2131 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002132
John Reck59135872010-11-02 12:39:01 -07002133 Object* js_string;
2134 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2135 if (maybe_js_string->ToObject(&js_string)) {
2136 SetNumberStringCache(number, String::cast(js_string));
Steve Blocka7e24c12009-10-30 11:49:00 +00002137 }
John Reck59135872010-11-02 12:39:01 -07002138 return maybe_js_string;
Steve Blocka7e24c12009-10-30 11:49:00 +00002139}
2140
2141
Steve Block3ce2e202009-11-05 08:53:23 +00002142Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2143 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2144}
2145
2146
2147Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2148 ExternalArrayType array_type) {
2149 switch (array_type) {
2150 case kExternalByteArray:
2151 return kExternalByteArrayMapRootIndex;
2152 case kExternalUnsignedByteArray:
2153 return kExternalUnsignedByteArrayMapRootIndex;
2154 case kExternalShortArray:
2155 return kExternalShortArrayMapRootIndex;
2156 case kExternalUnsignedShortArray:
2157 return kExternalUnsignedShortArrayMapRootIndex;
2158 case kExternalIntArray:
2159 return kExternalIntArrayMapRootIndex;
2160 case kExternalUnsignedIntArray:
2161 return kExternalUnsignedIntArrayMapRootIndex;
2162 case kExternalFloatArray:
2163 return kExternalFloatArrayMapRootIndex;
2164 default:
2165 UNREACHABLE();
2166 return kUndefinedValueRootIndex;
2167 }
2168}
2169
2170
John Reck59135872010-11-02 12:39:01 -07002171MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01002172 // We need to distinguish the minus zero value and this cannot be
2173 // done after conversion to int. Doing this by comparing bit
2174 // patterns is faster than using fpclassify() et al.
2175 static const DoubleRepresentation minus_zero(-0.0);
2176
2177 DoubleRepresentation rep(value);
2178 if (rep.bits == minus_zero.bits) {
2179 return AllocateHeapNumber(-0.0, pretenure);
2180 }
2181
2182 int int_value = FastD2I(value);
2183 if (value == int_value && Smi::IsValid(int_value)) {
2184 return Smi::FromInt(int_value);
2185 }
2186
2187 // Materialize the value in the heap.
2188 return AllocateHeapNumber(value, pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00002189}
2190
2191
John Reck59135872010-11-02 12:39:01 -07002192MaybeObject* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002193 // Statically ensure that it is safe to allocate proxies in paged spaces.
2194 STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
2195 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002196 Object* result;
2197 { MaybeObject* maybe_result = Allocate(proxy_map(), space);
2198 if (!maybe_result->ToObject(&result)) return maybe_result;
2199 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002200
2201 Proxy::cast(result)->set_proxy(proxy);
2202 return result;
2203}
2204
2205
John Reck59135872010-11-02 12:39:01 -07002206MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
2207 Object* result;
2208 { MaybeObject* maybe_result =
2209 Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
2210 if (!maybe_result->ToObject(&result)) return maybe_result;
2211 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002212
2213 SharedFunctionInfo* share = SharedFunctionInfo::cast(result);
2214 share->set_name(name);
2215 Code* illegal = Builtins::builtin(Builtins::Illegal);
2216 share->set_code(illegal);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002217 share->set_scope_info(SerializedScopeInfo::Empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00002218 Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
2219 share->set_construct_stub(construct_stub);
2220 share->set_expected_nof_properties(0);
2221 share->set_length(0);
2222 share->set_formal_parameter_count(0);
2223 share->set_instance_class_name(Object_symbol());
2224 share->set_function_data(undefined_value());
2225 share->set_script(undefined_value());
2226 share->set_start_position_and_type(0);
2227 share->set_debug_info(undefined_value());
2228 share->set_inferred_name(empty_string());
2229 share->set_compiler_hints(0);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002230 share->set_initial_map(undefined_value());
Steve Blocka7e24c12009-10-30 11:49:00 +00002231 share->set_this_property_assignments_count(0);
2232 share->set_this_property_assignments(undefined_value());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002233 share->set_num_literals(0);
2234 share->set_end_position(0);
2235 share->set_function_token_position(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00002236 return result;
2237}
2238
2239
Steve Blockd0582a62009-12-15 09:54:21 +00002240// Returns true for a character in a range. Both limits are inclusive.
2241static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
2242  // This makes use of the unsigned wraparound.
2243 return character - from <= to - from;
2244}
2245
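// Illustrative check of the trick: Between('5', '0', '9') computes
// '5' - '0' == 5, which is <= '9' - '0' == 9, so it is true; for '/', the
// subtraction '/' - '0' wraps around to a huge unsigned value, so the
// comparison fails and the result is false.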
2246
John Reck59135872010-11-02 12:39:01 -07002247MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
2248 uint32_t c1,
2249 uint32_t c2) {
Steve Blockd0582a62009-12-15 09:54:21 +00002250 String* symbol;
2251 // Numeric strings have a different hash algorithm not known by
2252 // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
2253 if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
2254 Heap::symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
2255 return symbol;
2256  // Now that we know the length is 2, we might as well make use of that fact
2257 // when building the new string.
2258 } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
2259 ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
John Reck59135872010-11-02 12:39:01 -07002260 Object* result;
2261 { MaybeObject* maybe_result = Heap::AllocateRawAsciiString(2);
2262 if (!maybe_result->ToObject(&result)) return maybe_result;
2263 }
Steve Blockd0582a62009-12-15 09:54:21 +00002264 char* dest = SeqAsciiString::cast(result)->GetChars();
2265 dest[0] = c1;
2266 dest[1] = c2;
2267 return result;
2268 } else {
John Reck59135872010-11-02 12:39:01 -07002269 Object* result;
2270 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(2);
2271 if (!maybe_result->ToObject(&result)) return maybe_result;
2272 }
Steve Blockd0582a62009-12-15 09:54:21 +00002273 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2274 dest[0] = c1;
2275 dest[1] = c2;
2276 return result;
2277 }
2278}
2279
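// Net effect (illustrative): for a pair such as ('a', 'b') an existing "ab"
// symbol is returned directly from the symbol table when present; otherwise a
// fresh two-character sequential string is allocated, ascii when both
// characters fit in the ascii range and two-byte otherwise. Digit pairs skip
// the symbol table probe because of their different hash.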
2280
John Reck59135872010-11-02 12:39:01 -07002281MaybeObject* Heap::AllocateConsString(String* first, String* second) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002282 int first_length = first->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002283 if (first_length == 0) {
2284 return second;
2285 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002286
2287 int second_length = second->length();
Steve Blockd0582a62009-12-15 09:54:21 +00002288 if (second_length == 0) {
2289 return first;
2290 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002291
2292 int length = first_length + second_length;
Steve Blockd0582a62009-12-15 09:54:21 +00002293
2294 // Optimization for 2-byte strings often used as keys in a decompression
2295 // dictionary. Check whether we already have the string in the symbol
2296  // table to prevent creation of many unnecessary strings.
2297 if (length == 2) {
2298 unsigned c1 = first->Get(0);
2299 unsigned c2 = second->Get(0);
2300 return MakeOrFindTwoCharacterString(c1, c2);
2301 }
2302
Steve Block6ded16b2010-05-10 14:33:55 +01002303 bool first_is_ascii = first->IsAsciiRepresentation();
2304 bool second_is_ascii = second->IsAsciiRepresentation();
2305 bool is_ascii = first_is_ascii && second_is_ascii;
Steve Blocka7e24c12009-10-30 11:49:00 +00002306
2307 // Make sure that an out of memory exception is thrown if the length
Steve Block3ce2e202009-11-05 08:53:23 +00002308 // of the new cons string is too large.
2309 if (length > String::kMaxLength || length < 0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002310 Top::context()->mark_out_of_memory();
2311 return Failure::OutOfMemoryException();
2312 }
2313
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002314 bool is_ascii_data_in_two_byte_string = false;
2315 if (!is_ascii) {
2316 // At least one of the strings uses two-byte representation so we
2317 // can't use the fast case code for short ascii strings below, but
2318 // we can try to save memory if all chars actually fit in ascii.
2319 is_ascii_data_in_two_byte_string =
2320 first->HasOnlyAsciiChars() && second->HasOnlyAsciiChars();
2321 if (is_ascii_data_in_two_byte_string) {
2322 Counters::string_add_runtime_ext_to_ascii.Increment();
2323 }
2324 }
2325
Steve Blocka7e24c12009-10-30 11:49:00 +00002326 // If the resulting string is small make a flat string.
2327 if (length < String::kMinNonFlatLength) {
2328 ASSERT(first->IsFlat());
2329 ASSERT(second->IsFlat());
2330 if (is_ascii) {
John Reck59135872010-11-02 12:39:01 -07002331 Object* result;
2332 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2333 if (!maybe_result->ToObject(&result)) return maybe_result;
2334 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002335 // Copy the characters into the new object.
2336 char* dest = SeqAsciiString::cast(result)->GetChars();
2337 // Copy first part.
Steve Blockd0582a62009-12-15 09:54:21 +00002338 const char* src;
2339 if (first->IsExternalString()) {
2340 src = ExternalAsciiString::cast(first)->resource()->data();
2341 } else {
2342 src = SeqAsciiString::cast(first)->GetChars();
2343 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002344 for (int i = 0; i < first_length; i++) *dest++ = src[i];
2345 // Copy second part.
Steve Blockd0582a62009-12-15 09:54:21 +00002346 if (second->IsExternalString()) {
2347 src = ExternalAsciiString::cast(second)->resource()->data();
2348 } else {
2349 src = SeqAsciiString::cast(second)->GetChars();
2350 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002351 for (int i = 0; i < second_length; i++) *dest++ = src[i];
2352 return result;
2353 } else {
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002354 if (is_ascii_data_in_two_byte_string) {
John Reck59135872010-11-02 12:39:01 -07002355 Object* result;
2356 { MaybeObject* maybe_result = AllocateRawAsciiString(length);
2357 if (!maybe_result->ToObject(&result)) return maybe_result;
2358 }
Steve Block6ded16b2010-05-10 14:33:55 +01002359 // Copy the characters into the new object.
2360 char* dest = SeqAsciiString::cast(result)->GetChars();
2361 String::WriteToFlat(first, dest, 0, first_length);
2362 String::WriteToFlat(second, dest + first_length, 0, second_length);
Steve Block6ded16b2010-05-10 14:33:55 +01002363 return result;
2364 }
2365
John Reck59135872010-11-02 12:39:01 -07002366 Object* result;
2367 { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
2368 if (!maybe_result->ToObject(&result)) return maybe_result;
2369 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002370 // Copy the characters into the new object.
2371 uc16* dest = SeqTwoByteString::cast(result)->GetChars();
2372 String::WriteToFlat(first, dest, 0, first_length);
2373 String::WriteToFlat(second, dest + first_length, 0, second_length);
2374 return result;
2375 }
2376 }
2377
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002378 Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
2379 cons_ascii_string_map() : cons_string_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00002380
John Reck59135872010-11-02 12:39:01 -07002381 Object* result;
2382 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2383 if (!maybe_result->ToObject(&result)) return maybe_result;
2384 }
Leon Clarke4515c472010-02-03 11:58:03 +00002385
2386 AssertNoAllocation no_gc;
Steve Blocka7e24c12009-10-30 11:49:00 +00002387 ConsString* cons_string = ConsString::cast(result);
Leon Clarke4515c472010-02-03 11:58:03 +00002388 WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002389 cons_string->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00002390 cons_string->set_hash_field(String::kEmptyHashField);
2391 cons_string->set_first(first, mode);
2392 cons_string->set_second(second, mode);
Steve Blocka7e24c12009-10-30 11:49:00 +00002393 return result;
2394}
2395
2396
John Reck59135872010-11-02 12:39:01 -07002397MaybeObject* Heap::AllocateSubString(String* buffer,
Steve Blocka7e24c12009-10-30 11:49:00 +00002398 int start,
Steve Block6ded16b2010-05-10 14:33:55 +01002399 int end,
2400 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002401 int length = end - start;
2402
2403 if (length == 1) {
2404 return Heap::LookupSingleCharacterStringFromCode(
2405 buffer->Get(start));
Steve Blockd0582a62009-12-15 09:54:21 +00002406 } else if (length == 2) {
2407 // Optimization for 2-byte strings often used as keys in a decompression
2408 // dictionary. Check whether we already have the string in the symbol
2409    // table to prevent creation of many unnecessary strings.
2410 unsigned c1 = buffer->Get(start);
2411 unsigned c2 = buffer->Get(start + 1);
2412 return MakeOrFindTwoCharacterString(c1, c2);
Steve Blocka7e24c12009-10-30 11:49:00 +00002413 }
2414
2415 // Make an attempt to flatten the buffer to reduce access time.
Leon Clarkef7060e22010-06-03 12:02:55 +01002416 buffer = buffer->TryFlattenGetString();
Steve Blocka7e24c12009-10-30 11:49:00 +00002417
John Reck59135872010-11-02 12:39:01 -07002418 Object* result;
2419 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation()
2420        ? AllocateRawAsciiString(length, pretenure)
2421 : AllocateRawTwoByteString(length, pretenure);
2422 if (!maybe_result->ToObject(&result)) return maybe_result;
2423 }
Steve Blockd0582a62009-12-15 09:54:21 +00002424 String* string_result = String::cast(result);
Steve Blocka7e24c12009-10-30 11:49:00 +00002425 // Copy the characters into the new object.
Steve Blockd0582a62009-12-15 09:54:21 +00002426 if (buffer->IsAsciiRepresentation()) {
2427 ASSERT(string_result->IsAsciiRepresentation());
2428 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2429 String::WriteToFlat(buffer, dest, start, end);
2430 } else {
2431 ASSERT(string_result->IsTwoByteRepresentation());
2432 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2433 String::WriteToFlat(buffer, dest, start, end);
Steve Blocka7e24c12009-10-30 11:49:00 +00002434 }
Steve Blockd0582a62009-12-15 09:54:21 +00002435
Steve Blocka7e24c12009-10-30 11:49:00 +00002436 return result;
2437}
2438
2439
John Reck59135872010-11-02 12:39:01 -07002440MaybeObject* Heap::AllocateExternalStringFromAscii(
Steve Blocka7e24c12009-10-30 11:49:00 +00002441 ExternalAsciiString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002442 size_t length = resource->length();
2443 if (length > static_cast<size_t>(String::kMaxLength)) {
2444 Top::context()->mark_out_of_memory();
2445 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00002446 }
2447
Steve Blockd0582a62009-12-15 09:54:21 +00002448 Map* map = external_ascii_string_map();
John Reck59135872010-11-02 12:39:01 -07002449 Object* result;
2450 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2451 if (!maybe_result->ToObject(&result)) return maybe_result;
2452 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002453
2454 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002455 external_string->set_length(static_cast<int>(length));
2456 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002457 external_string->set_resource(resource);
2458
2459 return result;
2460}
2461
2462
John Reck59135872010-11-02 12:39:01 -07002463MaybeObject* Heap::AllocateExternalStringFromTwoByte(
Steve Blocka7e24c12009-10-30 11:49:00 +00002464 ExternalTwoByteString::Resource* resource) {
Steve Blockd0582a62009-12-15 09:54:21 +00002465 size_t length = resource->length();
2466 if (length > static_cast<size_t>(String::kMaxLength)) {
2467 Top::context()->mark_out_of_memory();
2468 return Failure::OutOfMemoryException();
2469 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002470
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01002471 // For small strings we check whether the resource contains only
2472  // ascii characters. If so, we use a different string map.
2473 bool is_ascii = true;
2474 if (length >= static_cast<size_t>(String::kMinNonFlatLength)) {
2475 is_ascii = false;
2476 } else {
2477 const uc16* data = resource->data();
2478 for (size_t i = 0; i < length; i++) {
2479 if (data[i] > String::kMaxAsciiCharCode) {
2480 is_ascii = false;
2481 break;
2482 }
2483 }
2484 }
2485
2486 Map* map = is_ascii ?
2487 Heap::external_string_with_ascii_data_map() : Heap::external_string_map();
John Reck59135872010-11-02 12:39:01 -07002488 Object* result;
2489 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2490 if (!maybe_result->ToObject(&result)) return maybe_result;
2491 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002492
2493 ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00002494 external_string->set_length(static_cast<int>(length));
2495 external_string->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00002496 external_string->set_resource(resource);
2497
2498 return result;
2499}
2500
2501
John Reck59135872010-11-02 12:39:01 -07002502MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002503 if (code <= String::kMaxAsciiCharCode) {
2504 Object* value = Heap::single_character_string_cache()->get(code);
2505 if (value != Heap::undefined_value()) return value;
2506
2507 char buffer[1];
2508 buffer[0] = static_cast<char>(code);
John Reck59135872010-11-02 12:39:01 -07002509 Object* result;
2510 MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
Steve Blocka7e24c12009-10-30 11:49:00 +00002511
John Reck59135872010-11-02 12:39:01 -07002512 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002513 Heap::single_character_string_cache()->set(code, result);
2514 return result;
2515 }
2516
John Reck59135872010-11-02 12:39:01 -07002517 Object* result;
2518 { MaybeObject* maybe_result = Heap::AllocateRawTwoByteString(1);
2519 if (!maybe_result->ToObject(&result)) return maybe_result;
2520 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002521 String* answer = String::cast(result);
2522 answer->Set(0, code);
2523 return answer;
2524}
2525
2526
John Reck59135872010-11-02 12:39:01 -07002527MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002528 if (length < 0 || length > ByteArray::kMaxLength) {
2529 return Failure::OutOfMemoryException();
2530 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002531 if (pretenure == NOT_TENURED) {
2532 return AllocateByteArray(length);
2533 }
2534 int size = ByteArray::SizeFor(length);
John Reck59135872010-11-02 12:39:01 -07002535 Object* result;
2536 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2537 ? old_data_space_->AllocateRaw(size)
2538 : lo_space_->AllocateRaw(size);
2539 if (!maybe_result->ToObject(&result)) return maybe_result;
2540 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002541
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002542 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2543 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002544 return result;
2545}
2546
2547
John Reck59135872010-11-02 12:39:01 -07002548MaybeObject* Heap::AllocateByteArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00002549 if (length < 0 || length > ByteArray::kMaxLength) {
2550 return Failure::OutOfMemoryException();
2551 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002552 int size = ByteArray::SizeFor(length);
2553 AllocationSpace space =
Leon Clarkee46be812010-01-19 14:06:41 +00002554 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002555 Object* result;
2556 { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2557 if (!maybe_result->ToObject(&result)) return maybe_result;
2558 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002559
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002560 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2561 reinterpret_cast<ByteArray*>(result)->set_length(length);
Steve Blocka7e24c12009-10-30 11:49:00 +00002562 return result;
2563}
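// Both ByteArray allocators above follow the same pattern: reject
// out-of-range lengths, pick a space from the tenure flag and the object size
// (anything above MaxObjectSizeInPagedSpace() ends up in large-object space),
// and only then install byte_array_map() and the length on the raw memory.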
2564
2565
2566void Heap::CreateFillerObjectAt(Address addr, int size) {
2567 if (size == 0) return;
2568 HeapObject* filler = HeapObject::FromAddress(addr);
2569 if (size == kPointerSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01002570 filler->set_map(one_pointer_filler_map());
2571 } else if (size == 2 * kPointerSize) {
2572 filler->set_map(two_pointer_filler_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002573 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002574 filler->set_map(byte_array_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00002575 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size));
2576 }
2577}
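// The filler written above keeps the heap iterable across a gap: a lone map
// word covers kPointerSize, the two-pointer filler covers 2 * kPointerSize,
// and any larger gap becomes a ByteArray whose length is chosen so that
// ByteArray::SizeFor(ByteArray::LengthFor(size)) == size.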
2578
2579
John Reck59135872010-11-02 12:39:01 -07002580MaybeObject* Heap::AllocatePixelArray(int length,
Steve Blocka7e24c12009-10-30 11:49:00 +00002581 uint8_t* external_pointer,
2582 PretenureFlag pretenure) {
2583 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002584 Object* result;
2585 { MaybeObject* maybe_result =
2586 AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
2587 if (!maybe_result->ToObject(&result)) return maybe_result;
2588 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002589
2590 reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
2591 reinterpret_cast<PixelArray*>(result)->set_length(length);
2592 reinterpret_cast<PixelArray*>(result)->set_external_pointer(external_pointer);
2593
2594 return result;
2595}
2596
2597
John Reck59135872010-11-02 12:39:01 -07002598MaybeObject* Heap::AllocateExternalArray(int length,
2599 ExternalArrayType array_type,
2600 void* external_pointer,
2601 PretenureFlag pretenure) {
Steve Block3ce2e202009-11-05 08:53:23 +00002602 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002603 Object* result;
2604 { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
2605 space,
2606 OLD_DATA_SPACE);
2607 if (!maybe_result->ToObject(&result)) return maybe_result;
2608 }
Steve Block3ce2e202009-11-05 08:53:23 +00002609
2610 reinterpret_cast<ExternalArray*>(result)->set_map(
2611 MapForExternalArrayType(array_type));
2612 reinterpret_cast<ExternalArray*>(result)->set_length(length);
2613 reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
2614 external_pointer);
2615
2616 return result;
2617}
2618
2619
John Reck59135872010-11-02 12:39:01 -07002620MaybeObject* Heap::CreateCode(const CodeDesc& desc,
2621 Code::Flags flags,
2622 Handle<Object> self_reference) {
Leon Clarkeac952652010-07-15 11:15:24 +01002623 // Allocate ByteArray before the Code object, so that we do not risk
2624 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002625 Object* reloc_info;
2626 { MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
2627 if (!maybe_reloc_info->ToObject(&reloc_info)) return maybe_reloc_info;
2628 }
Leon Clarkeac952652010-07-15 11:15:24 +01002629
Steve Blocka7e24c12009-10-30 11:49:00 +00002630 // Compute size
Leon Clarkeac952652010-07-15 11:15:24 +01002631 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002632 int obj_size = Code::SizeFor(body_size);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002633 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
John Reck59135872010-11-02 12:39:01 -07002634 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002635 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002636 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002637 } else {
John Reck59135872010-11-02 12:39:01 -07002638 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002639 }
2640
John Reck59135872010-11-02 12:39:01 -07002641 Object* result;
2642 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002643
2644 // Initialize the object
2645 HeapObject::cast(result)->set_map(code_map());
2646 Code* code = Code::cast(result);
2647 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2648 code->set_instruction_size(desc.instr_size);
Leon Clarkeac952652010-07-15 11:15:24 +01002649 code->set_relocation_info(ByteArray::cast(reloc_info));
Steve Blocka7e24c12009-10-30 11:49:00 +00002650 code->set_flags(flags);
2651 // Allow self references to created code object by patching the handle to
2652 // point to the newly allocated Code object.
2653 if (!self_reference.is_null()) {
2654 *(self_reference.location()) = code;
2655 }
2656 // Migrate generated code.
2657 // The generated code can contain Object** values (typically from handles)
2658 // that are dereferenced during the copy to point directly to the actual heap
2659 // objects. These pointers can include references to the code object itself,
2660 // through the self_reference parameter.
2661 code->CopyFrom(desc);
Steve Blocka7e24c12009-10-30 11:49:00 +00002662
2663#ifdef DEBUG
2664 code->Verify();
2665#endif
2666 return code;
2667}
2668
2669
John Reck59135872010-11-02 12:39:01 -07002670MaybeObject* Heap::CopyCode(Code* code) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002671 // Allocate an object the same size as the code object.
2672 int obj_size = code->Size();
John Reck59135872010-11-02 12:39:01 -07002673 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002674 if (obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002675 maybe_result = lo_space_->AllocateRawCode(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002676 } else {
John Reck59135872010-11-02 12:39:01 -07002677 maybe_result = code_space_->AllocateRaw(obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002678 }
2679
John Reck59135872010-11-02 12:39:01 -07002680 Object* result;
2681 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00002682
2683 // Copy code object.
2684 Address old_addr = code->address();
2685 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002686 CopyBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002687 // Relocate the copy.
2688 Code* new_code = Code::cast(result);
2689 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2690 new_code->Relocate(new_addr - old_addr);
2691 return new_code;
2692}
2693
2694
John Reck59135872010-11-02 12:39:01 -07002695MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
Leon Clarkeac952652010-07-15 11:15:24 +01002696 // Allocate ByteArray before the Code object, so that we do not risk
2697 // leaving uninitialized Code object (and breaking the heap).
John Reck59135872010-11-02 12:39:01 -07002698 Object* reloc_info_array;
2699 { MaybeObject* maybe_reloc_info_array =
2700 AllocateByteArray(reloc_info.length(), TENURED);
2701 if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
2702 return maybe_reloc_info_array;
2703 }
2704 }
Leon Clarkeac952652010-07-15 11:15:24 +01002705
2706 int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
Steve Block6ded16b2010-05-10 14:33:55 +01002707
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002708 int new_obj_size = Code::SizeFor(new_body_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002709
2710 Address old_addr = code->address();
2711
2712 size_t relocation_offset =
Leon Clarkeac952652010-07-15 11:15:24 +01002713 static_cast<size_t>(code->instruction_end() - old_addr);
Steve Block6ded16b2010-05-10 14:33:55 +01002714
John Reck59135872010-11-02 12:39:01 -07002715 MaybeObject* maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002716 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002717 maybe_result = lo_space_->AllocateRawCode(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002718 } else {
John Reck59135872010-11-02 12:39:01 -07002719 maybe_result = code_space_->AllocateRaw(new_obj_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002720 }
2721
John Reck59135872010-11-02 12:39:01 -07002722 Object* result;
2723 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Block6ded16b2010-05-10 14:33:55 +01002724
2725 // Copy code object.
2726 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
2727
2728 // Copy header and instructions.
2729 memcpy(new_addr, old_addr, relocation_offset);
2730
Steve Block6ded16b2010-05-10 14:33:55 +01002731 Code* new_code = Code::cast(result);
Leon Clarkeac952652010-07-15 11:15:24 +01002732 new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
Steve Block6ded16b2010-05-10 14:33:55 +01002733
Leon Clarkeac952652010-07-15 11:15:24 +01002734 // Copy patched rinfo.
2735 memcpy(new_code->relocation_start(), reloc_info.start(), reloc_info.length());
Steve Block6ded16b2010-05-10 14:33:55 +01002736
2737 // Relocate the copy.
2738 ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
2739 new_code->Relocate(new_addr - old_addr);
2740
2741#ifdef DEBUG
 2742 new_code->Verify();  // Verify the freshly copied code object.
2743#endif
2744 return new_code;
2745}
2746
2747
John Reck59135872010-11-02 12:39:01 -07002748MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002749 ASSERT(gc_state_ == NOT_IN_GC);
2750 ASSERT(map->instance_type() != MAP_TYPE);
Leon Clarkee46be812010-01-19 14:06:41 +00002751 // If allocation failures are disallowed, we may allocate in a different
2752 // space when new space is full and the object is not a large object.
2753 AllocationSpace retry_space =
2754 (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
John Reck59135872010-11-02 12:39:01 -07002755 Object* result;
2756 { MaybeObject* maybe_result =
2757 AllocateRaw(map->instance_size(), space, retry_space);
2758 if (!maybe_result->ToObject(&result)) return maybe_result;
2759 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002760 HeapObject::cast(result)->set_map(map);
Steve Block3ce2e202009-11-05 08:53:23 +00002761#ifdef ENABLE_LOGGING_AND_PROFILING
2762 ProducerHeapProfile::RecordJSObjectAllocation(result);
2763#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00002764 return result;
2765}
2766
2767
John Reck59135872010-11-02 12:39:01 -07002768MaybeObject* Heap::InitializeFunction(JSFunction* function,
2769 SharedFunctionInfo* shared,
2770 Object* prototype) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002771 ASSERT(!prototype->IsMap());
2772 function->initialize_properties();
2773 function->initialize_elements();
2774 function->set_shared(shared);
Iain Merrick75681382010-08-19 15:07:18 +01002775 function->set_code(shared->code());
Steve Blocka7e24c12009-10-30 11:49:00 +00002776 function->set_prototype_or_initial_map(prototype);
2777 function->set_context(undefined_value());
Leon Clarke4515c472010-02-03 11:58:03 +00002778 function->set_literals(empty_fixed_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00002779 return function;
2780}
2781
2782
John Reck59135872010-11-02 12:39:01 -07002783MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002784 // Allocate the prototype. Make sure to use the object function
2785 // from the function's context, since the function can be from a
2786 // different context.
2787 JSFunction* object_function =
2788 function->context()->global_context()->object_function();
John Reck59135872010-11-02 12:39:01 -07002789 Object* prototype;
2790 { MaybeObject* maybe_prototype = AllocateJSObject(object_function);
2791 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2792 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002793 // When creating the prototype for the function we must set its
2794 // constructor to the function.
John Reck59135872010-11-02 12:39:01 -07002795 Object* result;
2796 { MaybeObject* maybe_result =
2797 JSObject::cast(prototype)->SetProperty(constructor_symbol(),
2798 function,
2799 DONT_ENUM);
2800 if (!maybe_result->ToObject(&result)) return maybe_result;
2801 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002802 return prototype;
2803}
2804
2805
John Reck59135872010-11-02 12:39:01 -07002806MaybeObject* Heap::AllocateFunction(Map* function_map,
2807 SharedFunctionInfo* shared,
2808 Object* prototype,
2809 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00002810 AllocationSpace space =
2811 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
John Reck59135872010-11-02 12:39:01 -07002812 Object* result;
2813 { MaybeObject* maybe_result = Allocate(function_map, space);
2814 if (!maybe_result->ToObject(&result)) return maybe_result;
2815 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002816 return InitializeFunction(JSFunction::cast(result), shared, prototype);
2817}
2818
2819
John Reck59135872010-11-02 12:39:01 -07002820MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002821 // To get fast allocation and map sharing for arguments objects we
2822 // allocate them based on an arguments boilerplate.
2823
 2824 // The boilerplate is copied into a raw allocation below rather than cloned
 2825 // via a helper, so the allocation invariants are asserted here as well.
2826 ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
2827
2828 JSObject* boilerplate =
2829 Top::context()->global_context()->arguments_boilerplate();
2830
Leon Clarkee46be812010-01-19 14:06:41 +00002831 // Check that the size of the boilerplate matches our
2832 // expectations. The ArgumentsAccessStub::GenerateNewObject relies
2833 // on the size being a known constant.
2834 ASSERT(kArgumentsObjectSize == boilerplate->map()->instance_size());
2835
2836 // Do the allocation.
John Reck59135872010-11-02 12:39:01 -07002837 Object* result;
2838 { MaybeObject* maybe_result =
2839 AllocateRaw(kArgumentsObjectSize, NEW_SPACE, OLD_POINTER_SPACE);
2840 if (!maybe_result->ToObject(&result)) return maybe_result;
2841 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002842
2843 // Copy the content. The arguments boilerplate doesn't have any
2844 // fields that point to new space so it's safe to skip the write
2845 // barrier here.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002846 CopyBlock(HeapObject::cast(result)->address(),
2847 boilerplate->address(),
Leon Clarkee46be812010-01-19 14:06:41 +00002848 kArgumentsObjectSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002849
2850 // Set the two properties.
2851 JSObject::cast(result)->InObjectPropertyAtPut(arguments_callee_index,
2852 callee);
2853 JSObject::cast(result)->InObjectPropertyAtPut(arguments_length_index,
2854 Smi::FromInt(length),
2855 SKIP_WRITE_BARRIER);
2856
2857 // Check the state of the object
2858 ASSERT(JSObject::cast(result)->HasFastProperties());
2859 ASSERT(JSObject::cast(result)->HasFastElements());
2860
2861 return result;
2862}
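// Result of the allocation above, in outline: a byte-for-byte clone of the
// arguments boilerplate whose two in-object slots are then overwritten with
// the callee (arguments_callee_index) and Smi::FromInt(length)
// (arguments_length_index). The length is a Smi, so SKIP_WRITE_BARRIER is
// safe for that store.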
2863
2864
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002865static bool HasDuplicates(DescriptorArray* descriptors) {
2866 int count = descriptors->number_of_descriptors();
2867 if (count > 1) {
2868 String* prev_key = descriptors->GetKey(0);
2869 for (int i = 1; i != count; i++) {
2870 String* current_key = descriptors->GetKey(i);
2871 if (prev_key == current_key) return true;
2872 prev_key = current_key;
2873 }
2874 }
2875 return false;
2876}
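// HasDuplicates() relies on the descriptors having been sorted so that equal
// keys end up adjacent. A (hypothetical) constructor such as
//   function Point(x) { this.x = x; this.x = x + 1; }
// produces two simple this-property assignments with the same name, which is
// exactly the case the linear scan above reports.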
2877
2878
John Reck59135872010-11-02 12:39:01 -07002879MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002880 ASSERT(!fun->has_initial_map());
2881
2882 // First create a new map with the size and number of in-object properties
2883 // suggested by the function.
2884 int instance_size = fun->shared()->CalculateInstanceSize();
2885 int in_object_properties = fun->shared()->CalculateInObjectProperties();
John Reck59135872010-11-02 12:39:01 -07002886 Object* map_obj;
2887 { MaybeObject* maybe_map_obj =
2888 Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
2889 if (!maybe_map_obj->ToObject(&map_obj)) return maybe_map_obj;
2890 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002891
2892 // Fetch or allocate prototype.
2893 Object* prototype;
2894 if (fun->has_instance_prototype()) {
2895 prototype = fun->instance_prototype();
2896 } else {
John Reck59135872010-11-02 12:39:01 -07002897 { MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
2898 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
2899 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002900 }
2901 Map* map = Map::cast(map_obj);
2902 map->set_inobject_properties(in_object_properties);
2903 map->set_unused_property_fields(in_object_properties);
2904 map->set_prototype(prototype);
Steve Block8defd9f2010-07-08 12:39:36 +01002905 ASSERT(map->has_fast_elements());
Steve Blocka7e24c12009-10-30 11:49:00 +00002906
Andrei Popescu402d9372010-02-26 13:31:12 +00002907 // If the function has only simple this property assignments add
2908 // field descriptors for these to the initial map as the object
2909 // cannot be constructed without having these properties. Guard by
2910 // the inline_new flag so we only change the map if we generate a
2911 // specialized construct stub.
Steve Blocka7e24c12009-10-30 11:49:00 +00002912 ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
Andrei Popescu402d9372010-02-26 13:31:12 +00002913 if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002914 int count = fun->shared()->this_property_assignments_count();
2915 if (count > in_object_properties) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002916 // Inline constructor can only handle inobject properties.
2917 fun->shared()->ForbidInlineConstructor();
2918 } else {
John Reck59135872010-11-02 12:39:01 -07002919 Object* descriptors_obj;
2920 { MaybeObject* maybe_descriptors_obj = DescriptorArray::Allocate(count);
2921 if (!maybe_descriptors_obj->ToObject(&descriptors_obj)) {
2922 return maybe_descriptors_obj;
2923 }
2924 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002925 DescriptorArray* descriptors = DescriptorArray::cast(descriptors_obj);
2926 for (int i = 0; i < count; i++) {
2927 String* name = fun->shared()->GetThisPropertyAssignmentName(i);
2928 ASSERT(name->IsSymbol());
2929 FieldDescriptor field(name, i, NONE);
2930 field.SetEnumerationIndex(i);
2931 descriptors->Set(i, &field);
2932 }
2933 descriptors->SetNextEnumerationIndex(count);
2934 descriptors->SortUnchecked();
2935
2936 // The descriptors may contain duplicates because the compiler does not
2937 // guarantee the uniqueness of property names (it would have required
2938 // quadratic time). Once the descriptors are sorted we can check for
2939 // duplicates in linear time.
2940 if (HasDuplicates(descriptors)) {
2941 fun->shared()->ForbidInlineConstructor();
2942 } else {
2943 map->set_instance_descriptors(descriptors);
2944 map->set_pre_allocated_property_fields(count);
2945 map->set_unused_property_fields(in_object_properties - count);
2946 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002947 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002948 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002949
2950 fun->shared()->StartInobjectSlackTracking(map);
2951
Steve Blocka7e24c12009-10-30 11:49:00 +00002952 return map;
2953}
2954
2955
2956void Heap::InitializeJSObjectFromMap(JSObject* obj,
2957 FixedArray* properties,
2958 Map* map) {
2959 obj->set_properties(properties);
2960 obj->initialize_elements();
2961 // TODO(1240798): Initialize the object's body using valid initial values
2962 // according to the object's initial map. For example, if the map's
2963 // instance type is JS_ARRAY_TYPE, the length field should be initialized
 2964 // to a number (e.g., Smi::FromInt(0)) and the elements initialized to a
 2965 // fixed array (e.g., Heap::empty_fixed_array()). Currently, the object
 2966 // verification code has to cope with (temporarily) invalid objects. See,
 2967 // for example, JSArray::JSArrayVerify.
Kristian Monsen0d5e1162010-09-30 15:31:59 +01002968 Object* filler;
2969 // We cannot always fill with one_pointer_filler_map because objects
2970 // created from API functions expect their internal fields to be initialized
2971 // with undefined_value.
2972 if (map->constructor()->IsJSFunction() &&
2973 JSFunction::cast(map->constructor())->shared()->
2974 IsInobjectSlackTrackingInProgress()) {
2975 // We might want to shrink the object later.
2976 ASSERT(obj->GetInternalFieldCount() == 0);
2977 filler = Heap::one_pointer_filler_map();
2978 } else {
2979 filler = Heap::undefined_value();
2980 }
2981 obj->InitializeBody(map->instance_size(), filler);
Steve Blocka7e24c12009-10-30 11:49:00 +00002982}
2983
2984
John Reck59135872010-11-02 12:39:01 -07002985MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002986 // JSFunctions should be allocated using AllocateFunction to be
2987 // properly initialized.
2988 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
2989
Steve Block8defd9f2010-07-08 12:39:36 +01002990 // Both types of global objects should be allocated using
2991 // AllocateGlobalObject to be properly initialized.
Steve Blocka7e24c12009-10-30 11:49:00 +00002992 ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
2993 ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
2994
2995 // Allocate the backing storage for the properties.
2996 int prop_size =
2997 map->pre_allocated_property_fields() +
2998 map->unused_property_fields() -
2999 map->inobject_properties();
3000 ASSERT(prop_size >= 0);
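  // Illustrative arithmetic (numbers made up): a map with 6 pre-allocated
  // property fields, 2 unused fields and 4 in-object properties yields
  // prop_size = 6 + 2 - 4 = 4 backing-store slots, i.e. room for described
  // properties that cannot live inside the object itself.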
John Reck59135872010-11-02 12:39:01 -07003001 Object* properties;
3002 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
3003 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3004 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003005
3006 // Allocate the JSObject.
3007 AllocationSpace space =
3008 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3009 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
John Reck59135872010-11-02 12:39:01 -07003010 Object* obj;
3011 { MaybeObject* maybe_obj = Allocate(map, space);
3012 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3013 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003014
3015 // Initialize the JSObject.
3016 InitializeJSObjectFromMap(JSObject::cast(obj),
3017 FixedArray::cast(properties),
3018 map);
Steve Block8defd9f2010-07-08 12:39:36 +01003019 ASSERT(JSObject::cast(obj)->HasFastElements());
Steve Blocka7e24c12009-10-30 11:49:00 +00003020 return obj;
3021}
3022
3023
John Reck59135872010-11-02 12:39:01 -07003024MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3025 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003026 // Allocate the initial map if absent.
3027 if (!constructor->has_initial_map()) {
John Reck59135872010-11-02 12:39:01 -07003028 Object* initial_map;
3029 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
3030 if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
3031 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003032 constructor->set_initial_map(Map::cast(initial_map));
3033 Map::cast(initial_map)->set_constructor(constructor);
3034 }
 3035 // Allocate the object based on the constructor's initial map.
John Reck59135872010-11-02 12:39:01 -07003036 MaybeObject* result =
Steve Blocka7e24c12009-10-30 11:49:00 +00003037 AllocateJSObjectFromMap(constructor->initial_map(), pretenure);
John Reck59135872010-11-02 12:39:01 -07003038#ifdef DEBUG
Steve Blocka7e24c12009-10-30 11:49:00 +00003039 // Make sure result is NOT a global object if valid.
John Reck59135872010-11-02 12:39:01 -07003040 Object* non_failure;
3041 ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
3042#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003043 return result;
3044}
3045
3046
John Reck59135872010-11-02 12:39:01 -07003047MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003048 ASSERT(constructor->has_initial_map());
3049 Map* map = constructor->initial_map();
3050
3051 // Make sure no field properties are described in the initial map.
3052 // This guarantees us that normalizing the properties does not
3053 // require us to change property values to JSGlobalPropertyCells.
3054 ASSERT(map->NextFreePropertyIndex() == 0);
3055
3056 // Make sure we don't have a ton of pre-allocated slots in the
3057 // global objects. They will be unused once we normalize the object.
3058 ASSERT(map->unused_property_fields() == 0);
3059 ASSERT(map->inobject_properties() == 0);
3060
3061 // Initial size of the backing store to avoid resize of the storage during
 3062 // bootstrapping. The size differs between the JS global object and the
3063 // builtins object.
3064 int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
3065
3066 // Allocate a dictionary object for backing storage.
John Reck59135872010-11-02 12:39:01 -07003067 Object* obj;
3068 { MaybeObject* maybe_obj =
3069 StringDictionary::Allocate(
3070 map->NumberOfDescribedProperties() * 2 + initial_size);
3071 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3072 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003073 StringDictionary* dictionary = StringDictionary::cast(obj);
3074
3075 // The global object might be created from an object template with accessors.
3076 // Fill these accessors into the dictionary.
3077 DescriptorArray* descs = map->instance_descriptors();
3078 for (int i = 0; i < descs->number_of_descriptors(); i++) {
3079 PropertyDetails details = descs->GetDetails(i);
3080 ASSERT(details.type() == CALLBACKS); // Only accessors are expected.
3081 PropertyDetails d =
3082 PropertyDetails(details.attributes(), CALLBACKS, details.index());
3083 Object* value = descs->GetCallbacksObject(i);
John Reck59135872010-11-02 12:39:01 -07003084 { MaybeObject* maybe_value = Heap::AllocateJSGlobalPropertyCell(value);
3085 if (!maybe_value->ToObject(&value)) return maybe_value;
3086 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003087
John Reck59135872010-11-02 12:39:01 -07003088 Object* result;
3089 { MaybeObject* maybe_result = dictionary->Add(descs->GetKey(i), value, d);
3090 if (!maybe_result->ToObject(&result)) return maybe_result;
3091 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003092 dictionary = StringDictionary::cast(result);
3093 }
3094
3095 // Allocate the global object and initialize it with the backing store.
John Reck59135872010-11-02 12:39:01 -07003096 { MaybeObject* maybe_obj = Allocate(map, OLD_POINTER_SPACE);
3097 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3098 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003099 JSObject* global = JSObject::cast(obj);
3100 InitializeJSObjectFromMap(global, dictionary, map);
3101
3102 // Create a new map for the global object.
John Reck59135872010-11-02 12:39:01 -07003103 { MaybeObject* maybe_obj = map->CopyDropDescriptors();
3104 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3105 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003106 Map* new_map = Map::cast(obj);
3107
3108 // Setup the global object as a normalized object.
3109 global->set_map(new_map);
3110 global->map()->set_instance_descriptors(Heap::empty_descriptor_array());
3111 global->set_properties(dictionary);
3112
3113 // Make sure result is a global object with properties in dictionary.
3114 ASSERT(global->IsGlobalObject());
3115 ASSERT(!global->HasFastProperties());
3116 return global;
3117}
3118
3119
John Reck59135872010-11-02 12:39:01 -07003120MaybeObject* Heap::CopyJSObject(JSObject* source) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003121 // Never used to copy functions. If functions need to be copied we
3122 // have to be careful to clear the literals array.
3123 ASSERT(!source->IsJSFunction());
3124
3125 // Make the clone.
3126 Map* map = source->map();
3127 int object_size = map->instance_size();
3128 Object* clone;
3129
3130 // If we're forced to always allocate, we use the general allocation
3131 // functions which may leave us with an object in old space.
3132 if (always_allocate()) {
John Reck59135872010-11-02 12:39:01 -07003133 { MaybeObject* maybe_clone =
3134 AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3135 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3136 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003137 Address clone_address = HeapObject::cast(clone)->address();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003138 CopyBlock(clone_address,
3139 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003140 object_size);
3141 // Update write barrier for all fields that lie beyond the header.
Steve Block6ded16b2010-05-10 14:33:55 +01003142 RecordWrites(clone_address,
3143 JSObject::kHeaderSize,
3144 (object_size - JSObject::kHeaderSize) / kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003145 } else {
John Reck59135872010-11-02 12:39:01 -07003146 { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
3147 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3148 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003149 ASSERT(Heap::InNewSpace(clone));
3150 // Since we know the clone is allocated in new space, we can copy
3151 // the contents without worrying about updating the write barrier.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003152 CopyBlock(HeapObject::cast(clone)->address(),
3153 source->address(),
Steve Blocka7e24c12009-10-30 11:49:00 +00003154 object_size);
3155 }
3156
3157 FixedArray* elements = FixedArray::cast(source->elements());
3158 FixedArray* properties = FixedArray::cast(source->properties());
3159 // Update elements if necessary.
Steve Block6ded16b2010-05-10 14:33:55 +01003160 if (elements->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003161 Object* elem;
3162 { MaybeObject* maybe_elem =
3163 (elements->map() == fixed_cow_array_map()) ?
3164 elements : CopyFixedArray(elements);
3165 if (!maybe_elem->ToObject(&elem)) return maybe_elem;
3166 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003167 JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
3168 }
3169 // Update properties if necessary.
3170 if (properties->length() > 0) {
John Reck59135872010-11-02 12:39:01 -07003171 Object* prop;
3172 { MaybeObject* maybe_prop = CopyFixedArray(properties);
3173 if (!maybe_prop->ToObject(&prop)) return maybe_prop;
3174 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003175 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3176 }
3177 // Return the new clone.
Steve Block3ce2e202009-11-05 08:53:23 +00003178#ifdef ENABLE_LOGGING_AND_PROFILING
3179 ProducerHeapProfile::RecordJSObjectAllocation(clone);
3180#endif
Steve Blocka7e24c12009-10-30 11:49:00 +00003181 return clone;
3182}
3183
3184
John Reck59135872010-11-02 12:39:01 -07003185MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
3186 JSGlobalProxy* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003187 ASSERT(constructor->has_initial_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003188 Map* map = constructor->initial_map();
3189
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003190 // Check that the already allocated object has the same size and type as
Steve Blocka7e24c12009-10-30 11:49:00 +00003191 // objects allocated using the constructor.
3192 ASSERT(map->instance_size() == object->map()->instance_size());
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003193 ASSERT(map->instance_type() == object->map()->instance_type());
Steve Blocka7e24c12009-10-30 11:49:00 +00003194
3195 // Allocate the backing storage for the properties.
3196 int prop_size = map->unused_property_fields() - map->inobject_properties();
John Reck59135872010-11-02 12:39:01 -07003197 Object* properties;
3198 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
3199 if (!maybe_properties->ToObject(&properties)) return maybe_properties;
3200 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003201
3202 // Reset the map for the object.
3203 object->set_map(constructor->initial_map());
3204
3205 // Reinitialize the object from the constructor map.
3206 InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
3207 return object;
3208}
3209
3210
John Reck59135872010-11-02 12:39:01 -07003211MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
3212 PretenureFlag pretenure) {
3213 Object* result;
3214 { MaybeObject* maybe_result =
3215 AllocateRawAsciiString(string.length(), pretenure);
3216 if (!maybe_result->ToObject(&result)) return maybe_result;
3217 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003218
3219 // Copy the characters into the new object.
3220 SeqAsciiString* string_result = SeqAsciiString::cast(result);
3221 for (int i = 0; i < string.length(); i++) {
3222 string_result->SeqAsciiStringSet(i, string[i]);
3223 }
3224 return result;
3225}
3226
3227
John Reck59135872010-11-02 12:39:01 -07003228MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> string,
3229 PretenureFlag pretenure) {
Leon Clarkeac952652010-07-15 11:15:24 +01003230 // V8 only supports characters in the Basic Multilingual Plane.
3231 const uc32 kMaxSupportedChar = 0xFFFF;
Steve Blocka7e24c12009-10-30 11:49:00 +00003232 // Count the number of characters in the UTF-8 string and check if
3233 // it is an ASCII string.
3234 Access<Scanner::Utf8Decoder> decoder(Scanner::utf8_decoder());
3235 decoder->Reset(string.start(), string.length());
3236 int chars = 0;
3237 bool is_ascii = true;
3238 while (decoder->has_more()) {
3239 uc32 r = decoder->GetNext();
3240 if (r > String::kMaxAsciiCharCode) is_ascii = false;
3241 chars++;
3242 }
3243
3244 // If the string is ascii, we do not need to convert the characters
3245 // since UTF8 is backwards compatible with ascii.
3246 if (is_ascii) return AllocateStringFromAscii(string, pretenure);
3247
John Reck59135872010-11-02 12:39:01 -07003248 Object* result;
3249 { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
3250 if (!maybe_result->ToObject(&result)) return maybe_result;
3251 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003252
3253 // Convert and copy the characters into the new object.
3254 String* string_result = String::cast(result);
3255 decoder->Reset(string.start(), string.length());
3256 for (int i = 0; i < chars; i++) {
3257 uc32 r = decoder->GetNext();
Leon Clarkeac952652010-07-15 11:15:24 +01003258 if (r > kMaxSupportedChar) { r = unibrow::Utf8::kBadChar; }
Steve Blocka7e24c12009-10-30 11:49:00 +00003259 string_result->Set(i, r);
3260 }
3261 return result;
3262}
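// Example of the BMP clamp above (code points chosen for illustration): the
// UTF-8 bytes for U+20AC decode to 0x20AC and are stored unchanged, whereas
// U+1F600 decodes to a value above kMaxSupportedChar and is therefore
// replaced by unibrow::Utf8::kBadChar.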
3263
3264
John Reck59135872010-11-02 12:39:01 -07003265MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
3266 PretenureFlag pretenure) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003267 // Check if the string is an ASCII string.
3268 int i = 0;
3269 while (i < string.length() && string[i] <= String::kMaxAsciiCharCode) i++;
3270
John Reck59135872010-11-02 12:39:01 -07003271 MaybeObject* maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003272 if (i == string.length()) { // It's an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003273 maybe_result = AllocateRawAsciiString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003274 } else { // It's not an ASCII string.
John Reck59135872010-11-02 12:39:01 -07003275 maybe_result = AllocateRawTwoByteString(string.length(), pretenure);
Steve Blocka7e24c12009-10-30 11:49:00 +00003276 }
John Reck59135872010-11-02 12:39:01 -07003277 Object* result;
3278 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003279
3280 // Copy the characters into the new object, which may be either ASCII or
3281 // UTF-16.
3282 String* string_result = String::cast(result);
3283 for (int i = 0; i < string.length(); i++) {
3284 string_result->Set(i, string[i]);
3285 }
3286 return result;
3287}
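// In the scan above, i only reaches string.length() when every code unit is
// <= String::kMaxAsciiCharCode (e.g. {'f', 'o', 'o'}); a single unit such as
// 0x00E9 is enough to force the two-byte representation.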
3288
3289
3290Map* Heap::SymbolMapForString(String* string) {
3291 // If the string is in new space it cannot be used as a symbol.
3292 if (InNewSpace(string)) return NULL;
3293
3294 // Find the corresponding symbol map for strings.
3295 Map* map = string->map();
Steve Blockd0582a62009-12-15 09:54:21 +00003296 if (map == ascii_string_map()) return ascii_symbol_map();
3297 if (map == string_map()) return symbol_map();
3298 if (map == cons_string_map()) return cons_symbol_map();
3299 if (map == cons_ascii_string_map()) return cons_ascii_symbol_map();
3300 if (map == external_string_map()) return external_symbol_map();
3301 if (map == external_ascii_string_map()) return external_ascii_symbol_map();
Kristian Monsen9dcf7e22010-06-28 14:14:28 +01003302 if (map == external_string_with_ascii_data_map()) {
3303 return external_symbol_with_ascii_data_map();
3304 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003305
3306 // No match found.
3307 return NULL;
3308}
3309
3310
John Reck59135872010-11-02 12:39:01 -07003311MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
3312 int chars,
3313 uint32_t hash_field) {
Leon Clarkee46be812010-01-19 14:06:41 +00003314 ASSERT(chars >= 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003315 // Ensure the chars matches the number of characters in the buffer.
3316 ASSERT(static_cast<unsigned>(chars) == buffer->Length());
3317 // Determine whether the string is ascii.
3318 bool is_ascii = true;
Leon Clarkee46be812010-01-19 14:06:41 +00003319 while (buffer->has_more()) {
3320 if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
3321 is_ascii = false;
3322 break;
3323 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003324 }
3325 buffer->Rewind();
3326
3327 // Compute map and object size.
3328 int size;
3329 Map* map;
3330
3331 if (is_ascii) {
Leon Clarkee46be812010-01-19 14:06:41 +00003332 if (chars > SeqAsciiString::kMaxLength) {
3333 return Failure::OutOfMemoryException();
3334 }
Steve Blockd0582a62009-12-15 09:54:21 +00003335 map = ascii_symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003336 size = SeqAsciiString::SizeFor(chars);
3337 } else {
Leon Clarkee46be812010-01-19 14:06:41 +00003338 if (chars > SeqTwoByteString::kMaxLength) {
3339 return Failure::OutOfMemoryException();
3340 }
Steve Blockd0582a62009-12-15 09:54:21 +00003341 map = symbol_map();
Steve Blocka7e24c12009-10-30 11:49:00 +00003342 size = SeqTwoByteString::SizeFor(chars);
3343 }
3344
3345 // Allocate string.
John Reck59135872010-11-02 12:39:01 -07003346 Object* result;
3347 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3348 ? lo_space_->AllocateRaw(size)
3349 : old_data_space_->AllocateRaw(size);
3350 if (!maybe_result->ToObject(&result)) return maybe_result;
3351 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003352
3353 reinterpret_cast<HeapObject*>(result)->set_map(map);
Steve Blockd0582a62009-12-15 09:54:21 +00003354 // Set length and hash fields of the allocated string.
Steve Blocka7e24c12009-10-30 11:49:00 +00003355 String* answer = String::cast(result);
Steve Blockd0582a62009-12-15 09:54:21 +00003356 answer->set_length(chars);
3357 answer->set_hash_field(hash_field);
Steve Blocka7e24c12009-10-30 11:49:00 +00003358
3359 ASSERT_EQ(size, answer->Size());
3360
3361 // Fill in the characters.
3362 for (int i = 0; i < chars; i++) {
3363 answer->Set(i, buffer->GetNext());
3364 }
3365 return answer;
3366}
3367
3368
John Reck59135872010-11-02 12:39:01 -07003369MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003370 if (length < 0 || length > SeqAsciiString::kMaxLength) {
3371 return Failure::OutOfMemoryException();
3372 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003373
3374 int size = SeqAsciiString::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003375 ASSERT(size <= SeqAsciiString::kMaxSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003376
Leon Clarkee46be812010-01-19 14:06:41 +00003377 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3378 AllocationSpace retry_space = OLD_DATA_SPACE;
3379
Steve Blocka7e24c12009-10-30 11:49:00 +00003380 if (space == NEW_SPACE) {
Leon Clarkee46be812010-01-19 14:06:41 +00003381 if (size > kMaxObjectSizeInNewSpace) {
3382 // Allocate in large object space, retry space will be ignored.
3383 space = LO_SPACE;
3384 } else if (size > MaxObjectSizeInPagedSpace()) {
3385 // Allocate in new space, retry in large object space.
3386 retry_space = LO_SPACE;
3387 }
3388 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3389 space = LO_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003390 }
John Reck59135872010-11-02 12:39:01 -07003391 Object* result;
3392 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3393 if (!maybe_result->ToObject(&result)) return maybe_result;
3394 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003395
Steve Blocka7e24c12009-10-30 11:49:00 +00003396 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003397 HeapObject::cast(result)->set_map(ascii_string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003398 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003399 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003400 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3401 return result;
3402}
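// Space selection above, spelled out: pretenured strings start in
// OLD_DATA_SPACE, everything else in NEW_SPACE; anything larger than
// kMaxObjectSizeInNewSpace (or than MaxObjectSizeInPagedSpace() when
// pretenured) goes straight to LO_SPACE, and mid-sized new-space requests
// keep LO_SPACE as the retry space in case new space is full. The two-byte
// variant below uses the same scheme.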
3403
3404
John Reck59135872010-11-02 12:39:01 -07003405MaybeObject* Heap::AllocateRawTwoByteString(int length,
3406 PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003407 if (length < 0 || length > SeqTwoByteString::kMaxLength) {
3408 return Failure::OutOfMemoryException();
Steve Blocka7e24c12009-10-30 11:49:00 +00003409 }
Leon Clarkee46be812010-01-19 14:06:41 +00003410 int size = SeqTwoByteString::SizeFor(length);
3411 ASSERT(size <= SeqTwoByteString::kMaxSize);
3412 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3413 AllocationSpace retry_space = OLD_DATA_SPACE;
3414
3415 if (space == NEW_SPACE) {
3416 if (size > kMaxObjectSizeInNewSpace) {
3417 // Allocate in large object space, retry space will be ignored.
3418 space = LO_SPACE;
3419 } else if (size > MaxObjectSizeInPagedSpace()) {
3420 // Allocate in new space, retry in large object space.
3421 retry_space = LO_SPACE;
3422 }
3423 } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
3424 space = LO_SPACE;
3425 }
John Reck59135872010-11-02 12:39:01 -07003426 Object* result;
3427 { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
3428 if (!maybe_result->ToObject(&result)) return maybe_result;
3429 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003430
Steve Blocka7e24c12009-10-30 11:49:00 +00003431 // Partially initialize the object.
Steve Blockd0582a62009-12-15 09:54:21 +00003432 HeapObject::cast(result)->set_map(string_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003433 String::cast(result)->set_length(length);
Steve Blockd0582a62009-12-15 09:54:21 +00003434 String::cast(result)->set_hash_field(String::kEmptyHashField);
Steve Blocka7e24c12009-10-30 11:49:00 +00003435 ASSERT_EQ(size, HeapObject::cast(result)->Size());
3436 return result;
3437}
3438
3439
John Reck59135872010-11-02 12:39:01 -07003440MaybeObject* Heap::AllocateEmptyFixedArray() {
Steve Blocka7e24c12009-10-30 11:49:00 +00003441 int size = FixedArray::SizeFor(0);
John Reck59135872010-11-02 12:39:01 -07003442 Object* result;
3443 { MaybeObject* maybe_result =
3444 AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3445 if (!maybe_result->ToObject(&result)) return maybe_result;
3446 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003447 // Initialize the object.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003448 reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
3449 reinterpret_cast<FixedArray*>(result)->set_length(0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003450 return result;
3451}
3452
3453
John Reck59135872010-11-02 12:39:01 -07003454MaybeObject* Heap::AllocateRawFixedArray(int length) {
Leon Clarkee46be812010-01-19 14:06:41 +00003455 if (length < 0 || length > FixedArray::kMaxLength) {
3456 return Failure::OutOfMemoryException();
3457 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003458 ASSERT(length > 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00003459 // Use the general function if we're forced to always allocate.
3460 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3461 // Allocate the raw data for a fixed array.
3462 int size = FixedArray::SizeFor(length);
3463 return size <= kMaxObjectSizeInNewSpace
3464 ? new_space_.AllocateRaw(size)
3465 : lo_space_->AllocateRawFixedArray(size);
3466}
3467
3468
John Reck59135872010-11-02 12:39:01 -07003469MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003470 int len = src->length();
John Reck59135872010-11-02 12:39:01 -07003471 Object* obj;
3472 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3473 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3474 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003475 if (Heap::InNewSpace(obj)) {
3476 HeapObject* dst = HeapObject::cast(obj);
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003477 dst->set_map(map);
3478 CopyBlock(dst->address() + kPointerSize,
3479 src->address() + kPointerSize,
3480 FixedArray::SizeFor(len) - kPointerSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003481 return obj;
3482 }
Kristian Monsen0d5e1162010-09-30 15:31:59 +01003483 HeapObject::cast(obj)->set_map(map);
Steve Blocka7e24c12009-10-30 11:49:00 +00003484 FixedArray* result = FixedArray::cast(obj);
3485 result->set_length(len);
Leon Clarke4515c472010-02-03 11:58:03 +00003486
Steve Blocka7e24c12009-10-30 11:49:00 +00003487 // Copy the content
Leon Clarke4515c472010-02-03 11:58:03 +00003488 AssertNoAllocation no_gc;
3489 WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
Steve Blocka7e24c12009-10-30 11:49:00 +00003490 for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3491 return result;
3492}
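// In the copy above, a destination in new space can take a raw CopyBlock of
// everything past the map word because new-space objects never need write
// barriers; an old-space destination instead sets the length and copies
// element by element with the WriteBarrierMode chosen for that array.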
3493
3494
John Reck59135872010-11-02 12:39:01 -07003495MaybeObject* Heap::AllocateFixedArray(int length) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003496 ASSERT(length >= 0);
3497 if (length == 0) return empty_fixed_array();
John Reck59135872010-11-02 12:39:01 -07003498 Object* result;
3499 { MaybeObject* maybe_result = AllocateRawFixedArray(length);
3500 if (!maybe_result->ToObject(&result)) return maybe_result;
Steve Blocka7e24c12009-10-30 11:49:00 +00003501 }
John Reck59135872010-11-02 12:39:01 -07003502 // Initialize header.
3503 FixedArray* array = reinterpret_cast<FixedArray*>(result);
3504 array->set_map(fixed_array_map());
3505 array->set_length(length);
3506 // Initialize body.
3507 ASSERT(!Heap::InNewSpace(undefined_value()));
3508 MemsetPointer(array->data_start(), undefined_value(), length);
Steve Blocka7e24c12009-10-30 11:49:00 +00003509 return result;
3510}
3511
3512
John Reck59135872010-11-02 12:39:01 -07003513MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
Leon Clarkee46be812010-01-19 14:06:41 +00003514 if (length < 0 || length > FixedArray::kMaxLength) {
3515 return Failure::OutOfMemoryException();
3516 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003517
Leon Clarkee46be812010-01-19 14:06:41 +00003518 AllocationSpace space =
3519 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
Steve Blocka7e24c12009-10-30 11:49:00 +00003520 int size = FixedArray::SizeFor(length);
Leon Clarkee46be812010-01-19 14:06:41 +00003521 if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
3522 // Too big for new space.
3523 space = LO_SPACE;
3524 } else if (space == OLD_POINTER_SPACE &&
3525 size > MaxObjectSizeInPagedSpace()) {
3526 // Too big for old pointer space.
3527 space = LO_SPACE;
3528 }
3529
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003530 AllocationSpace retry_space =
3531 (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
3532
3533 return AllocateRaw(size, space, retry_space);
Steve Blocka7e24c12009-10-30 11:49:00 +00003534}
3535
3536
John Reck59135872010-11-02 12:39:01 -07003537MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
3538 int length,
3539 PretenureFlag pretenure,
3540 Object* filler) {
Steve Block6ded16b2010-05-10 14:33:55 +01003541 ASSERT(length >= 0);
3542 ASSERT(Heap::empty_fixed_array()->IsFixedArray());
3543 if (length == 0) return Heap::empty_fixed_array();
3544
3545 ASSERT(!Heap::InNewSpace(filler));
John Reck59135872010-11-02 12:39:01 -07003546 Object* result;
3547 { MaybeObject* maybe_result = Heap::AllocateRawFixedArray(length, pretenure);
3548 if (!maybe_result->ToObject(&result)) return maybe_result;
3549 }
Steve Block6ded16b2010-05-10 14:33:55 +01003550
3551 HeapObject::cast(result)->set_map(Heap::fixed_array_map());
3552 FixedArray* array = FixedArray::cast(result);
3553 array->set_length(length);
3554 MemsetPointer(array->data_start(), filler, length);
3555 return array;
3556}
3557
3558
John Reck59135872010-11-02 12:39:01 -07003559MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003560 return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
3561}
3562
3563
John Reck59135872010-11-02 12:39:01 -07003564MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
3565 PretenureFlag pretenure) {
Steve Block6ded16b2010-05-10 14:33:55 +01003566 return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
3567}
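// The two wrappers above differ only in the filler they pass along:
// AllocateFixedArray() presets every slot to undefined_value(), while
// AllocateFixedArrayWithHoles() presets the_hole_value(), the sentinel that
// element code treats as a missing entry.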
3568
3569
John Reck59135872010-11-02 12:39:01 -07003570MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
Steve Block6ded16b2010-05-10 14:33:55 +01003571 if (length == 0) return empty_fixed_array();
3572
John Reck59135872010-11-02 12:39:01 -07003573 Object* obj;
3574 { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
3575 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3576 }
Steve Block6ded16b2010-05-10 14:33:55 +01003577
3578 reinterpret_cast<FixedArray*>(obj)->set_map(fixed_array_map());
3579 FixedArray::cast(obj)->set_length(length);
3580 return obj;
3581}
3582
3583
John Reck59135872010-11-02 12:39:01 -07003584MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
3585 Object* result;
3586 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length, pretenure);
3587 if (!maybe_result->ToObject(&result)) return maybe_result;
3588 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003589 reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
Steve Blocka7e24c12009-10-30 11:49:00 +00003590 ASSERT(result->IsHashTable());
3591 return result;
3592}
3593
3594
John Reck59135872010-11-02 12:39:01 -07003595MaybeObject* Heap::AllocateGlobalContext() {
3596 Object* result;
3597 { MaybeObject* maybe_result =
3598 Heap::AllocateFixedArray(Context::GLOBAL_CONTEXT_SLOTS);
3599 if (!maybe_result->ToObject(&result)) return maybe_result;
3600 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003601 Context* context = reinterpret_cast<Context*>(result);
3602 context->set_map(global_context_map());
3603 ASSERT(context->IsGlobalContext());
3604 ASSERT(result->IsContext());
3605 return result;
3606}
3607
3608
John Reck59135872010-11-02 12:39:01 -07003609MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003610 ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
John Reck59135872010-11-02 12:39:01 -07003611 Object* result;
3612 { MaybeObject* maybe_result = Heap::AllocateFixedArray(length);
3613 if (!maybe_result->ToObject(&result)) return maybe_result;
3614 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003615 Context* context = reinterpret_cast<Context*>(result);
3616 context->set_map(context_map());
3617 context->set_closure(function);
3618 context->set_fcontext(context);
3619 context->set_previous(NULL);
3620 context->set_extension(NULL);
3621 context->set_global(function->context()->global());
3622 ASSERT(!context->IsGlobalContext());
3623 ASSERT(context->is_function_context());
3624 ASSERT(result->IsContext());
3625 return result;
3626}
3627
3628
John Reck59135872010-11-02 12:39:01 -07003629MaybeObject* Heap::AllocateWithContext(Context* previous,
3630 JSObject* extension,
3631 bool is_catch_context) {
3632 Object* result;
3633 { MaybeObject* maybe_result =
3634 Heap::AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
3635 if (!maybe_result->ToObject(&result)) return maybe_result;
3636 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003637 Context* context = reinterpret_cast<Context*>(result);
3638 context->set_map(is_catch_context ? catch_context_map() : context_map());
3639 context->set_closure(previous->closure());
3640 context->set_fcontext(previous->fcontext());
3641 context->set_previous(previous);
3642 context->set_extension(extension);
3643 context->set_global(previous->global());
3644 ASSERT(!context->IsGlobalContext());
3645 ASSERT(!context->is_function_context());
3646 ASSERT(result->IsContext());
3647 return result;
3648}
3649
3650
John Reck59135872010-11-02 12:39:01 -07003651MaybeObject* Heap::AllocateStruct(InstanceType type) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003652 Map* map;
3653 switch (type) {
3654#define MAKE_CASE(NAME, Name, name) case NAME##_TYPE: map = name##_map(); break;
3655STRUCT_LIST(MAKE_CASE)
3656#undef MAKE_CASE
3657 default:
3658 UNREACHABLE();
3659 return Failure::InternalError();
3660 }
3661 int size = map->instance_size();
3662 AllocationSpace space =
3663 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
John Reck59135872010-11-02 12:39:01 -07003664 Object* result;
3665 { MaybeObject* maybe_result = Heap::Allocate(map, space);
3666 if (!maybe_result->ToObject(&result)) return maybe_result;
3667 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003668 Struct::cast(result)->InitializeBody(size);
3669 return result;
3670}
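// Worked example for the switch above, assuming ACCESSOR_INFO_TYPE is one of
// the STRUCT_LIST entries: AllocateStruct(ACCESSOR_INFO_TYPE) picks
// accessor_info_map(), sizes the object from that map, allocates it in
// OLD_POINTER_SPACE or LO_SPACE depending on that size, and finally calls
// Struct::cast(result)->InitializeBody(size) on the new object.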
3671
3672
3673bool Heap::IdleNotification() {
3674 static const int kIdlesBeforeScavenge = 4;
3675 static const int kIdlesBeforeMarkSweep = 7;
3676 static const int kIdlesBeforeMarkCompact = 8;
3677 static int number_idle_notifications = 0;
3678 static int last_gc_count = gc_count_;
3679
Steve Block6ded16b2010-05-10 14:33:55 +01003680 bool uncommit = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003681 bool finished = false;
3682
3683 if (last_gc_count == gc_count_) {
3684 number_idle_notifications++;
3685 } else {
3686 number_idle_notifications = 0;
3687 last_gc_count = gc_count_;
3688 }
3689
3690 if (number_idle_notifications == kIdlesBeforeScavenge) {
Steve Block6ded16b2010-05-10 14:33:55 +01003691 if (contexts_disposed_ > 0) {
3692 HistogramTimerScope scope(&Counters::gc_context);
3693 CollectAllGarbage(false);
3694 } else {
Ben Murdochf87a2032010-10-22 12:50:53 +01003695 CollectGarbage(NEW_SPACE);
Steve Block6ded16b2010-05-10 14:33:55 +01003696 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003697 new_space_.Shrink();
3698 last_gc_count = gc_count_;
3699
3700 } else if (number_idle_notifications == kIdlesBeforeMarkSweep) {
Steve Blockd0582a62009-12-15 09:54:21 +00003701 // Before doing the mark-sweep collections we clear the
3702 // compilation cache to avoid hanging on to source code and
3703 // generated code for cached functions.
3704 CompilationCache::Clear();
3705
Steve Blocka7e24c12009-10-30 11:49:00 +00003706 CollectAllGarbage(false);
3707 new_space_.Shrink();
3708 last_gc_count = gc_count_;
3709
3710 } else if (number_idle_notifications == kIdlesBeforeMarkCompact) {
3711 CollectAllGarbage(true);
3712 new_space_.Shrink();
3713 last_gc_count = gc_count_;
3714 number_idle_notifications = 0;
3715 finished = true;
Steve Block6ded16b2010-05-10 14:33:55 +01003716
3717 } else if (contexts_disposed_ > 0) {
3718 if (FLAG_expose_gc) {
3719 contexts_disposed_ = 0;
3720 } else {
3721 HistogramTimerScope scope(&Counters::gc_context);
3722 CollectAllGarbage(false);
3723 last_gc_count = gc_count_;
3724 }
 3725    // If this is the first idle notification, reset the notification
 3726    // count so that idle notifications caused by context disposal
 3727    // garbage collections do not kick off an overly aggressive idle GC
 3728    // cycle.
3729 if (number_idle_notifications <= 1) {
3730 number_idle_notifications = 0;
3731 uncommit = false;
3732 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003733 }
3734
Steve Block6ded16b2010-05-10 14:33:55 +01003735 // Make sure that we have no pending context disposals and
3736 // conditionally uncommit from space.
3737 ASSERT(contexts_disposed_ == 0);
3738 if (uncommit) Heap::UncommitFromSpace();
Steve Blocka7e24c12009-10-30 11:49:00 +00003739 return finished;
3740}
3741
3742
3743#ifdef DEBUG
3744
3745void Heap::Print() {
3746 if (!HasBeenSetup()) return;
3747 Top::PrintStack();
3748 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003749 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
3750 space->Print();
Steve Blocka7e24c12009-10-30 11:49:00 +00003751}
3752
3753
3754void Heap::ReportCodeStatistics(const char* title) {
3755 PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
3756 PagedSpace::ResetCodeStatistics();
3757 // We do not look for code in new space, map space, or old space. If code
3758 // somehow ends up in those spaces, we would miss it here.
3759 code_space_->CollectCodeStatistics();
3760 lo_space_->CollectCodeStatistics();
3761 PagedSpace::ReportCodeStatistics();
3762}
3763
3764
3765// This function expects that NewSpace's allocated objects histogram is
3766// populated (via a call to CollectStatistics or else as a side effect of a
3767// just-completed scavenge collection).
3768void Heap::ReportHeapStatistics(const char* title) {
3769 USE(title);
3770 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
3771 title, gc_count_);
3772 PrintF("mark-compact GC : %d\n", mc_count_);
Ben Murdochf87a2032010-10-22 12:50:53 +01003773 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
3774 old_gen_promotion_limit_);
3775 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
3776 old_gen_allocation_limit_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003777
3778 PrintF("\n");
3779 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
3780 GlobalHandles::PrintStats();
3781 PrintF("\n");
3782
3783 PrintF("Heap statistics : ");
3784 MemoryAllocator::ReportStatistics();
3785 PrintF("To space : ");
3786 new_space_.ReportStatistics();
3787 PrintF("Old pointer space : ");
3788 old_pointer_space_->ReportStatistics();
3789 PrintF("Old data space : ");
3790 old_data_space_->ReportStatistics();
3791 PrintF("Code space : ");
3792 code_space_->ReportStatistics();
3793 PrintF("Map space : ");
3794 map_space_->ReportStatistics();
3795 PrintF("Cell space : ");
3796 cell_space_->ReportStatistics();
3797 PrintF("Large object space : ");
3798 lo_space_->ReportStatistics();
3799 PrintF(">>>>>> ========================================= >>>>>>\n");
3800}
3801
3802#endif // DEBUG
3803
3804bool Heap::Contains(HeapObject* value) {
3805 return Contains(value->address());
3806}
3807
3808
3809bool Heap::Contains(Address addr) {
3810 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3811 return HasBeenSetup() &&
3812 (new_space_.ToSpaceContains(addr) ||
3813 old_pointer_space_->Contains(addr) ||
3814 old_data_space_->Contains(addr) ||
3815 code_space_->Contains(addr) ||
3816 map_space_->Contains(addr) ||
3817 cell_space_->Contains(addr) ||
3818 lo_space_->SlowContains(addr));
3819}
3820
3821
3822bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
3823 return InSpace(value->address(), space);
3824}
3825
3826
3827bool Heap::InSpace(Address addr, AllocationSpace space) {
3828 if (OS::IsOutsideAllocatedSpace(addr)) return false;
3829 if (!HasBeenSetup()) return false;
3830
3831 switch (space) {
3832 case NEW_SPACE:
3833 return new_space_.ToSpaceContains(addr);
3834 case OLD_POINTER_SPACE:
3835 return old_pointer_space_->Contains(addr);
3836 case OLD_DATA_SPACE:
3837 return old_data_space_->Contains(addr);
3838 case CODE_SPACE:
3839 return code_space_->Contains(addr);
3840 case MAP_SPACE:
3841 return map_space_->Contains(addr);
3842 case CELL_SPACE:
3843 return cell_space_->Contains(addr);
3844 case LO_SPACE:
3845 return lo_space_->SlowContains(addr);
3846 }
3847
3848 return false;
3849}
3850
3851
3852#ifdef DEBUG
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003853static void DummyScavengePointer(HeapObject** p) {
3854}
3855
3856
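// Walks every region below each page's allocation watermark as if it were
// dirty, using DummyScavengePointer as a no-op callback. The verification
// itself happens through the assertions inside the dirty-region visitors.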
3857static void VerifyPointersUnderWatermark(
3858 PagedSpace* space,
3859 DirtyRegionCallback visit_dirty_region) {
3860 PageIterator it(space, PageIterator::PAGES_IN_USE);
3861
3862 while (it.has_next()) {
3863 Page* page = it.next();
3864 Address start = page->ObjectAreaStart();
3865 Address end = page->AllocationWatermark();
3866
3867 Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
3868 start,
3869 end,
3870 visit_dirty_region,
3871 &DummyScavengePointer);
3872 }
3873}
3874
3875
3876static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
3877 LargeObjectIterator it(space);
3878 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
3879 if (object->IsFixedArray()) {
3880 Address slot_address = object->address();
3881 Address end = object->address() + object->Size();
3882
3883 while (slot_address < end) {
3884 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
 3885        // When we are not in a GC, the Heap::InNewSpace() predicate
 3886        // checks that pointers which satisfy the predicate point into
 3887        // the active semispace.
3888 Heap::InNewSpace(*slot);
3889 slot_address += kPointerSize;
3890 }
3891 }
3892 }
3893}
3894
3895
Steve Blocka7e24c12009-10-30 11:49:00 +00003896void Heap::Verify() {
3897 ASSERT(HasBeenSetup());
3898
3899 VerifyPointersVisitor visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00003900 IterateRoots(&visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00003901
3902 new_space_.Verify();
3903
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003904 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
3905 old_pointer_space_->Verify(&dirty_regions_visitor);
3906 map_space_->Verify(&dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003907
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003908 VerifyPointersUnderWatermark(old_pointer_space_,
3909 &IteratePointersInDirtyRegion);
3910 VerifyPointersUnderWatermark(map_space_,
3911 &IteratePointersInDirtyMapsRegion);
3912 VerifyPointersUnderWatermark(lo_space_);
3913
3914 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
3915 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
3916
3917 VerifyPointersVisitor no_dirty_regions_visitor;
3918 old_data_space_->Verify(&no_dirty_regions_visitor);
3919 code_space_->Verify(&no_dirty_regions_visitor);
3920 cell_space_->Verify(&no_dirty_regions_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00003921
3922 lo_space_->Verify();
3923}
3924#endif // DEBUG
3925
3926
John Reck59135872010-11-02 12:39:01 -07003927MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003928 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07003929 Object* new_table;
3930 { MaybeObject* maybe_new_table =
3931 symbol_table()->LookupSymbol(string, &symbol);
3932 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
3933 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003934 // Can't use set_symbol_table because SymbolTable::cast knows that
3935 // SymbolTable is a singleton and checks for identity.
3936 roots_[kSymbolTableRootIndex] = new_table;
3937 ASSERT(symbol != NULL);
3938 return symbol;
3939}
3940
3941
John Reck59135872010-11-02 12:39:01 -07003942MaybeObject* Heap::LookupSymbol(String* string) {
Steve Blocka7e24c12009-10-30 11:49:00 +00003943 if (string->IsSymbol()) return string;
3944 Object* symbol = NULL;
John Reck59135872010-11-02 12:39:01 -07003945 Object* new_table;
3946 { MaybeObject* maybe_new_table =
3947 symbol_table()->LookupString(string, &symbol);
3948 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
3949 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003950 // Can't use set_symbol_table because SymbolTable::cast knows that
3951 // SymbolTable is a singleton and checks for identity.
3952 roots_[kSymbolTableRootIndex] = new_table;
3953 ASSERT(symbol != NULL);
3954 return symbol;
3955}
3956
3957
3958bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
3959 if (string->IsSymbol()) {
3960 *symbol = string;
3961 return true;
3962 }
3963 return symbol_table()->LookupSymbolIfExists(string, symbol);
3964}
3965
3966
3967#ifdef DEBUG
3968void Heap::ZapFromSpace() {
3969 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsHeapObject());
3970 for (Address a = new_space_.FromSpaceLow();
3971 a < new_space_.FromSpaceHigh();
3972 a += kPointerSize) {
3973 Memory::Address_at(a) = kFromSpaceZapValue;
3974 }
3975}
3976#endif // DEBUG
3977
3978
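// Visits every pointer-sized slot in [start, end). Slots that point into new
// space are handed to copy_object_func; returns true if the region still
// contains pointers to new space afterwards, i.e. it must stay marked dirty.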
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003979bool Heap::IteratePointersInDirtyRegion(Address start,
3980 Address end,
3981 ObjectSlotCallback copy_object_func) {
3982 Address slot_address = start;
3983 bool pointers_to_new_space_found = false;
Steve Blocka7e24c12009-10-30 11:49:00 +00003984
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003985 while (slot_address < end) {
3986 Object** slot = reinterpret_cast<Object**>(slot_address);
3987 if (Heap::InNewSpace(*slot)) {
3988 ASSERT((*slot)->IsHeapObject());
3989 copy_object_func(reinterpret_cast<HeapObject**>(slot));
3990 if (Heap::InNewSpace(*slot)) {
3991 ASSERT((*slot)->IsHeapObject());
3992 pointers_to_new_space_found = true;
Steve Blocka7e24c12009-10-30 11:49:00 +00003993 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003994 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003995 slot_address += kPointerSize;
Steve Blocka7e24c12009-10-30 11:49:00 +00003996 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003997 return pointers_to_new_space_found;
Steve Blocka7e24c12009-10-30 11:49:00 +00003998}
3999
4000
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004001// Compute start address of the first map following given addr.
4002static inline Address MapStartAlign(Address addr) {
4003 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4004 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4005}
Steve Blocka7e24c12009-10-30 11:49:00 +00004006
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004007
4008// Compute end address of the first map preceding given addr.
4009static inline Address MapEndAlign(Address addr) {
4010 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4011 return page + ((addr - page) / Map::kSize * Map::kSize);
4012}
4013
4014
4015static bool IteratePointersInDirtyMaps(Address start,
4016 Address end,
4017 ObjectSlotCallback copy_object_func) {
4018 ASSERT(MapStartAlign(start) == start);
4019 ASSERT(MapEndAlign(end) == end);
4020
4021 Address map_address = start;
4022 bool pointers_to_new_space_found = false;
4023
4024 while (map_address < end) {
4025 ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
4026 ASSERT(Memory::Object_at(map_address)->IsMap());
4027
4028 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4029 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4030
4031 if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
4032 pointer_fields_end,
4033 copy_object_func)) {
4034 pointers_to_new_space_found = true;
4035 }
4036
4037 map_address += Map::kSize;
4038 }
4039
4040 return pointers_to_new_space_found;
4041}
4042
4043
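// Like IteratePointersInDirtyRegion, but only visits the pointer fields of
// the maps overlapping [start, end), including maps that straddle either
// boundary of the region.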
4044bool Heap::IteratePointersInDirtyMapsRegion(
4045 Address start,
4046 Address end,
4047 ObjectSlotCallback copy_object_func) {
4048 Address map_aligned_start = MapStartAlign(start);
4049 Address map_aligned_end = MapEndAlign(end);
4050
4051 bool contains_pointers_to_new_space = false;
4052
4053 if (map_aligned_start != start) {
4054 Address prev_map = map_aligned_start - Map::kSize;
4055 ASSERT(Memory::Object_at(prev_map)->IsMap());
4056
4057 Address pointer_fields_start =
4058 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4059
4060 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004061 Min(prev_map + Map::kPointerFieldsEndOffset, end);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004062
4063 contains_pointers_to_new_space =
4064 IteratePointersInDirtyRegion(pointer_fields_start,
4065 pointer_fields_end,
4066 copy_object_func)
4067 || contains_pointers_to_new_space;
4068 }
4069
4070 contains_pointers_to_new_space =
4071 IteratePointersInDirtyMaps(map_aligned_start,
4072 map_aligned_end,
4073 copy_object_func)
4074 || contains_pointers_to_new_space;
4075
4076 if (map_aligned_end != end) {
4077 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4078
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004079 Address pointer_fields_start =
4080 map_aligned_end + Map::kPointerFieldsBeginOffset;
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004081
4082 Address pointer_fields_end =
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004083 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004084
4085 contains_pointers_to_new_space =
4086 IteratePointersInDirtyRegion(pointer_fields_start,
4087 pointer_fields_end,
4088 copy_object_func)
4089 || contains_pointers_to_new_space;
4090 }
4091
4092 return contains_pointers_to_new_space;
4093}
4094
4095
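// Visits every slot in [start, end); slots pointing into from-space are
// forwarded through 'callback', and the page's dirty-region marks are set
// for slots that still point into new space afterwards.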
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004096void Heap::IterateAndMarkPointersToFromSpace(Address start,
4097 Address end,
4098 ObjectSlotCallback callback) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004099 Address slot_address = start;
4100 Page* page = Page::FromAddress(start);
4101
4102 uint32_t marks = page->GetRegionMarks();
4103
4104 while (slot_address < end) {
4105 Object** slot = reinterpret_cast<Object**>(slot_address);
Ben Murdoch3bec4d22010-07-22 14:51:16 +01004106 if (Heap::InFromSpace(*slot)) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004107 ASSERT((*slot)->IsHeapObject());
4108 callback(reinterpret_cast<HeapObject**>(slot));
4109 if (Heap::InNewSpace(*slot)) {
4110 ASSERT((*slot)->IsHeapObject());
4111 marks |= page->GetRegionMaskForAddress(slot_address);
4112 }
4113 }
4114 slot_address += kPointerSize;
4115 }
4116
4117 page->SetRegionMarks(marks);
4118}
4119
4120
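// Walks the regions covering [area_start, area_end). 'marks' holds one dirty
// bit per Page::kRegionSize-sized region and only dirty regions are visited.
// The returned mask keeps a bit set only for regions that still contain
// pointers to new space.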
4121uint32_t Heap::IterateDirtyRegions(
4122 uint32_t marks,
4123 Address area_start,
4124 Address area_end,
4125 DirtyRegionCallback visit_dirty_region,
4126 ObjectSlotCallback copy_object_func) {
4127 uint32_t newmarks = 0;
4128 uint32_t mask = 1;
4129
4130 if (area_start >= area_end) {
4131 return newmarks;
4132 }
4133
4134 Address region_start = area_start;
4135
4136 // area_start does not necessarily coincide with start of the first region.
4137 // Thus to calculate the beginning of the next region we have to align
4138 // area_start by Page::kRegionSize.
4139 Address second_region =
4140 reinterpret_cast<Address>(
4141 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
4142 ~Page::kRegionAlignmentMask);
4143
4144 // Next region might be beyond area_end.
4145 Address region_end = Min(second_region, area_end);
4146
4147 if (marks & mask) {
4148 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4149 newmarks |= mask;
4150 }
4151 }
4152 mask <<= 1;
4153
 4154  // Iterate subsequent regions which lie fully inside [area_start, area_end).
4155 region_start = region_end;
4156 region_end = region_start + Page::kRegionSize;
4157
4158 while (region_end <= area_end) {
4159 if (marks & mask) {
4160 if (visit_dirty_region(region_start, region_end, copy_object_func)) {
4161 newmarks |= mask;
4162 }
4163 }
4164
4165 region_start = region_end;
4166 region_end = region_start + Page::kRegionSize;
4167
4168 mask <<= 1;
4169 }
4170
4171 if (region_start != area_end) {
 4172    // A small piece of the area is left unvisited because area_end does not
 4173    // coincide with a region end. Check whether the region covering the last
 4174    // part of the area is dirty.
4175 if (marks & mask) {
4176 if (visit_dirty_region(region_start, area_end, copy_object_func)) {
4177 newmarks |= mask;
4178 }
4179 }
4180 }
4181
4182 return newmarks;
4183}
4184
4185
4186
4187void Heap::IterateDirtyRegions(
4188 PagedSpace* space,
4189 DirtyRegionCallback visit_dirty_region,
4190 ObjectSlotCallback copy_object_func,
4191 ExpectedPageWatermarkState expected_page_watermark_state) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004192
4193 PageIterator it(space, PageIterator::PAGES_IN_USE);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004194
Steve Blocka7e24c12009-10-30 11:49:00 +00004195 while (it.has_next()) {
4196 Page* page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004197 uint32_t marks = page->GetRegionMarks();
4198
4199 if (marks != Page::kAllRegionsCleanMarks) {
4200 Address start = page->ObjectAreaStart();
4201
4202 // Do not try to visit pointers beyond page allocation watermark.
4203 // Page can contain garbage pointers there.
4204 Address end;
4205
4206 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
4207 page->IsWatermarkValid()) {
4208 end = page->AllocationWatermark();
4209 } else {
4210 end = page->CachedAllocationWatermark();
4211 }
4212
4213 ASSERT(space == old_pointer_space_ ||
4214 (space == map_space_ &&
4215 ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
4216
4217 page->SetRegionMarks(IterateDirtyRegions(marks,
4218 start,
4219 end,
4220 visit_dirty_region,
4221 copy_object_func));
Steve Blocka7e24c12009-10-30 11:49:00 +00004222 }
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01004223
4224 // Mark page watermark as invalid to maintain watermark validity invariant.
4225 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
4226 page->InvalidateWatermark(true);
Steve Blocka7e24c12009-10-30 11:49:00 +00004227 }
4228}
4229
4230
Steve Blockd0582a62009-12-15 09:54:21 +00004231void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4232 IterateStrongRoots(v, mode);
Leon Clarked91b9f72010-01-27 17:25:45 +00004233 IterateWeakRoots(v, mode);
4234}
4235
4236
4237void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004238 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
Steve Blockd0582a62009-12-15 09:54:21 +00004239 v->Synchronize("symbol_table");
Leon Clarkee46be812010-01-19 14:06:41 +00004240 if (mode != VISIT_ALL_IN_SCAVENGE) {
4241 // Scavenge collections have special processing for this.
4242 ExternalStringTable::Iterate(v);
4243 }
4244 v->Synchronize("external_string_table");
Steve Blocka7e24c12009-10-30 11:49:00 +00004245}
4246
4247
Steve Blockd0582a62009-12-15 09:54:21 +00004248void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004249 v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
Steve Blockd0582a62009-12-15 09:54:21 +00004250 v->Synchronize("strong_root_list");
Steve Blocka7e24c12009-10-30 11:49:00 +00004251
Iain Merrick75681382010-08-19 15:07:18 +01004252 v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
Steve Blockd0582a62009-12-15 09:54:21 +00004253 v->Synchronize("symbol");
Steve Blocka7e24c12009-10-30 11:49:00 +00004254
4255 Bootstrapper::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004256 v->Synchronize("bootstrapper");
Steve Blocka7e24c12009-10-30 11:49:00 +00004257 Top::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004258 v->Synchronize("top");
Steve Blocka7e24c12009-10-30 11:49:00 +00004259 Relocatable::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004260 v->Synchronize("relocatable");
Steve Blocka7e24c12009-10-30 11:49:00 +00004261
4262#ifdef ENABLE_DEBUGGER_SUPPORT
4263 Debug::Iterate(v);
4264#endif
Steve Blockd0582a62009-12-15 09:54:21 +00004265 v->Synchronize("debug");
Steve Blocka7e24c12009-10-30 11:49:00 +00004266 CompilationCache::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004267 v->Synchronize("compilationcache");
Steve Blocka7e24c12009-10-30 11:49:00 +00004268
4269 // Iterate over local handles in handle scopes.
4270 HandleScopeImplementer::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004271 v->Synchronize("handlescope");
Steve Blocka7e24c12009-10-30 11:49:00 +00004272
Leon Clarkee46be812010-01-19 14:06:41 +00004273 // Iterate over the builtin code objects and code stubs in the
4274 // heap. Note that it is not necessary to iterate over code objects
4275 // on scavenge collections.
4276 if (mode != VISIT_ALL_IN_SCAVENGE) {
4277 Builtins::IterateBuiltins(v);
4278 }
Steve Blockd0582a62009-12-15 09:54:21 +00004279 v->Synchronize("builtins");
Steve Blocka7e24c12009-10-30 11:49:00 +00004280
4281 // Iterate over global handles.
Steve Blockd0582a62009-12-15 09:54:21 +00004282 if (mode == VISIT_ONLY_STRONG) {
4283 GlobalHandles::IterateStrongRoots(v);
4284 } else {
4285 GlobalHandles::IterateAllRoots(v);
4286 }
4287 v->Synchronize("globalhandles");
Steve Blocka7e24c12009-10-30 11:49:00 +00004288
4289 // Iterate over pointers being held by inactive threads.
4290 ThreadManager::Iterate(v);
Steve Blockd0582a62009-12-15 09:54:21 +00004291 v->Synchronize("threadmanager");
Leon Clarked91b9f72010-01-27 17:25:45 +00004292
4293 // Iterate over the pointers the Serialization/Deserialization code is
4294 // holding.
4295 // During garbage collection this keeps the partial snapshot cache alive.
4296 // During deserialization of the startup snapshot this creates the partial
4297 // snapshot cache and deserializes the objects it refers to. During
4298 // serialization this does nothing, since the partial snapshot cache is
4299 // empty. However the next thing we do is create the partial snapshot,
4300 // filling up the partial snapshot cache with objects it needs as we go.
4301 SerializerDeserializer::Iterate(v);
4302 // We don't do a v->Synchronize call here, because in debug mode that will
4303 // output a flag to the snapshot. However at this point the serializer and
4304 // deserializer are deliberately a little unsynchronized (see above) so the
4305 // checking of the sync flag in the snapshot would fail.
Steve Blocka7e24c12009-10-30 11:49:00 +00004306}
Steve Blocka7e24c12009-10-30 11:49:00 +00004307
4308
4309// Flag is set when the heap has been configured. The heap can be repeatedly
 4310// configured through the API until it is set up.
4311static bool heap_configured = false;
4312
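// Embedders normally reach ConfigureHeap through the public API, roughly as
// sketched below (illustrative only; the byte values are hypothetical and the
// authoritative wiring lives in SetResourceConstraints in api.cc):
//
//   v8::ResourceConstraints constraints;
//   constraints.set_max_young_space_size(young_space_bytes);
//   constraints.set_max_old_space_size(old_space_bytes);
//   v8::SetResourceConstraints(&constraints);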
4313// TODO(1236194): Since the heap size is configurable on the command line
4314// and through the API, we should gracefully handle the case that the heap
4315// size is not big enough to fit all the initial objects.
Steve Block3ce2e202009-11-05 08:53:23 +00004316bool Heap::ConfigureHeap(int max_semispace_size, int max_old_gen_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004317 if (HasBeenSetup()) return false;
4318
Steve Block3ce2e202009-11-05 08:53:23 +00004319 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size;
4320
4321 if (Snapshot::IsEnabled()) {
4322 // If we are using a snapshot we always reserve the default amount
4323 // of memory for each semispace because code in the snapshot has
4324 // write-barrier code that relies on the size and alignment of new
4325 // space. We therefore cannot use a larger max semispace size
4326 // than the default reserved semispace size.
4327 if (max_semispace_size_ > reserved_semispace_size_) {
4328 max_semispace_size_ = reserved_semispace_size_;
4329 }
4330 } else {
4331 // If we are not using snapshots we reserve space for the actual
4332 // max semispace size.
4333 reserved_semispace_size_ = max_semispace_size_;
4334 }
4335
4336 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00004337
4338 // The new space size must be a power of two to support single-bit testing
4339 // for containment.
Steve Block3ce2e202009-11-05 08:53:23 +00004340 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
4341 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
4342 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
4343 external_allocation_limit_ = 10 * max_semispace_size_;
Steve Blocka7e24c12009-10-30 11:49:00 +00004344
4345 // The old generation is paged.
Steve Block3ce2e202009-11-05 08:53:23 +00004346 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00004347
4348 heap_configured = true;
4349 return true;
4350}
4351
4352
4353bool Heap::ConfigureHeapDefault() {
Ben Murdochf87a2032010-10-22 12:50:53 +01004354 return ConfigureHeap(
4355 FLAG_max_new_space_size * (KB / 2), FLAG_max_old_space_size * MB);
Steve Blocka7e24c12009-10-30 11:49:00 +00004356}
4357
4358
Ben Murdochbb769b22010-08-11 14:56:33 +01004359void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
Iain Merrick75681382010-08-19 15:07:18 +01004360 *stats->start_marker = HeapStats::kStartMarker;
4361 *stats->end_marker = HeapStats::kEndMarker;
Ben Murdochf87a2032010-10-22 12:50:53 +01004362 *stats->new_space_size = new_space_.SizeAsInt();
4363 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
Steve Blockd0582a62009-12-15 09:54:21 +00004364 *stats->old_pointer_space_size = old_pointer_space_->Size();
4365 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4366 *stats->old_data_space_size = old_data_space_->Size();
4367 *stats->old_data_space_capacity = old_data_space_->Capacity();
4368 *stats->code_space_size = code_space_->Size();
4369 *stats->code_space_capacity = code_space_->Capacity();
4370 *stats->map_space_size = map_space_->Size();
4371 *stats->map_space_capacity = map_space_->Capacity();
4372 *stats->cell_space_size = cell_space_->Size();
4373 *stats->cell_space_capacity = cell_space_->Capacity();
4374 *stats->lo_space_size = lo_space_->Size();
4375 GlobalHandles::RecordStats(stats);
Ben Murdochbb769b22010-08-11 14:56:33 +01004376 *stats->memory_allocator_size = MemoryAllocator::Size();
4377 *stats->memory_allocator_capacity =
4378 MemoryAllocator::Size() + MemoryAllocator::Available();
Iain Merrick75681382010-08-19 15:07:18 +01004379 *stats->os_error = OS::GetLastError();
Ben Murdochbb769b22010-08-11 14:56:33 +01004380 if (take_snapshot) {
4381 HeapIterator iterator;
4382 for (HeapObject* obj = iterator.next();
4383 obj != NULL;
4384 obj = iterator.next()) {
4385 // Note: snapshot won't be precise because IsFreeListNode returns true
4386 // for any bytearray.
4387 if (FreeListNode::IsFreeListNode(obj)) continue;
4388 InstanceType type = obj->map()->instance_type();
4389 ASSERT(0 <= type && type <= LAST_TYPE);
4390 stats->objects_per_type[type]++;
4391 stats->size_per_type[type] += obj->Size();
4392 }
4393 }
Steve Blockd0582a62009-12-15 09:54:21 +00004394}
4395
4396
Ben Murdochf87a2032010-10-22 12:50:53 +01004397intptr_t Heap::PromotedSpaceSize() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004398 return old_pointer_space_->Size()
4399 + old_data_space_->Size()
4400 + code_space_->Size()
4401 + map_space_->Size()
4402 + cell_space_->Size()
4403 + lo_space_->Size();
4404}
4405
4406
4407int Heap::PromotedExternalMemorySize() {
4408 if (amount_of_external_allocated_memory_
4409 <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
4410 return amount_of_external_allocated_memory_
4411 - amount_of_external_allocated_memory_at_last_global_gc_;
4412}
4413
4414
4415bool Heap::Setup(bool create_heap_objects) {
4416 // Initialize heap spaces and initial maps and objects. Whenever something
4417 // goes wrong, just return false. The caller should check the results and
4418 // call Heap::TearDown() to release allocated memory.
4419 //
 4420  // If the heap is not yet configured (e.g., through the API), configure it.
4421 // Configuration is based on the flags new-space-size (really the semispace
4422 // size) and old-space-size if set or the initial values of semispace_size_
4423 // and old_generation_size_ otherwise.
4424 if (!heap_configured) {
4425 if (!ConfigureHeapDefault()) return false;
4426 }
4427
Iain Merrick75681382010-08-19 15:07:18 +01004428 ScavengingVisitor::Initialize();
4429 NewSpaceScavenger::Initialize();
4430 MarkCompactCollector::Initialize();
4431
Kristian Monsen80d68ea2010-09-08 11:05:35 +01004432 MarkMapPointersAsEncoded(false);
4433
Steve Blocka7e24c12009-10-30 11:49:00 +00004434 // Setup memory allocator and reserve a chunk of memory for new
Steve Block3ce2e202009-11-05 08:53:23 +00004435 // space. The chunk is double the size of the requested reserved
4436 // new space size to ensure that we can find a pair of semispaces that
4437 // are contiguous and aligned to their size.
4438 if (!MemoryAllocator::Setup(MaxReserved())) return false;
Steve Blocka7e24c12009-10-30 11:49:00 +00004439 void* chunk =
Steve Block3ce2e202009-11-05 08:53:23 +00004440 MemoryAllocator::ReserveInitialChunk(4 * reserved_semispace_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00004441 if (chunk == NULL) return false;
4442
4443 // Align the pair of semispaces to their size, which must be a power
4444 // of 2.
Steve Blocka7e24c12009-10-30 11:49:00 +00004445 Address new_space_start =
Steve Block3ce2e202009-11-05 08:53:23 +00004446 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
4447 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
4448 return false;
4449 }
Steve Blocka7e24c12009-10-30 11:49:00 +00004450
4451 // Initialize old pointer space.
4452 old_pointer_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004453 new OldSpace(max_old_generation_size_, OLD_POINTER_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004454 if (old_pointer_space_ == NULL) return false;
4455 if (!old_pointer_space_->Setup(NULL, 0)) return false;
4456
4457 // Initialize old data space.
4458 old_data_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004459 new OldSpace(max_old_generation_size_, OLD_DATA_SPACE, NOT_EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004460 if (old_data_space_ == NULL) return false;
4461 if (!old_data_space_->Setup(NULL, 0)) return false;
4462
4463 // Initialize the code space, set its maximum capacity to the old
4464 // generation size. It needs executable memory.
4465 // On 64-bit platform(s), we put all code objects in a 2 GB range of
4466 // virtual address space, so that they can call each other with near calls.
4467 if (code_range_size_ > 0) {
4468 if (!CodeRange::Setup(code_range_size_)) {
4469 return false;
4470 }
4471 }
4472
4473 code_space_ =
Steve Block3ce2e202009-11-05 08:53:23 +00004474 new OldSpace(max_old_generation_size_, CODE_SPACE, EXECUTABLE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004475 if (code_space_ == NULL) return false;
4476 if (!code_space_->Setup(NULL, 0)) return false;
4477
4478 // Initialize map space.
Leon Clarkee46be812010-01-19 14:06:41 +00004479 map_space_ = new MapSpace(FLAG_use_big_map_space
4480 ? max_old_generation_size_
Leon Clarked91b9f72010-01-27 17:25:45 +00004481 : MapSpace::kMaxMapPageIndex * Page::kPageSize,
4482 FLAG_max_map_space_pages,
Leon Clarkee46be812010-01-19 14:06:41 +00004483 MAP_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004484 if (map_space_ == NULL) return false;
4485 if (!map_space_->Setup(NULL, 0)) return false;
4486
4487 // Initialize global property cell space.
Steve Block3ce2e202009-11-05 08:53:23 +00004488 cell_space_ = new CellSpace(max_old_generation_size_, CELL_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00004489 if (cell_space_ == NULL) return false;
4490 if (!cell_space_->Setup(NULL, 0)) return false;
4491
4492 // The large object code space may contain code or data. We set the memory
4493 // to be non-executable here for safety, but this means we need to enable it
4494 // explicitly when allocating large code objects.
4495 lo_space_ = new LargeObjectSpace(LO_SPACE);
4496 if (lo_space_ == NULL) return false;
4497 if (!lo_space_->Setup()) return false;
4498
4499 if (create_heap_objects) {
4500 // Create initial maps.
4501 if (!CreateInitialMaps()) return false;
4502 if (!CreateApiObjects()) return false;
4503
4504 // Create initial objects
4505 if (!CreateInitialObjects()) return false;
Ben Murdochf87a2032010-10-22 12:50:53 +01004506
4507 global_contexts_list_ = undefined_value();
Steve Blocka7e24c12009-10-30 11:49:00 +00004508 }
4509
Ben Murdochf87a2032010-10-22 12:50:53 +01004510 LOG(IntPtrTEvent("heap-capacity", Capacity()));
4511 LOG(IntPtrTEvent("heap-available", Available()));
Steve Blocka7e24c12009-10-30 11:49:00 +00004512
Steve Block3ce2e202009-11-05 08:53:23 +00004513#ifdef ENABLE_LOGGING_AND_PROFILING
4514 // This should be called only after initial objects have been created.
4515 ProducerHeapProfile::Setup();
4516#endif
4517
Steve Blocka7e24c12009-10-30 11:49:00 +00004518 return true;
4519}
4520
4521
Steve Blockd0582a62009-12-15 09:54:21 +00004522void Heap::SetStackLimits() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004523 // On 64 bit machines, pointers are generally out of range of Smis. We write
4524 // something that looks like an out of range Smi to the GC.
4525
Steve Blockd0582a62009-12-15 09:54:21 +00004526 // Set up the special root array entries containing the stack limits.
4527 // These are actually addresses, but the tag makes the GC ignore it.
Steve Blocka7e24c12009-10-30 11:49:00 +00004528 roots_[kStackLimitRootIndex] =
Steve Blockd0582a62009-12-15 09:54:21 +00004529 reinterpret_cast<Object*>(
4530 (StackGuard::jslimit() & ~kSmiTagMask) | kSmiTag);
4531 roots_[kRealStackLimitRootIndex] =
4532 reinterpret_cast<Object*>(
4533 (StackGuard::real_jslimit() & ~kSmiTagMask) | kSmiTag);
Steve Blocka7e24c12009-10-30 11:49:00 +00004534}
4535
4536
4537void Heap::TearDown() {
Leon Clarkef7060e22010-06-03 12:02:55 +01004538 if (FLAG_print_cumulative_gc_stat) {
4539 PrintF("\n\n");
4540 PrintF("gc_count=%d ", gc_count_);
4541 PrintF("mark_sweep_count=%d ", ms_count_);
4542 PrintF("mark_compact_count=%d ", mc_count_);
4543 PrintF("max_gc_pause=%d ", GCTracer::get_max_gc_pause());
4544 PrintF("min_in_mutator=%d ", GCTracer::get_min_in_mutator());
Ben Murdochf87a2032010-10-22 12:50:53 +01004545 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
4546 GCTracer::get_max_alive_after_gc());
Leon Clarkef7060e22010-06-03 12:02:55 +01004547 PrintF("\n\n");
4548 }
4549
Steve Blocka7e24c12009-10-30 11:49:00 +00004550 GlobalHandles::TearDown();
4551
Leon Clarkee46be812010-01-19 14:06:41 +00004552 ExternalStringTable::TearDown();
4553
Steve Blocka7e24c12009-10-30 11:49:00 +00004554 new_space_.TearDown();
4555
4556 if (old_pointer_space_ != NULL) {
4557 old_pointer_space_->TearDown();
4558 delete old_pointer_space_;
4559 old_pointer_space_ = NULL;
4560 }
4561
4562 if (old_data_space_ != NULL) {
4563 old_data_space_->TearDown();
4564 delete old_data_space_;
4565 old_data_space_ = NULL;
4566 }
4567
4568 if (code_space_ != NULL) {
4569 code_space_->TearDown();
4570 delete code_space_;
4571 code_space_ = NULL;
4572 }
4573
4574 if (map_space_ != NULL) {
4575 map_space_->TearDown();
4576 delete map_space_;
4577 map_space_ = NULL;
4578 }
4579
4580 if (cell_space_ != NULL) {
4581 cell_space_->TearDown();
4582 delete cell_space_;
4583 cell_space_ = NULL;
4584 }
4585
4586 if (lo_space_ != NULL) {
4587 lo_space_->TearDown();
4588 delete lo_space_;
4589 lo_space_ = NULL;
4590 }
4591
4592 MemoryAllocator::TearDown();
4593}
4594
4595
4596void Heap::Shrink() {
4597 // Try to shrink all paged spaces.
4598 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004599 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
4600 space->Shrink();
Steve Blocka7e24c12009-10-30 11:49:00 +00004601}
4602
4603
4604#ifdef ENABLE_HEAP_PROTECTION
4605
4606void Heap::Protect() {
4607 if (HasBeenSetup()) {
4608 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004609 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4610 space->Protect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004611 }
4612}
4613
4614
4615void Heap::Unprotect() {
4616 if (HasBeenSetup()) {
4617 AllSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00004618 for (Space* space = spaces.next(); space != NULL; space = spaces.next())
4619 space->Unprotect();
Steve Blocka7e24c12009-10-30 11:49:00 +00004620 }
4621}
4622
4623#endif
4624
4625
Steve Block6ded16b2010-05-10 14:33:55 +01004626void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
4627 ASSERT(callback != NULL);
4628 GCPrologueCallbackPair pair(callback, gc_type);
4629 ASSERT(!gc_prologue_callbacks_.Contains(pair));
4630 return gc_prologue_callbacks_.Add(pair);
4631}
4632
4633
4634void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
4635 ASSERT(callback != NULL);
4636 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
4637 if (gc_prologue_callbacks_[i].callback == callback) {
4638 gc_prologue_callbacks_.Remove(i);
4639 return;
4640 }
4641 }
4642 UNREACHABLE();
4643}
4644
4645
4646void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
4647 ASSERT(callback != NULL);
4648 GCEpilogueCallbackPair pair(callback, gc_type);
4649 ASSERT(!gc_epilogue_callbacks_.Contains(pair));
4650 return gc_epilogue_callbacks_.Add(pair);
4651}
4652
4653
4654void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
4655 ASSERT(callback != NULL);
4656 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
4657 if (gc_epilogue_callbacks_[i].callback == callback) {
4658 gc_epilogue_callbacks_.Remove(i);
4659 return;
4660 }
4661 }
4662 UNREACHABLE();
4663}
4664
4665
Steve Blocka7e24c12009-10-30 11:49:00 +00004666#ifdef DEBUG
4667
4668class PrintHandleVisitor: public ObjectVisitor {
4669 public:
4670 void VisitPointers(Object** start, Object** end) {
4671 for (Object** p = start; p < end; p++)
Ben Murdochf87a2032010-10-22 12:50:53 +01004672 PrintF(" handle %p to %p\n",
4673 reinterpret_cast<void*>(p),
4674 reinterpret_cast<void*>(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +00004675 }
4676};
4677
4678void Heap::PrintHandles() {
4679 PrintF("Handles:\n");
4680 PrintHandleVisitor v;
4681 HandleScopeImplementer::Iterate(&v);
4682}
4683
4684#endif
4685
4686
4687Space* AllSpaces::next() {
4688 switch (counter_++) {
4689 case NEW_SPACE:
4690 return Heap::new_space();
4691 case OLD_POINTER_SPACE:
4692 return Heap::old_pointer_space();
4693 case OLD_DATA_SPACE:
4694 return Heap::old_data_space();
4695 case CODE_SPACE:
4696 return Heap::code_space();
4697 case MAP_SPACE:
4698 return Heap::map_space();
4699 case CELL_SPACE:
4700 return Heap::cell_space();
4701 case LO_SPACE:
4702 return Heap::lo_space();
4703 default:
4704 return NULL;
4705 }
4706}
4707
4708
4709PagedSpace* PagedSpaces::next() {
4710 switch (counter_++) {
4711 case OLD_POINTER_SPACE:
4712 return Heap::old_pointer_space();
4713 case OLD_DATA_SPACE:
4714 return Heap::old_data_space();
4715 case CODE_SPACE:
4716 return Heap::code_space();
4717 case MAP_SPACE:
4718 return Heap::map_space();
4719 case CELL_SPACE:
4720 return Heap::cell_space();
4721 default:
4722 return NULL;
4723 }
4724}
4725
4726
4727
4728OldSpace* OldSpaces::next() {
4729 switch (counter_++) {
4730 case OLD_POINTER_SPACE:
4731 return Heap::old_pointer_space();
4732 case OLD_DATA_SPACE:
4733 return Heap::old_data_space();
4734 case CODE_SPACE:
4735 return Heap::code_space();
4736 default:
4737 return NULL;
4738 }
4739}
4740
4741
4742SpaceIterator::SpaceIterator() : current_space_(FIRST_SPACE), iterator_(NULL) {
4743}
4744
4745
4746SpaceIterator::~SpaceIterator() {
4747 // Delete active iterator if any.
4748 delete iterator_;
4749}
4750
4751
4752bool SpaceIterator::has_next() {
4753 // Iterate until no more spaces.
4754 return current_space_ != LAST_SPACE;
4755}
4756
4757
4758ObjectIterator* SpaceIterator::next() {
4759 if (iterator_ != NULL) {
4760 delete iterator_;
4761 iterator_ = NULL;
4762 // Move to the next space
4763 current_space_++;
4764 if (current_space_ > LAST_SPACE) {
4765 return NULL;
4766 }
4767 }
4768
4769 // Return iterator for the new current space.
4770 return CreateIterator();
4771}
4772
4773
4774// Create an iterator for the space to iterate.
4775ObjectIterator* SpaceIterator::CreateIterator() {
4776 ASSERT(iterator_ == NULL);
4777
4778 switch (current_space_) {
4779 case NEW_SPACE:
4780 iterator_ = new SemiSpaceIterator(Heap::new_space());
4781 break;
4782 case OLD_POINTER_SPACE:
4783 iterator_ = new HeapObjectIterator(Heap::old_pointer_space());
4784 break;
4785 case OLD_DATA_SPACE:
4786 iterator_ = new HeapObjectIterator(Heap::old_data_space());
4787 break;
4788 case CODE_SPACE:
4789 iterator_ = new HeapObjectIterator(Heap::code_space());
4790 break;
4791 case MAP_SPACE:
4792 iterator_ = new HeapObjectIterator(Heap::map_space());
4793 break;
4794 case CELL_SPACE:
4795 iterator_ = new HeapObjectIterator(Heap::cell_space());
4796 break;
4797 case LO_SPACE:
4798 iterator_ = new LargeObjectIterator(Heap::lo_space());
4799 break;
4800 }
4801
 4802  // Return the newly allocated iterator.
4803 ASSERT(iterator_ != NULL);
4804 return iterator_;
4805}
4806
4807
4808HeapIterator::HeapIterator() {
4809 Init();
4810}
4811
4812
4813HeapIterator::~HeapIterator() {
4814 Shutdown();
4815}
4816
4817
4818void HeapIterator::Init() {
4819 // Start the iteration.
4820 space_iterator_ = new SpaceIterator();
4821 object_iterator_ = space_iterator_->next();
4822}
4823
4824
4825void HeapIterator::Shutdown() {
4826 // Make sure the last iterator is deallocated.
4827 delete space_iterator_;
4828 space_iterator_ = NULL;
4829 object_iterator_ = NULL;
4830}
4831
4832
Leon Clarked91b9f72010-01-27 17:25:45 +00004833HeapObject* HeapIterator::next() {
Steve Blocka7e24c12009-10-30 11:49:00 +00004834 // No iterator means we are done.
Leon Clarked91b9f72010-01-27 17:25:45 +00004835 if (object_iterator_ == NULL) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00004836
Leon Clarked91b9f72010-01-27 17:25:45 +00004837 if (HeapObject* obj = object_iterator_->next_object()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00004838 // If the current iterator has more objects we are fine.
Leon Clarked91b9f72010-01-27 17:25:45 +00004839 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00004840 } else {
 4841    // Go through the spaces looking for one that has objects.
4842 while (space_iterator_->has_next()) {
4843 object_iterator_ = space_iterator_->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00004844 if (HeapObject* obj = object_iterator_->next_object()) {
4845 return obj;
Steve Blocka7e24c12009-10-30 11:49:00 +00004846 }
4847 }
4848 }
4849 // Done with the last space.
4850 object_iterator_ = NULL;
Leon Clarked91b9f72010-01-27 17:25:45 +00004851 return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00004852}
4853
4854
4855void HeapIterator::reset() {
4856 // Restart the iterator.
4857 Shutdown();
4858 Init();
4859}
4860
4861
4862#ifdef DEBUG
4863
4864static bool search_for_any_global;
4865static Object* search_target;
4866static bool found_target;
4867static List<Object*> object_stack(20);
4868
4869
4870// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
4871static const int kMarkTag = 2;
4872
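// An object is marked as visited by overwriting its map word with the map's
// address plus kMarkTag; UnmarkObjectRecursively reverses this, so a
// TracePathTo* traversal leaves the heap unchanged.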
4873static void MarkObjectRecursively(Object** p);
4874class MarkObjectVisitor : public ObjectVisitor {
4875 public:
4876 void VisitPointers(Object** start, Object** end) {
 4877    // Recursively mark all HeapObject pointers in [start, end).
4878 for (Object** p = start; p < end; p++) {
4879 if ((*p)->IsHeapObject())
4880 MarkObjectRecursively(p);
4881 }
4882 }
4883};
4884
4885static MarkObjectVisitor mark_visitor;
4886
4887static void MarkObjectRecursively(Object** p) {
4888 if (!(*p)->IsHeapObject()) return;
4889
4890 HeapObject* obj = HeapObject::cast(*p);
4891
4892 Object* map = obj->map();
4893
4894 if (!map->IsHeapObject()) return; // visited before
4895
4896 if (found_target) return; // stop if target found
4897 object_stack.Add(obj);
4898 if ((search_for_any_global && obj->IsJSGlobalObject()) ||
4899 (!search_for_any_global && (obj == search_target))) {
4900 found_target = true;
4901 return;
4902 }
4903
4904 // not visited yet
4905 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
4906
4907 Address map_addr = map_p->address();
4908
4909 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
4910
4911 MarkObjectRecursively(&map);
4912
4913 obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
4914 &mark_visitor);
4915
4916 if (!found_target) // don't pop if found the target
4917 object_stack.RemoveLast();
4918}
4919
4920
4921static void UnmarkObjectRecursively(Object** p);
4922class UnmarkObjectVisitor : public ObjectVisitor {
4923 public:
4924 void VisitPointers(Object** start, Object** end) {
 4925    // Recursively unmark all HeapObject pointers in [start, end).
4926 for (Object** p = start; p < end; p++) {
4927 if ((*p)->IsHeapObject())
4928 UnmarkObjectRecursively(p);
4929 }
4930 }
4931};
4932
4933static UnmarkObjectVisitor unmark_visitor;
4934
4935static void UnmarkObjectRecursively(Object** p) {
4936 if (!(*p)->IsHeapObject()) return;
4937
4938 HeapObject* obj = HeapObject::cast(*p);
4939
4940 Object* map = obj->map();
4941
4942 if (map->IsHeapObject()) return; // unmarked already
4943
4944 Address map_addr = reinterpret_cast<Address>(map);
4945
4946 map_addr -= kMarkTag;
4947
4948 ASSERT_TAG_ALIGNED(map_addr);
4949
4950 HeapObject* map_p = HeapObject::FromAddress(map_addr);
4951
4952 obj->set_map(reinterpret_cast<Map*>(map_p));
4953
4954 UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
4955
4956 obj->IterateBody(Map::cast(map_p)->instance_type(),
4957 obj->SizeFromMap(Map::cast(map_p)),
4958 &unmark_visitor);
4959}
4960
4961
4962static void MarkRootObjectRecursively(Object** root) {
4963 if (search_for_any_global) {
4964 ASSERT(search_target == NULL);
4965 } else {
4966 ASSERT(search_target->IsHeapObject());
4967 }
4968 found_target = false;
4969 object_stack.Clear();
4970
4971 MarkObjectRecursively(root);
4972 UnmarkObjectRecursively(root);
4973
4974 if (found_target) {
4975 PrintF("=====================================\n");
4976 PrintF("==== Path to object ====\n");
4977 PrintF("=====================================\n\n");
4978
4979 ASSERT(!object_stack.is_empty());
4980 for (int i = 0; i < object_stack.length(); i++) {
4981 if (i > 0) PrintF("\n |\n |\n V\n\n");
4982 Object* obj = object_stack[i];
4983 obj->Print();
4984 }
4985 PrintF("=====================================\n");
4986 }
4987}
4988
4989
4990// Helper class for visiting HeapObjects recursively.
4991class MarkRootVisitor: public ObjectVisitor {
4992 public:
4993 void VisitPointers(Object** start, Object** end) {
4994 // Visit all HeapObject pointers in [start, end)
4995 for (Object** p = start; p < end; p++) {
4996 if ((*p)->IsHeapObject())
4997 MarkRootObjectRecursively(p);
4998 }
4999 }
5000};
5001
5002
5003// Triggers a depth-first traversal of reachable objects from roots
5004// and finds a path to a specific heap object and prints it.
Leon Clarkee46be812010-01-19 14:06:41 +00005005void Heap::TracePathToObject(Object* target) {
5006 search_target = target;
Steve Blocka7e24c12009-10-30 11:49:00 +00005007 search_for_any_global = false;
5008
5009 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00005010 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005011}
5012
5013
5014// Triggers a depth-first traversal of reachable objects from roots
5015// and finds a path to any global object and prints it. Useful for
5016// determining the source for leaks of global objects.
5017void Heap::TracePathToGlobal() {
5018 search_target = NULL;
5019 search_for_any_global = true;
5020
5021 MarkRootVisitor root_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00005022 IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00005023}
5024#endif
5025
5026
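// Sums the bytes wasted plus the bytes still available on free lists across
// the old spaces; the GC tracer uses this to report hole sizes before and
// after a collection.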
Ben Murdochf87a2032010-10-22 12:50:53 +01005027static intptr_t CountTotalHolesSize() {
5028 intptr_t holes_size = 0;
Leon Clarkef7060e22010-06-03 12:02:55 +01005029 OldSpaces spaces;
5030 for (OldSpace* space = spaces.next();
5031 space != NULL;
5032 space = spaces.next()) {
5033 holes_size += space->Waste() + space->AvailableFree();
5034 }
5035 return holes_size;
5036}
5037
5038
Steve Blocka7e24c12009-10-30 11:49:00 +00005039GCTracer::GCTracer()
5040 : start_time_(0.0),
Leon Clarkef7060e22010-06-03 12:02:55 +01005041 start_size_(0),
Steve Blocka7e24c12009-10-30 11:49:00 +00005042 gc_count_(0),
5043 full_gc_count_(0),
5044 is_compacting_(false),
Leon Clarkef7060e22010-06-03 12:02:55 +01005045 marked_count_(0),
5046 allocated_since_last_gc_(0),
5047 spent_in_mutator_(0),
5048 promoted_objects_size_(0) {
Steve Blocka7e24c12009-10-30 11:49:00 +00005049 // These two fields reflect the state of the previous full collection.
5050 // Set them before they are changed by the collector.
5051 previous_has_compacted_ = MarkCompactCollector::HasCompacted();
5052 previous_marked_count_ = MarkCompactCollector::previous_marked_count();
Leon Clarkef7060e22010-06-03 12:02:55 +01005053 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00005054 start_time_ = OS::TimeCurrentMillis();
Leon Clarkef7060e22010-06-03 12:02:55 +01005055 start_size_ = Heap::SizeOfObjects();
5056
5057 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
5058 scopes_[i] = 0;
5059 }
5060
5061 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
5062
5063 allocated_since_last_gc_ = Heap::SizeOfObjects() - alive_after_last_gc_;
5064
5065 if (last_gc_end_timestamp_ > 0) {
5066 spent_in_mutator_ = Max(start_time_ - last_gc_end_timestamp_, 0.0);
5067 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005068}
5069
5070
5071GCTracer::~GCTracer() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005072 // Printf ONE line iff flag is set.
Leon Clarkef7060e22010-06-03 12:02:55 +01005073 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
5074
5075 bool first_gc = (last_gc_end_timestamp_ == 0);
5076
5077 alive_after_last_gc_ = Heap::SizeOfObjects();
5078 last_gc_end_timestamp_ = OS::TimeCurrentMillis();
5079
5080 int time = static_cast<int>(last_gc_end_timestamp_ - start_time_);
5081
5082 // Update cumulative GC statistics if required.
5083 if (FLAG_print_cumulative_gc_stat) {
5084 max_gc_pause_ = Max(max_gc_pause_, time);
5085 max_alive_after_gc_ = Max(max_alive_after_gc_, alive_after_last_gc_);
5086 if (!first_gc) {
5087 min_in_mutator_ = Min(min_in_mutator_,
5088 static_cast<int>(spent_in_mutator_));
5089 }
5090 }
5091
5092 if (!FLAG_trace_gc_nvp) {
5093 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
5094
5095 PrintF("%s %.1f -> %.1f MB, ",
5096 CollectorString(),
5097 static_cast<double>(start_size_) / MB,
5098 SizeOfHeapObjects());
5099
5100 if (external_time > 0) PrintF("%d / ", external_time);
5101 PrintF("%d ms.\n", time);
5102 } else {
5103 PrintF("pause=%d ", time);
5104 PrintF("mutator=%d ",
5105 static_cast<int>(spent_in_mutator_));
5106
5107 PrintF("gc=");
5108 switch (collector_) {
5109 case SCAVENGER:
5110 PrintF("s");
5111 break;
5112 case MARK_COMPACTOR:
5113 PrintF(MarkCompactCollector::HasCompacted() ? "mc" : "ms");
5114 break;
5115 default:
5116 UNREACHABLE();
5117 }
5118 PrintF(" ");
5119
5120 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
5121 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
5122 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
Iain Merrick75681382010-08-19 15:07:18 +01005123 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
Leon Clarkef7060e22010-06-03 12:02:55 +01005124 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
5125
Ben Murdochf87a2032010-10-22 12:50:53 +01005126 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
5127 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", Heap::SizeOfObjects());
5128 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
5129 in_free_list_or_wasted_before_gc_);
5130 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
Leon Clarkef7060e22010-06-03 12:02:55 +01005131
Ben Murdochf87a2032010-10-22 12:50:53 +01005132 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
5133 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
Leon Clarkef7060e22010-06-03 12:02:55 +01005134
5135 PrintF("\n");
5136 }
Steve Blocka7e24c12009-10-30 11:49:00 +00005137
5138#if defined(ENABLE_LOGGING_AND_PROFILING)
5139 Heap::PrintShortHeapStatistics();
5140#endif
5141}
5142
5143
5144const char* GCTracer::CollectorString() {
5145 switch (collector_) {
5146 case SCAVENGER:
5147 return "Scavenge";
5148 case MARK_COMPACTOR:
5149 return MarkCompactCollector::HasCompacted() ? "Mark-compact"
5150 : "Mark-sweep";
5151 }
5152 return "Unknown GC";
5153}
5154
5155
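// Hashes a (map, name) pair into the cache: the low 32 bits of the map
// pointer, shifted right by kMapHashShift, are XORed with the name's hash and
// masked to the cache capacity.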
5156int KeyedLookupCache::Hash(Map* map, String* name) {
5157 // Uses only lower 32 bits if pointers are larger.
5158 uintptr_t addr_hash =
Leon Clarkee46be812010-01-19 14:06:41 +00005159 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
Andrei Popescu402d9372010-02-26 13:31:12 +00005160 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
Steve Blocka7e24c12009-10-30 11:49:00 +00005161}
5162
5163
5164int KeyedLookupCache::Lookup(Map* map, String* name) {
5165 int index = Hash(map, name);
5166 Key& key = keys_[index];
5167 if ((key.map == map) && key.name->Equals(name)) {
5168 return field_offsets_[index];
5169 }
5170 return -1;
5171}
5172
5173
5174void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
5175 String* symbol;
5176 if (Heap::LookupSymbolIfExists(name, &symbol)) {
5177 int index = Hash(map, symbol);
5178 Key& key = keys_[index];
5179 key.map = map;
5180 key.name = symbol;
5181 field_offsets_[index] = field_offset;
5182 }
5183}
5184
5185
5186void KeyedLookupCache::Clear() {
5187 for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
5188}
5189
5190
5191KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
5192
5193
5194int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
5195
5196
5197void DescriptorLookupCache::Clear() {
5198 for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
5199}
5200
5201
5202DescriptorLookupCache::Key
5203DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
5204
5205int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
5206
5207
5208#ifdef DEBUG
Ben Murdochf87a2032010-10-22 12:50:53 +01005209void Heap::GarbageCollectionGreedyCheck() {
Steve Blocka7e24c12009-10-30 11:49:00 +00005210 ASSERT(FLAG_gc_greedy);
Ben Murdochf87a2032010-10-22 12:50:53 +01005211 if (Bootstrapper::IsActive()) return;
5212 if (disallow_allocation_failure()) return;
5213 CollectGarbage(NEW_SPACE);
Steve Blocka7e24c12009-10-30 11:49:00 +00005214}
5215#endif
5216
5217
5218TranscendentalCache::TranscendentalCache(TranscendentalCache::Type t)
5219 : type_(t) {
5220 uint32_t in0 = 0xffffffffu; // Bit-pattern for a NaN that isn't
5221 uint32_t in1 = 0xffffffffu; // generated by the FPU.
5222 for (int i = 0; i < kCacheSize; i++) {
5223 elements_[i].in[0] = in0;
5224 elements_[i].in[1] = in1;
5225 elements_[i].output = NULL;
5226 }
5227}
5228
5229
5230TranscendentalCache* TranscendentalCache::caches_[kNumberOfCaches];
5231
5232
5233void TranscendentalCache::Clear() {
5234 for (int i = 0; i < kNumberOfCaches; i++) {
5235 if (caches_[i] != NULL) {
5236 delete caches_[i];
5237 caches_[i] = NULL;
5238 }
5239 }
5240}
5241
5242
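// Compacts both string lists: entries cleared to the null sentinel are
// dropped, and new-space entries whose strings have been promoted move to the
// old-space list.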
Leon Clarkee46be812010-01-19 14:06:41 +00005243void ExternalStringTable::CleanUp() {
5244 int last = 0;
5245 for (int i = 0; i < new_space_strings_.length(); ++i) {
5246 if (new_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
5247 if (Heap::InNewSpace(new_space_strings_[i])) {
5248 new_space_strings_[last++] = new_space_strings_[i];
5249 } else {
5250 old_space_strings_.Add(new_space_strings_[i]);
5251 }
5252 }
5253 new_space_strings_.Rewind(last);
5254 last = 0;
5255 for (int i = 0; i < old_space_strings_.length(); ++i) {
5256 if (old_space_strings_[i] == Heap::raw_unchecked_null_value()) continue;
5257 ASSERT(!Heap::InNewSpace(old_space_strings_[i]));
5258 old_space_strings_[last++] = old_space_strings_[i];
5259 }
5260 old_space_strings_.Rewind(last);
5261 Verify();
5262}
5263
5264
5265void ExternalStringTable::TearDown() {
5266 new_space_strings_.Free();
5267 old_space_strings_.Free();
5268}
5269
5270
5271List<Object*> ExternalStringTable::new_space_strings_;
5272List<Object*> ExternalStringTable::old_space_strings_;
5273
Steve Blocka7e24c12009-10-30 11:49:00 +00005274} } // namespace v8::internal