blob: 6ff1d7f5b0063941ef0b922e993e142b48e5fc07 [file] [log] [blame]
Steve Blocka7e24c12009-10-30 11:49:00 +00001// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "accessors.h"
31#include "api.h"
32#include "execution.h"
33#include "global-handles.h"
34#include "ic-inl.h"
35#include "natives.h"
36#include "platform.h"
37#include "runtime.h"
38#include "serialize.h"
39#include "stub-cache.h"
40#include "v8threads.h"
Steve Block3ce2e202009-11-05 08:53:23 +000041#include "top.h"
Steve Blocka7e24c12009-10-30 11:49:00 +000042
43namespace v8 {
44namespace internal {
45
// 32-bit encoding: a RelativeAddress must be able to fit in a
// pointer: it is encoded as an Address with (from LS to MS bits):
// - 2 bits identifying this as a HeapObject.
// - 4 bits to encode the AllocationSpace (including special values for
//   code and fixed arrays in LO space)
// - 27 bits identifying a word in the space, in one of three formats:
//   - paged spaces: 16 bits of page number, 11 bits of word offset in page
//   - NEW space: 27 bits of word offset
//   - LO space: 27 bits of page number

// Allocation-space field: sits just above the heap object tag bits.
const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = 4;
const int kSpaceMask = (1 << kSpaceBits) - 1;

// Word offset within a page (paged spaces only).
const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;

// Page number within a paged space; uses whatever bits remain of the
// 32-bit encoding.
const int kPageShift = kOffsetShift + kOffsetBits;
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageMask = (1 << kPageBits) - 1;

// Combined page+offset field: NEW space stores a 27-bit word offset in
// it, LO space a 27-bit page number.
const int kPageAndOffsetShift = kOffsetShift;
const int kPageAndOffsetBits = kPageBits + kOffsetBits;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;

// These values are special allocation space tags used for
// serialization.
// Mark the pages executable on platforms that support it.
const int kLargeCode = LAST_SPACE + 1;
// Allocate extra remembered-set bits.
const int kLargeFixedArray = LAST_SPACE + 2;
78
79
80static inline AllocationSpace GetSpace(Address addr) {
81 const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
82 int space_number = (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
83 if (space_number > LAST_SPACE) space_number = LO_SPACE;
84 return static_cast<AllocationSpace>(space_number);
85}
86
87
88static inline bool IsLargeExecutableObject(Address addr) {
89 const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
90 const int space_number =
91 (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
92 return (space_number == kLargeCode);
93}
94
95
96static inline bool IsLargeFixedArray(Address addr) {
97 const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
98 const int space_number =
99 (static_cast<int>(encoded >> kSpaceShift) & kSpaceMask);
100 return (space_number == kLargeFixedArray);
101}
102
103
104static inline int PageIndex(Address addr) {
105 const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
106 return static_cast<int>(encoded >> kPageShift) & kPageMask;
107}
108
109
110static inline int PageOffset(Address addr) {
111 const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
112 const int offset = static_cast<int>(encoded >> kOffsetShift) & kOffsetMask;
113 return offset << kObjectAlignmentBits;
114}
115
116
117static inline int NewSpaceOffset(Address addr) {
118 const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
119 const int page_offset =
120 static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
121 return page_offset << kObjectAlignmentBits;
122}
123
124
125static inline int LargeObjectIndex(Address addr) {
126 const intptr_t encoded = reinterpret_cast<intptr_t>(addr);
127 return static_cast<int>(encoded >> kPageAndOffsetShift) & kPageAndOffsetMask;
128}
129
130
// A RelativeAddress encodes a heap address that is independent of
// the actual memory addresses in real heap. The general case (for the
// OLD, CODE and MAP spaces) is as a (space id, page number, page offset)
// triple. The NEW space has page number == 0, because there are no
// pages. The LARGE_OBJECT space has page offset = 0, since there is
// exactly one object per page. RelativeAddresses are encodable as
// Addresses, so that they can replace the map() pointers of
// HeapObjects. The encoded Addresses are also encoded as HeapObjects
// and allow for marking (is_marked() see mark(), clear_mark()...) as
// used by the Mark-Compact collector.

class RelativeAddress {
 public:
  RelativeAddress(AllocationSpace space,
                  int page_index,
                  int page_offset)
  : space_(space), page_index_(page_index), page_offset_(page_offset) {
    // Assert that the space encoding (plus the two pseudo-spaces for
    // special large objects) fits in the available bits.
    ASSERT(((LAST_SPACE + 2) & ~kSpaceMask) == 0);
    ASSERT(space <= LAST_SPACE && space >= 0);
  }

  // Return the encoding of 'this' as an Address. Decode with constructor.
  Address Encode() const;

  // The pseudo-space tags (kLargeCode, kLargeFixedArray) both map back
  // to LO_SPACE, mirroring GetSpace() above.
  AllocationSpace space() const {
    if (space_ > LAST_SPACE) return LO_SPACE;
    return static_cast<AllocationSpace>(space_);
  }
  int page_index() const { return page_index_; }
  int page_offset() const { return page_offset_; }

  // True for the spaces organized as a sequence of pages.
  bool in_paged_space() const {
    return space_ == CODE_SPACE ||
           space_ == OLD_POINTER_SPACE ||
           space_ == OLD_DATA_SPACE ||
           space_ == MAP_SPACE ||
           space_ == CELL_SPACE;
  }

  // Advance within the current page / move to the start of the next page.
  void next_address(int offset) { page_offset_ += offset; }
  void next_page(int init_offset = 0) {
    page_index_++;
    page_offset_ = init_offset;
  }

#ifdef DEBUG
  void Verify();
#endif

  // Retag an LO-space address with the pseudo-space for large
  // executable code objects (see kLargeCode).
  void set_to_large_code_object() {
    ASSERT(space_ == LO_SPACE);
    space_ = kLargeCode;
  }
  // Retag an LO-space address with the pseudo-space for large fixed
  // arrays that need extra remembered-set bits (see kLargeFixedArray).
  void set_to_large_fixed_array() {
    ASSERT(space_ == LO_SPACE);
    space_ = kLargeFixedArray;
  }


 private:
  int space_;        // AllocationSpace, or one of the two pseudo-space tags.
  int page_index_;   // Page number within the space (0 for NEW space).
  int page_offset_;  // Byte offset within the page (0 for LO space).
};
197
198
199Address RelativeAddress::Encode() const {
200 ASSERT(page_index_ >= 0);
201 int word_offset = 0;
202 int result = 0;
203 switch (space_) {
204 case MAP_SPACE:
205 case CELL_SPACE:
206 case OLD_POINTER_SPACE:
207 case OLD_DATA_SPACE:
208 case CODE_SPACE:
209 ASSERT_EQ(0, page_index_ & ~kPageMask);
210 word_offset = page_offset_ >> kObjectAlignmentBits;
211 ASSERT_EQ(0, word_offset & ~kOffsetMask);
212 result = (page_index_ << kPageShift) | (word_offset << kOffsetShift);
213 break;
214 case NEW_SPACE:
215 ASSERT_EQ(0, page_index_);
216 word_offset = page_offset_ >> kObjectAlignmentBits;
217 ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask);
218 result = word_offset << kPageAndOffsetShift;
219 break;
220 case LO_SPACE:
221 case kLargeCode:
222 case kLargeFixedArray:
223 ASSERT_EQ(0, page_offset_);
224 ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
225 result = page_index_ << kPageAndOffsetShift;
226 break;
227 }
228 // OR in AllocationSpace and kHeapObjectTag
229 ASSERT_EQ(0, space_ & ~kSpaceMask);
230 result |= (space_ << kSpaceShift) | kHeapObjectTag;
231 return reinterpret_cast<Address>(result);
232}
233
234
#ifdef DEBUG
// Checks that this RelativeAddress satisfies the invariants of its
// space; called before and after simulated allocation.
void RelativeAddress::Verify() {
  ASSERT(page_offset_ >= 0 && page_index_ >= 0);
  switch (space_) {
    case MAP_SPACE:
    case CELL_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE:
      // Paged spaces: the offset must lie within the usable part of a
      // page (at or past the page header).
      ASSERT(Page::kObjectStartOffset <= page_offset_ &&
             page_offset_ <= Page::kPageSize);
      break;
    case NEW_SPACE:
      // New space is a single page.
      ASSERT(page_index_ == 0);
      break;
    case LO_SPACE:
    case kLargeCode:
    case kLargeFixedArray:
      // Large object space: exactly one object per page, at offset 0.
      ASSERT(page_offset_ == 0);
      break;
  }
}
#endif
258
// How the garbage collector must treat an object being allocated in the
// simulated heap; used to pick the LO-space pseudo-space tags in
// SimulatedHeapSpace::Allocate.
enum GCTreatment {
  DataObject,     // Object that cannot contain a reference to new space.
  PointerObject,  // Object that can contain a reference to new space.
  CodeObject      // Object that contains executable code.
};
264
// A SimulatedHeapSpace simulates the allocation of objects in a page in
// the heap. It uses linear allocation - that is, it doesn't simulate the
// use of a free list. This simulated allocation must exactly match that
// done by Heap.

class SimulatedHeapSpace {
 public:
  // The default constructor initializes to an invalid state.
  SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {}

  // Sets 'this' to the first address in 'space' that would be
  // returned by allocation in an empty heap.
  void InitEmptyHeap(AllocationSpace space);

  // Sets 'this' to the next address in 'space' that would be returned
  // by allocation in the current heap. Intended only for testing
  // serialization and deserialization in the current address space.
  void InitCurrentHeap(AllocationSpace space);

  // Returns the RelativeAddress where the next
  // object of 'size' bytes will be allocated, and updates 'this' to
  // point to the next free address beyond that object.
  RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);

 private:
  // The next free (simulated) address in the space.
  RelativeAddress current_;
};
292
293
294void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
295 switch (space) {
296 case MAP_SPACE:
297 case CELL_SPACE:
298 case OLD_POINTER_SPACE:
299 case OLD_DATA_SPACE:
300 case CODE_SPACE:
301 current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
302 break;
303 case NEW_SPACE:
304 case LO_SPACE:
305 current_ = RelativeAddress(space, 0, 0);
306 break;
307 }
308}
309
310
// Positions the simulated allocation pointer at the real heap's current
// allocation top for 'space', expressed as a RelativeAddress.
void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
  switch (space) {
    case MAP_SPACE:
    case CELL_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE: {
      // Map the space enum to its Heap accessor.
      PagedSpace* ps;
      if (space == MAP_SPACE) {
        ps = Heap::map_space();
      } else if (space == CELL_SPACE) {
        ps = Heap::cell_space();
      } else if (space == OLD_POINTER_SPACE) {
        ps = Heap::old_pointer_space();
      } else if (space == OLD_DATA_SPACE) {
        ps = Heap::old_data_space();
      } else {
        ASSERT(space == CODE_SPACE);
        ps = Heap::code_space();
      }
      Address top = ps->top();
      Page* top_page = Page::FromAllocationTop(top);
      // Convert the allocation top into a page index by walking the
      // in-use pages in order until the top page is reached.
      int page_index = 0;
      PageIterator it(ps, PageIterator::PAGES_IN_USE);
      while (it.has_next()) {
        if (it.next() == top_page) break;
        page_index++;
      }
      current_ = RelativeAddress(space,
                                 page_index,
                                 top_page->Offset(top));
      break;
    }
    case NEW_SPACE:
      // New space allocates linearly; the offset is the number of bytes
      // already allocated.
      current_ = RelativeAddress(space,
                                 0,
                                 Heap::NewSpaceTop() - Heap::NewSpaceStart());
      break;
    case LO_SPACE:
      // Large object space: the next object goes on a fresh page after
      // all currently allocated large objects, so count them.
      int page_index = 0;
      for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) {
        page_index++;
      }
      current_ = RelativeAddress(space, page_index, 0);
      break;
  }
}
358
359
360RelativeAddress SimulatedHeapSpace::Allocate(int size,
361 GCTreatment special_gc_treatment) {
362#ifdef DEBUG
363 current_.Verify();
364#endif
365 int alloc_size = OBJECT_SIZE_ALIGN(size);
366 if (current_.in_paged_space() &&
367 current_.page_offset() + alloc_size > Page::kPageSize) {
368 ASSERT(alloc_size <= Page::kMaxHeapObjectSize);
369 current_.next_page(Page::kObjectStartOffset);
370 }
371 RelativeAddress result = current_;
372 if (current_.space() == LO_SPACE) {
373 current_.next_page();
374 if (special_gc_treatment == CodeObject) {
375 result.set_to_large_code_object();
376 } else if (special_gc_treatment == PointerObject) {
377 result.set_to_large_fixed_array();
378 }
379 } else {
380 current_.next_address(alloc_size);
381 }
382#ifdef DEBUG
383 current_.Verify();
384 result.Verify();
385#endif
386 return result;
387}
388
// -----------------------------------------------------------------------------
// Coding of external references.
391
392// The encoding of an external reference. The type is in the high word.
393// The id is in the low word.
394static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
395 return static_cast<uint32_t>(type) << 16 | id;
396}
397
398
399static int* GetInternalPointer(StatsCounter* counter) {
400 // All counters refer to dummy_counter, if deserializing happens without
401 // setting up counters.
402 static int dummy_counter = 0;
403 return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
404}
405
406
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  // Lazily created singleton; the table is populated on first use.
  static ExternalReferenceTable* instance() {
    if (!instance_) instance_ = new ExternalReferenceTable();
    return instance_;
  }

  int size() const { return refs_.length(); }

  // Accessors for entry i: raw address, (type, id) encoding, and a
  // human-readable name used for disassembly/debugging.
  Address address(int i) { return refs_[i].address; }

  uint32_t code(int i) { return refs_[i].code; }

  const char* name(int i) { return refs_[i].name; }

  // Largest id registered for the given type code; used by the decoder
  // to size its per-type lookup arrays.
  int max_id(int code) { return max_id_[code]; }

 private:
  static ExternalReferenceTable* instance_;

  ExternalReferenceTable() : refs_(64) { PopulateTable(); }
  ~ExternalReferenceTable() { }

  struct ExternalReferenceEntry {
    Address address;
    uint32_t code;
    const char* name;
  };

  void PopulateTable();

  // For a few types of references, we can get their address from their id.
  void AddFromId(TypeCode type, uint16_t id, const char* name);

  // For other types of references, the caller will figure out the address.
  void Add(Address address, TypeCode type, uint16_t id, const char* name);

  List<ExternalReferenceEntry> refs_;
  int max_id_[kTypeCodeCount];
};
450
451
// Singleton storage; created lazily by ExternalReferenceTable::instance().
ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;
453
454
455void ExternalReferenceTable::AddFromId(TypeCode type,
456 uint16_t id,
457 const char* name) {
458 Address address;
459 switch (type) {
460 case C_BUILTIN: {
461 ExternalReference ref(static_cast<Builtins::CFunctionId>(id));
462 address = ref.address();
463 break;
464 }
465 case BUILTIN: {
466 ExternalReference ref(static_cast<Builtins::Name>(id));
467 address = ref.address();
468 break;
469 }
470 case RUNTIME_FUNCTION: {
471 ExternalReference ref(static_cast<Runtime::FunctionId>(id));
472 address = ref.address();
473 break;
474 }
475 case IC_UTILITY: {
476 ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)));
477 address = ref.address();
478 break;
479 }
480 default:
481 UNREACHABLE();
482 return;
483 }
484 Add(address, type, id, name);
485}
486
487
488void ExternalReferenceTable::Add(Address address,
489 TypeCode type,
490 uint16_t id,
491 const char* name) {
492 CHECK_NE(NULL, address);
493 ExternalReferenceEntry entry;
494 entry.address = address;
495 entry.code = EncodeExternal(type, id);
496 entry.name = name;
497 CHECK_NE(0, entry.code);
498 refs_.Add(entry);
499 if (id > max_id_[type]) max_id_[type] = id;
500}
501
502
// Fills the table with every address the serializer may encounter:
// builtins, runtime functions, IC utilities, debugger addresses, stats
// counters, Top addresses, extensions, accessors, stub cache tables and
// a set of unclassified miscellaneous references.
void ExternalReferenceTable::PopulateTable() {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different type of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code.  It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat.  Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name) \
  { BUILTIN, \
    Builtins::name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  // Entries whose address is derivable from (type, id) alone.
  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type, ref_table[i].id, ref_table[i].name);
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  // One entry per JS-caller-saved register; names are heap-allocated
  // and intentionally never freed (the table lives for the process).
  const char* debug_register_format = "Debug::register_address(%i)";
  size_t dr_format_length = strlen(debug_register_format);
  for (int i = 0; i < kNumJSCallerSaved; ++i) {
    Vector<char> name = Vector<char>::New(dr_format_length + 1);
    OS::SNPrintF(name, debug_register_format, i);
    Add(Debug_Address(Debug::k_register_address, i).address(),
        DEBUG_ADDRESS,
        Debug::k_register_address << kDebugIdShift | i,
        name.start());
  }
#endif

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* counter;
    uint16_t id;
    const char* name;
  };

  static const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(
            GetInternalPointer(stats_ref_table[i].counter)),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses
  const char* top_address_format = "Top::%s";

  const char* AddressNames[] = {
#define C(name) #name,
    TOP_ADDRESS_LIST(C)
    TOP_ADDRESS_LIST_PROF(C)
    NULL
#undef C
  };

  // "- 2" drops the "%s" from the format when sizing the buffer.
  size_t top_format_length = strlen(top_address_format) - 2;
  for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
    const char* address_name = AddressNames[i];
    Vector<char> name =
        Vector<char>::New(top_format_length + strlen(address_name) + 1);
    const char* chars = name.start();
    OS::SNPrintF(name, top_address_format, address_name);
    Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
  }

  // Extensions
  Add(FUNCTION_ADDR(GCExtension::GC), EXTENSION, 1,
      "GCExtension::GC");

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  // Stub cache tables
  Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::secondary_->key");
  Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->value");

  // Runtime entries
  Add(ExternalReference::perform_gc_function().address(),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");
  Add(ExternalReference::random_positive_smi_function().address(),
      RUNTIME_ENTRY,
      2,
      "V8::RandomPositiveSmi");

  // Miscellaneous
  Add(ExternalReference::builtin_passed_function().address(),
      UNCLASSIFIED,
      1,
      "Builtins::builtin_passed_function");
  Add(ExternalReference::the_hole_value_location().address(),
      UNCLASSIFIED,
      2,
      "Factory::the_hole_value().location()");
  Add(ExternalReference::roots_address().address(),
      UNCLASSIFIED,
      3,
      "Heap::roots_address()");
  Add(ExternalReference::address_of_stack_guard_limit().address(),
      UNCLASSIFIED,
      4,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_regexp_stack_limit().address(),
      UNCLASSIFIED,
      5,
      "RegExpStack::limit_address()");
  Add(ExternalReference::new_space_start().address(),
      UNCLASSIFIED,
      6,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::heap_always_allocate_scope_depth().address(),
      UNCLASSIFIED,
      7,
      "Heap::always_allocate_scope_depth()");
  Add(ExternalReference::new_space_allocation_limit_address().address(),
      UNCLASSIFIED,
      8,
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address().address(),
      UNCLASSIFIED,
      9,
      "Heap::NewSpaceAllocationTopAddress()");
#ifdef ENABLE_DEBUGGER_SUPPORT
  Add(ExternalReference::debug_break().address(),
      UNCLASSIFIED,
      10,
      "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address().address(),
      UNCLASSIFIED,
      11,
      "Debug::step_in_fp_addr()");
#endif
  Add(ExternalReference::double_fp_operation(Token::ADD).address(),
      UNCLASSIFIED,
      12,
      "add_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::SUB).address(),
      UNCLASSIFIED,
      13,
      "sub_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MUL).address(),
      UNCLASSIFIED,
      14,
      "mul_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::DIV).address(),
      UNCLASSIFIED,
      15,
      "div_two_doubles");
  Add(ExternalReference::double_fp_operation(Token::MOD).address(),
      UNCLASSIFIED,
      16,
      "mod_two_doubles");
  Add(ExternalReference::compare_doubles().address(),
      UNCLASSIFIED,
      17,
      "compare_doubles");
#ifdef V8_NATIVE_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16().address(),
      UNCLASSIFIED,
      18,
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state().address(),
      UNCLASSIFIED,
      19,
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack().address(),
      UNCLASSIFIED,
      20,
      "NativeRegExpMacroAssembler::GrowStack()");
#endif
}
764
765
766ExternalReferenceEncoder::ExternalReferenceEncoder()
767 : encodings_(Match) {
768 ExternalReferenceTable* external_references =
769 ExternalReferenceTable::instance();
770 for (int i = 0; i < external_references->size(); ++i) {
771 Put(external_references->address(i), i);
772 }
773}
774
775
776uint32_t ExternalReferenceEncoder::Encode(Address key) const {
777 int index = IndexOf(key);
778 return index >=0 ? ExternalReferenceTable::instance()->code(index) : 0;
779}
780
781
782const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
783 int index = IndexOf(key);
784 return index >=0 ? ExternalReferenceTable::instance()->name(index) : NULL;
785}
786
787
788int ExternalReferenceEncoder::IndexOf(Address key) const {
789 if (key == NULL) return -1;
790 HashMap::Entry* entry =
791 const_cast<HashMap &>(encodings_).Lookup(key, Hash(key), false);
792 return entry == NULL
793 ? -1
794 : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
795}
796
797
798void ExternalReferenceEncoder::Put(Address key, int index) {
799 HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
800 entry->value = reinterpret_cast<void *>(index);
801}
802
803
804ExternalReferenceDecoder::ExternalReferenceDecoder()
805 : encodings_(NewArray<Address*>(kTypeCodeCount)) {
806 ExternalReferenceTable* external_references =
807 ExternalReferenceTable::instance();
808 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
809 int max = external_references->max_id(type) + 1;
810 encodings_[type] = NewArray<Address>(max + 1);
811 }
812 for (int i = 0; i < external_references->size(); ++i) {
813 Put(external_references->code(i), external_references->address(i));
814 }
815}
816
817
818ExternalReferenceDecoder::~ExternalReferenceDecoder() {
819 for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
820 DeleteArray(encodings_[type]);
821 }
822 DeleteArray(encodings_);
823}
824
825
826//------------------------------------------------------------------------------
827// Implementation of Serializer
828
829
// Helper class to write the bytes of the serialized heap.

class SnapshotWriter {
 public:
  SnapshotWriter() {
    len_ = 0;
    max_ = 8 << 10;  // 8K initial size
    str_ = NewArray<byte>(max_);
  }

  ~SnapshotWriter() {
    DeleteArray(str_);
  }

  // Returns a freshly allocated copy of the snapshot bytes; the caller
  // takes ownership of *str.
  void GetBytes(byte** str, int* len) {
    *str = NewArray<byte>(len_);
    memcpy(*str, str_, len_);
    *len = len_;
  }

  // Makes room for 'bytes' more bytes at position 'pos', growing the
  // buffer and shifting the tail as needed (defined out of line below).
  void Reserve(int bytes, int pos);

  // The Put* functions append at the current end of the snapshot.
  void PutC(char c) {
    InsertC(c, len_);
  }

  void PutInt(int i) {
    InsertInt(i, len_);
  }

  // Writes the pointer value itself, at native pointer width.
  void PutAddress(Address p) {
    PutBytes(reinterpret_cast<byte*>(&p), sizeof(p));
  }

  void PutBytes(const byte* a, int size) {
    InsertBytes(a, len_, size);
  }

  void PutString(const char* s) {
    InsertString(s, len_);
  }

  // The Insert* functions write at an arbitrary position (shifting any
  // existing tail up) and return the position just past the insertion.
  int InsertC(char c, int pos) {
    Reserve(1, pos);
    str_[pos] = c;
    len_++;
    return pos + 1;
  }

  int InsertInt(int i, int pos) {
    return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i));
  }

  int InsertBytes(const byte* a, int pos, int size) {
    Reserve(size, pos);
    memcpy(&str_[pos], a, size);
    len_ += size;
    return pos + size;
  }

  // Writes a length-prefixed string: '[' <length> ']' <chars>.
  int InsertString(const char* s, int pos);

  int length() { return len_; }

  // The current end of the snapshot, as a writable address.
  Address position() { return reinterpret_cast<Address>(&str_[len_]); }

 private:
  byte* str_;  // the snapshot
  int len_;  // the current length of str_
  int max_;  // the allocated size of str_
};
901
902
903void SnapshotWriter::Reserve(int bytes, int pos) {
904 CHECK(0 <= pos && pos <= len_);
905 while (len_ + bytes >= max_) {
906 max_ *= 2;
907 byte* old = str_;
908 str_ = NewArray<byte>(max_);
909 memcpy(str_, old, len_);
910 DeleteArray(old);
911 }
912 if (pos < len_) {
913 byte* old = str_;
914 str_ = NewArray<byte>(max_);
915 memcpy(str_, old, pos);
916 memcpy(str_ + pos + bytes, old + pos, len_ - pos);
917 DeleteArray(old);
918 }
919}
920
921int SnapshotWriter::InsertString(const char* s, int pos) {
922 int size = strlen(s);
923 pos = InsertC('[', pos);
924 pos = InsertInt(size, pos);
925 pos = InsertC(']', pos);
926 return InsertBytes(reinterpret_cast<const byte*>(s), pos, size);
927}
928
929
// An ObjectVisitor that records, for a single heap object, the offsets
// of its outgoing heap pointers, code targets, external references and
// runtime entries together with the encoded values the snapshot must
// contain at those offsets.  After the object's raw bytes are copied
// into the snapshot, Update() patches the copy in place.
class ReferenceUpdater: public ObjectVisitor {
 public:
  ReferenceUpdater(HeapObject* obj, Serializer* serializer)
    : obj_address_(obj->address()),
      serializer_(serializer),
      reference_encoder_(serializer->reference_encoder_),
      offsets_(8),
      addresses_(8),
      offsets_32_bit_(0),
      data_32_bit_(0) {
  }

  // Records each heap-object field together with the relative address
  // the serializer has already saved for its target.
  virtual void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; ++p) {
      if ((*p)->IsHeapObject()) {
        offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
        Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
        addresses_.Add(a);
      }
    }
  }

  virtual void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    Address encoded_target = serializer_->GetSavedAddress(target);
    // All calls and jumps are to code objects that encode into 32 bits.
    offsets_32_bit_.Add(rinfo->target_address_address() - obj_address_);
    uint32_t small_target =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(encoded_target));
    ASSERT(reinterpret_cast<uintptr_t>(encoded_target) == small_target);
    data_32_bit_.Add(small_target);
  }


  // External references are replaced by their table encodings.  The
  // CHECK enforces the invariant that only NULL encodes to 0.
  virtual void VisitExternalReferences(Address* start, Address* end) {
    for (Address* p = start; p < end; ++p) {
      uint32_t code = reference_encoder_->Encode(*p);
      CHECK(*p == NULL ? code == 0 : code != 0);
      offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
      addresses_.Add(reinterpret_cast<Address>(code));
    }
  }

  // Runtime entries in relocation info are likewise replaced by their
  // external-reference encodings.
  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
    Address target = rinfo->target_address();
    uint32_t encoding = reference_encoder_->Encode(target);
    CHECK(target == NULL ? encoding == 0 : encoding != 0);
    offsets_.Add(rinfo->target_address_address() - obj_address_);
    addresses_.Add(reinterpret_cast<Address>(encoding));
  }

  // Patches the snapshot copy of the object, which starts at
  // start_address, with the recorded full-width and 32-bit values.
  void Update(Address start_address) {
    for (int i = 0; i < offsets_.length(); i++) {
      memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
    }
    for (int i = 0; i < offsets_32_bit_.length(); i++) {
      memcpy(start_address + offsets_32_bit_[i], &data_32_bit_[i],
             sizeof(uint32_t));
    }
  }

 private:
  Address obj_address_;  // Start address of the object being visited.
  Serializer* serializer_;
  ExternalReferenceEncoder* reference_encoder_;
  List<int> offsets_;        // Offsets of pointer-width patches...
  List<Address> addresses_;  // ...and the values to store there.
  // Some updates are 32-bit even on a 64-bit platform.
  // We keep a separate list of them on 64-bit platforms.
  List<int> offsets_32_bit_;
  List<uint32_t> data_32_bit_;
};
1003
1004
1005// Helper functions for a map of encoded heap object addresses.
1006static uint32_t HeapObjectHash(HeapObject* key) {
1007 uint32_t low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
1008 return low32bits >> 2;
1009}
1010
1011
// Equality predicate for the saved-addresses hash map: two keys match only
// when they are the very same pointer.
static bool MatchHeapObject(void* key1, void* key2) {
  bool identical = (key1 == key2);
  return identical;
}
1015
1016
// Set up a serializer: empty stream writer, one simulated heap space per
// allocation space, and a pointer-identity map for visited objects.
Serializer::Serializer()
    : global_handles_(4),
      saved_addresses_(MatchHeapObject) {
  root_ = true;   // Start out visiting root pointers.
  roots_ = 0;     // Statistics: number of root pointers seen.
  objects_ = 0;   // Statistics: number of objects serialized.
  reference_encoder_ = NULL;  // Created lazily in Serialize().
  writer_ = new SnapshotWriter();
  // One allocation simulator per heap space, used to predict addresses.
  for (int i = 0; i <= LAST_SPACE; i++) {
    allocator_[i] = new SimulatedHeapSpace();
  }
}
1029
1030
1031Serializer::~Serializer() {
1032 for (int i = 0; i <= LAST_SPACE; i++) {
1033 delete allocator_[i];
1034 }
1035 if (reference_encoder_) delete reference_encoder_;
1036 delete writer_;
1037}
1038
1039
// Off by default; Serialize() CHECKs that it has been enabled first.
bool Serializer::serialization_enabled_ = false;
1041
1042
#ifdef DEBUG
// Maximum length of a synchronization tag; Deserializer::Synchronize reads
// tags into a stack buffer of exactly this size.
static const int kMaxTagLength = 32;

// In debug-serialization mode, emit a tagged marker ('S', length, bytes)
// so the deserializer can verify it is still in step with the stream.
void Serializer::Synchronize(const char* tag) {
  if (FLAG_debug_serialization) {
    int length = strlen(tag);
    ASSERT(length <= kMaxTagLength);
    writer_->PutC('S');
    writer_->PutInt(length);
    writer_->PutBytes(reinterpret_cast<const byte*>(tag), length);
  }
}
#endif
1056
1057
1058void Serializer::InitializeAllocators() {
1059 for (int i = 0; i <= LAST_SPACE; i++) {
1060 allocator_[i]->InitEmptyHeap(static_cast<AllocationSpace>(i));
1061 }
1062}
1063
1064
1065bool Serializer::IsVisited(HeapObject* obj) {
1066 HashMap::Entry* entry =
1067 saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
1068 return entry != NULL;
1069}
1070
1071
1072Address Serializer::GetSavedAddress(HeapObject* obj) {
1073 HashMap::Entry* entry =
1074 saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
1075 ASSERT(entry != NULL);
1076 return reinterpret_cast<Address>(entry->value);
1077}
1078
1079
1080void Serializer::SaveAddress(HeapObject* obj, Address addr) {
1081 HashMap::Entry* entry =
1082 saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
1083 entry->value = addr;
1084}
1085
1086
// Write a complete heap snapshot to the writer.  Requires a quiescent VM:
// no threads, no handle scopes, no weak global handles.
void Serializer::Serialize() {
  // No active threads.
  CHECK_EQ(NULL, ThreadState::FirstInUse());
  // No active or weak handles.
  CHECK(HandleScopeImplementer::instance()->blocks()->is_empty());
  CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
  // We need a counter function during serialization to resolve the
  // references to counters in the code on the heap.
  CHECK(StatsTable::HasCounterFunction());
  CHECK(enabled());
  InitializeAllocators();
  reference_encoder_ = new ExternalReferenceEncoder();
  // The header (flags, space sizes, global handles) must precede the roots
  // so the deserializer can size its spaces first.
  PutHeader();
  // Serializes every object reachable from the roots as a side effect.
  Heap::IterateRoots(this);
  PutLog();
  PutContextStack();
  // Serialization is switched off once the snapshot is complete.
  Disable();
}
1105
1106
// Hand the accumulated snapshot bytes and their length back to the caller.
void Serializer::Finalize(byte** str, int* len) {
  writer_->GetBytes(str, len);
}
1110
1111
1112// Serialize objects by writing them into the stream.
1113
1114void Serializer::VisitPointers(Object** start, Object** end) {
1115 bool root = root_;
1116 root_ = false;
1117 for (Object** p = start; p < end; ++p) {
1118 bool serialized;
1119 Address a = Encode(*p, &serialized);
1120 if (root) {
1121 roots_++;
1122 // If the object was not just serialized,
1123 // write its encoded address instead.
1124 if (!serialized) PutEncodedAddress(a);
1125 }
1126 }
1127 root_ = root;
1128}
1129
1130
1131void Serializer::VisitCodeTarget(RelocInfo* rinfo) {
1132 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1133 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1134 bool serialized;
1135 Encode(target, &serialized);
1136}
1137
1138
1139class GlobalHandlesRetriever: public ObjectVisitor {
1140 public:
1141 explicit GlobalHandlesRetriever(List<Object**>* handles)
1142 : global_handles_(handles) {}
1143
1144 virtual void VisitPointers(Object** start, Object** end) {
1145 for (; start != end; ++start) {
1146 global_handles_->Add(start);
1147 }
1148 }
1149
1150 private:
1151 List<Object**>* global_handles_;
1152};
1153
1154
// Write the command-line flags as 'F', count, '[' flag|flag|... ']'.
// Consumes the argv list: each string and the list itself are freed here.
void Serializer::PutFlags() {
  writer_->PutC('F');
  List<const char*>* argv = FlagList::argv();
  writer_->PutInt(argv->length());
  writer_->PutC('[');
  for (int i = 0; i < argv->length(); i++) {
    if (i > 0) writer_->PutC('|');
    writer_->PutString((*argv)[i]);
    DeleteArray((*argv)[i]);  // Free the flag string once written.
  }
  writer_->PutC(']');
  // Remember where the flags end; PutLog() splices the log in right here.
  flags_end_ = writer_->length();
  delete argv;
}
1169
1170
// Write the snapshot header: flags, debug-serialization marker, regexp
// engine marker, paged-space sizes, and one placeholder per global handle.
void Serializer::PutHeader() {
  PutFlags();
  writer_->PutC('D');
#ifdef DEBUG
  writer_->PutC(FLAG_debug_serialization ? '1' : '0');
#else
  writer_->PutC('0');
#endif
#ifdef V8_NATIVE_REGEXP
  writer_->PutC('N');
#else  // Interpreted regexp
  writer_->PutC('I');
#endif
  // Write sizes of paged memory spaces. Allocate extra space for the old
  // and code spaces, because objects in new space will be promoted to them.
  writer_->PutC('S');
  writer_->PutC('[');
  writer_->PutInt(Heap::old_pointer_space()->Size() +
                  Heap::new_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::map_space()->Size());
  writer_->PutC('|');
  writer_->PutInt(Heap::cell_space()->Size());
  writer_->PutC(']');
  // Write global handles: one 'N' marker per handle; the deserializer
  // creates a matching placeholder for each (see GetHeader).
  writer_->PutC('G');
  writer_->PutC('[');
  GlobalHandlesRetriever ghr(&global_handles_);
  GlobalHandles::IterateRoots(&ghr);
  for (int i = 0; i < global_handles_.length(); i++) {
    writer_->PutC('N');
  }
  writer_->PutC(']');
}
1209
1210
// If code logging is enabled, shut down the logger and splice the contents
// of the log file into the stream just after the flags ('L' marker).
void Serializer::PutLog() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_code) {
    Logger::TearDown();  // Flushes/closes the log so the file is complete.
    int pos = writer_->InsertC('L', flags_end_);
    bool exists;
    Vector<const char> log = ReadFile(FLAG_logfile, &exists);
    writer_->InsertString(log.start(), pos);
    log.Dispose();
  }
#endif
}
1223
1224
1225static int IndexOf(const List<Object**>& list, Object** element) {
1226 for (int i = 0; i < list.length(); i++) {
1227 if (list[i] == element) return i;
1228 }
1229 return -1;
1230}
1231
1232
// Write a stack of handles as '[' count |index|index... ']', where each
// index refers into the global handle list, emitted in reverse list order.
void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) {
  writer_->PutC('[');
  writer_->PutInt(stack.length());
  for (int i = stack.length() - 1; i >= 0; i--) {
    writer_->PutC('|');
    // Every handle on the stack must be a known global handle.
    int gh_index = IndexOf(global_handles_, stack[i].location());
    CHECK_GE(gh_index, 0);
    writer_->PutInt(gh_index);
  }
  writer_->PutC(']');
}
1244
1245
// Serialize the stack of saved contexts as 'C' '[' count <pointers> ']'.
// The contexts are popped off the HandleScopeImplementer to enumerate them
// and then pushed back, leaving the VM state unchanged.
void Serializer::PutContextStack() {
  List<Context*> contexts(2);
  while (HandleScopeImplementer::instance()->HasSavedContexts()) {
    Context* context =
        HandleScopeImplementer::instance()->RestoreContext();
    contexts.Add(context);
  }
  // Re-save in reverse of retrieval order to restore the original stack.
  for (int i = contexts.length() - 1; i >= 0; i--) {
    HandleScopeImplementer::instance()->SaveContext(contexts[i]);
  }
  writer_->PutC('C');
  writer_->PutC('[');
  writer_->PutInt(contexts.length());
  // Serialize the context pointers themselves as a root-style pointer run.
  if (!contexts.is_empty()) {
    Object** start = reinterpret_cast<Object**>(&contexts.first());
    VisitPointers(start, start + contexts.length());
  }
  writer_->PutC(']');
}
1265
// Emit a back-reference: 'P' followed by the encoded address of an
// already-serialized object.
void Serializer::PutEncodedAddress(Address addr) {
  writer_->PutC('P');
  writer_->PutAddress(addr);
}
1270
1271
1272Address Serializer::Encode(Object* o, bool* serialized) {
1273 *serialized = false;
1274 if (o->IsSmi()) {
1275 return reinterpret_cast<Address>(o);
1276 } else {
1277 HeapObject* obj = HeapObject::cast(o);
1278 if (IsVisited(obj)) {
1279 return GetSavedAddress(obj);
1280 } else {
1281 // First visit: serialize the object.
1282 *serialized = true;
1283 return PutObject(obj);
1284 }
1285 }
1286}
1287
1288
// Serialize obj — prologue, recursively embedded objects, raw body with
// patched references — and return the encoded address it will occupy
// after deserialization.
Address Serializer::PutObject(HeapObject* obj) {
  Map* map = obj->map();
  InstanceType type = map->instance_type();
  int size = obj->SizeFromMap(map);

  // Simulate the allocation of obj to predict where it will be
  // allocated during deserialization.
  Address addr = Allocate(obj).Encode();

  // Record the address before recursing, so cycles resolve correctly.
  SaveAddress(obj, addr);

  if (type == CODE_TYPE) {
    LOG(CodeMoveEvent(obj->address(), addr));
  }

  // Write out the object prologue: type, size, and simulated address of obj.
  writer_->PutC('[');
  CHECK_EQ(0, static_cast<int>(size & kObjectAlignmentMask));
  writer_->PutInt(type);
  writer_->PutInt(size >> kObjectAlignmentBits);
  PutEncodedAddress(addr);  // encodes AllocationSpace

  // Visit all the pointers in the object other than the map. This
  // will recursively serialize any as-yet-unvisited objects.
  obj->Iterate(this);

  // Mark end of recursively embedded objects, start of object body.
  writer_->PutC('|');
  // Write out the raw contents of the object. No compression, but
  // fast to deserialize.
  writer_->PutBytes(obj->address(), size);
  // Update pointers and external references in the written object.
  ReferenceUpdater updater(obj, this);
  obj->Iterate(&updater);
  updater.Update(writer_->position() - size);

#ifdef DEBUG
  if (FLAG_debug_serialization) {
    // Write out the object epilogue to catch synchronization errors.
    PutEncodedAddress(addr);
    writer_->PutC(']');
  }
#endif

  objects_++;
  return addr;
}
1336
1337
// Simulate allocating obj in the deserialized heap and return its predicted
// relative address.  New-space objects are placed where the deserializer
// will actually put them: an old space, or LO_SPACE if too big for a page.
RelativeAddress Serializer::Allocate(HeapObject* obj) {
  // Find out which AllocationSpace 'obj' is in.
  AllocationSpace s;
  bool found = false;
  for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
    s = static_cast<AllocationSpace>(i);
    found = Heap::InSpace(obj, s);
  }
  CHECK(found);  // Guards against using 's' uninitialized below.
  int size = obj->Size();
  if (s == NEW_SPACE) {
    if (size > Heap::MaxObjectSizeInPagedSpace()) {
      s = LO_SPACE;
    } else {
      OldSpace* space = Heap::TargetSpace(obj);
      ASSERT(space == Heap::old_pointer_space() ||
             space == Heap::old_data_space());
      s = (space == Heap::old_pointer_space()) ?
          OLD_POINTER_SPACE :
          OLD_DATA_SPACE;
    }
  }
  // Classify the object so the simulated space can treat it like the GC
  // would: pointer-containing, code, or plain data.
  GCTreatment gc_treatment = DataObject;
  if (obj->IsFixedArray()) gc_treatment = PointerObject;
  else if (obj->IsCode()) gc_treatment = CodeObject;
  return allocator_[s]->Allocate(size, gc_treatment);
}
1365
1366
1367//------------------------------------------------------------------------------
1368// Implementation of Deserializer
1369
1370
// Initial capacity for the deserializer's page/object bookkeeping lists.
static const int kInitArraySize = 32;
1372
1373
// Set up a deserializer over the snapshot bytes [str, str+len): per-space
// page caches, a large-object cache, and the global handle placeholder list.
Deserializer::Deserializer(const byte* str, int len)
    : reader_(str, len),
      map_pages_(kInitArraySize),
      cell_pages_(kInitArraySize),
      old_pointer_pages_(kInitArraySize),
      old_data_pages_(kInitArraySize),
      code_pages_(kInitArraySize),
      large_objects_(kInitArraySize),
      global_handles_(4) {
  root_ = true;   // Start out reading root pointers.
  roots_ = 0;     // Statistics: number of root pointers read.
  objects_ = 0;   // Statistics: number of objects materialized.
  reference_decoder_ = NULL;  // Created in Deserialize().
#ifdef DEBUG
  expect_debug_information_ = false;  // Set from the header in GetHeader().
#endif
}
1391
1392
1393Deserializer::~Deserializer() {
1394 if (reference_decoder_) delete reference_decoder_;
1395}
1396
1397
// Read an encoded address and check (debug builds only) that it matches
// the expected value; USE() keeps release builds warning-free.
void Deserializer::ExpectEncodedAddress(Address expected) {
  Address a = GetEncodedAddress();
  USE(a);
  ASSERT(a == expected);
}
1403
1404
#ifdef DEBUG
// Counterpart of Serializer::Synchronize: read a tagged marker and assert
// it matches the expected tag, catching stream desynchronization early.
void Deserializer::Synchronize(const char* tag) {
  if (expect_debug_information_) {
    char buf[kMaxTagLength];
    reader_.ExpectC('S');
    int length = reader_.GetInt();
    ASSERT(length <= kMaxTagLength);  // buf holds kMaxTagLength bytes.
    reader_.GetBytes(reinterpret_cast<Address>(buf), length);
    // buf is not NUL-terminated, so compare exactly 'length' characters.
    ASSERT_EQ(strlen(tag), length);
    ASSERT(strncmp(tag, buf, length) == 0);
  }
}
#endif
1418
1419
// Read a complete snapshot and rebuild the heap.  Requires a quiescent VM:
// no threads and no active handle scopes.
void Deserializer::Deserialize() {
  // No active threads.
  ASSERT_EQ(NULL, ThreadState::FirstInUse());
  // No active handles.
  ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
  reference_decoder_ = new ExternalReferenceDecoder();
  // By setting linear allocation only, we forbid the use of free list
  // allocation which is not predicted by SimulatedAddress.
  GetHeader();
  // Reads every serialized object as a side effect of visiting the roots.
  Heap::IterateRoots(this);
  GetContextStack();
}
1432
1433
// Fix up a run of pointer slots.  At root level the stream dictates whether
// the next item is an embedded object ('[') or a back-reference ('P');
// inside an already-read object each slot holds an encoded address that is
// resolved in place.
void Deserializer::VisitPointers(Object** start, Object** end) {
  bool root = root_;
  root_ = false;  // Nested visits (from GetObject) are non-root.
  for (Object** p = start; p < end; ++p) {
    if (root) {
      roots_++;
      // Read the next object from the stream, or a pointer to a
      // previously deserialized object.
      int c = reader_.GetC();
      if (c == '[') {
        *p = GetObject();  // embedded object
      } else {
        ASSERT(c == 'P');  // pointer to previously serialized object
        *p = Resolve(reader_.GetAddress());
      }
    } else {
      // A pointer internal to a HeapObject that we've already
      // read: resolve it to a true address (or Smi)
      *p = Resolve(reinterpret_cast<Address>(*p));
    }
  }
  root_ = root;
}
1457
1458
// Resolve a serialized code-target reference: read the 32-bit encoded code
// object address stored at the site, map it to the real Code object, and
// patch the site to point at its instructions.
void Deserializer::VisitCodeTarget(RelocInfo* rinfo) {
  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
  // On all platforms, the encoded code object address is only 32 bits.
  Address encoded_address = reinterpret_cast<Address>(Memory::uint32_at(
      reinterpret_cast<Address>(rinfo->target_object_address())));
  Code* target_object = reinterpret_cast<Code*>(Resolve(encoded_address));
  rinfo->set_target_address(target_object->instruction_start());
}
1467
1468
1469void Deserializer::VisitExternalReferences(Address* start, Address* end) {
1470 for (Address* p = start; p < end; ++p) {
1471 uint32_t code = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*p));
1472 *p = reference_decoder_->Decode(code);
1473 }
1474}
1475
1476
1477void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) {
1478 uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->target_address_address());
1479 uint32_t encoding = *pc;
1480 Address target = reference_decoder_->Decode(encoding);
1481 rinfo->set_target_address(target);
1482}
1483
1484
1485void Deserializer::GetFlags() {
1486 reader_.ExpectC('F');
1487 int argc = reader_.GetInt() + 1;
1488 char** argv = NewArray<char*>(argc);
1489 reader_.ExpectC('[');
1490 for (int i = 1; i < argc; i++) {
1491 if (i > 1) reader_.ExpectC('|');
1492 argv[i] = reader_.GetString();
1493 }
1494 reader_.ExpectC(']');
1495 has_log_ = false;
1496 for (int i = 1; i < argc; i++) {
1497 if (strcmp("--log_code", argv[i]) == 0) {
1498 has_log_ = true;
1499 } else if (strcmp("--nouse_ic", argv[i]) == 0) {
1500 FLAG_use_ic = false;
1501 } else if (strcmp("--debug_code", argv[i]) == 0) {
1502 FLAG_debug_code = true;
1503 } else if (strcmp("--nolazy", argv[i]) == 0) {
1504 FLAG_lazy = false;
1505 }
1506 DeleteArray(argv[i]);
1507 }
1508
1509 DeleteArray(argv);
1510}
1511
1512
// If the snapshot carries a log section ('L', written when it was built
// with --log_code), read it and replay it as the logger preamble.
void Deserializer::GetLog() {
  if (has_log_) {
    reader_.ExpectC('L');
    char* snapshot_log = reader_.GetString();
#ifdef ENABLE_LOGGING_AND_PROFILING
    if (FLAG_log_code) {
      LOG(Preamble(snapshot_log));
    }
#endif
    DeleteArray(snapshot_log);
  }
}
1525
1526
1527static void InitPagedSpace(PagedSpace* space,
1528 int capacity,
1529 List<Page*>* page_list) {
1530 if (!space->EnsureCapacity(capacity)) {
1531 V8::FatalProcessOutOfMemory("InitPagedSpace");
1532 }
1533 PageIterator it(space, PageIterator::ALL_PAGES);
1534 while (it.has_next()) page_list->Add(it.next());
1535}
1536
1537
// Read the snapshot header written by Serializer::PutHeader: debug marker,
// regexp engine marker, paged-space sizes, and global handle placeholders.
void Deserializer::GetHeader() {
  reader_.ExpectC('D');
#ifdef DEBUG
  expect_debug_information_ = reader_.GetC() == '1';
#else
  // In release mode, don't attempt to read a snapshot containing
  // synchronization tags.
  if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
#endif
#ifdef V8_NATIVE_REGEXP
  reader_.ExpectC('N');
#else  // Interpreted regexp.
  reader_.ExpectC('I');
#endif
  // Ensure sufficient capacity in paged memory spaces to avoid growth
  // during deserialization.
  reader_.ExpectC('S');
  reader_.ExpectC('[');
  InitPagedSpace(Heap::old_pointer_space(),
                 reader_.GetInt(),
                 &old_pointer_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
  reader_.ExpectC('|');
  InitPagedSpace(Heap::cell_space(), reader_.GetInt(), &cell_pages_);
  reader_.ExpectC(']');
  // Create placeholders for global handles, to be filled in during
  // IterateRoots.
  reader_.ExpectC('G');
  reader_.ExpectC('[');
  int c = reader_.GetC();
  while (c != ']') {
    ASSERT(c == 'N');
    global_handles_.Add(GlobalHandles::Create(NULL).location());
    c = reader_.GetC();
  }
}
1579
1580
1581void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) {
1582 reader_.ExpectC('[');
1583 int length = reader_.GetInt();
1584 for (int i = 0; i < length; i++) {
1585 reader_.ExpectC('|');
1586 int gh_index = reader_.GetInt();
1587 stack->Add(global_handles_[gh_index]);
1588 }
1589 reader_.ExpectC(']');
1590}
1591
1592
// Read the context stack written by Serializer::PutContextStack and restore
// each context into the HandleScopeImplementer.
void Deserializer::GetContextStack() {
  reader_.ExpectC('C');
  CHECK_EQ(reader_.GetC(), '[');
  int count = reader_.GetInt();
  // NOTE(review): the list is constructed with capacity 'count' but no
  // elements are Add()ed before its backing store is written through
  // first()/VisitPointers and then indexed below — this relies on List's
  // preallocated storage being addressable; verify against the List
  // implementation.
  List<Context*> entered_contexts(count);
  if (count > 0) {
    Object** start = reinterpret_cast<Object**>(&entered_contexts.first());
    VisitPointers(start, start + count);
  }
  reader_.ExpectC(']');
  for (int i = 0; i < count; i++) {
    HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]);
  }
}
1607
1608
// Read a back-reference ('P' + address) and return the encoded address.
Address Deserializer::GetEncodedAddress() {
  reader_.ExpectC('P');
  return reader_.GetAddress();
}
1613
1614
// Materialize one serialized object: allocate it at the address predicted
// by the serializer, recursively read embedded objects, copy in the raw
// body, and resolve the encoded pointers it contains.
Object* Deserializer::GetObject() {
  // Read the prologue: type, size and encoded address.
  InstanceType type = static_cast<InstanceType>(reader_.GetInt());
  int size = reader_.GetInt() << kObjectAlignmentBits;
  Address a = GetEncodedAddress();

  // Get a raw object of the right size in the right space.
  AllocationSpace space = GetSpace(a);
  Object* o;
  if (IsLargeExecutableObject(a)) {
    o = Heap::lo_space()->AllocateRawCode(size);
  } else if (IsLargeFixedArray(a)) {
    o = Heap::lo_space()->AllocateRawFixedArray(size);
  } else {
    // New-space objects were redirected by the serializer to the space
    // they would be promoted to; mirror that here for retries.
    AllocationSpace retry_space = (space == NEW_SPACE)
        ? Heap::TargetSpaceId(type)
        : space;
    o = Heap::AllocateRaw(size, space, retry_space);
  }
  ASSERT(!o->IsFailure());
  // Check that the simulation of heap allocation was correct.
  ASSERT(o == Resolve(a));

  // Read any recursively embedded objects.
  int c = reader_.GetC();
  while (c == '[') {
    GetObject();
    c = reader_.GetC();
  }
  ASSERT(c == '|');  // '|' separates embedded objects from the raw body.

  HeapObject* obj = reinterpret_cast<HeapObject*>(o);
  // Read the uninterpreted contents of the object after the map
  reader_.GetBytes(obj->address(), size);
#ifdef DEBUG
  if (expect_debug_information_) {
    // Read in the epilogue to check that we're still synchronized
    ExpectEncodedAddress(a);
    reader_.ExpectC(']');
  }
#endif

  // Resolve the encoded pointers we just read in.
  // Same as obj->Iterate(this), but doesn't rely on the map pointer being set.
  VisitPointer(reinterpret_cast<Object**>(obj->address()));
  obj->IterateBody(type, size, this);

  if (type == CODE_TYPE) {
    LOG(CodeMoveEvent(a, obj->address()));
  }
  objects_++;
  return o;
}
1668
1669
1670static inline Object* ResolvePaged(int page_index,
1671 int page_offset,
1672 PagedSpace* space,
1673 List<Page*>* page_list) {
1674 ASSERT(page_index < page_list->length());
1675 Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
1676 return HeapObject::FromAddress(address);
1677}
1678
1679
1680template<typename T>
1681void ConcatReversed(List<T>* target, const List<T>& source) {
1682 for (int i = source.length() - 1; i >= 0; i--) {
1683 target->Add(source[i]);
1684 }
1685}
1686
1687
// Translate an encoded address from the snapshot into the real Smi or heap
// object it denotes in the freshly built heap.  Paged spaces go through the
// cached page lists; large objects through a lazily extended object cache.
Object* Deserializer::Resolve(Address encoded) {
  Object* o = reinterpret_cast<Object*>(encoded);
  if (o->IsSmi()) return o;  // Smis encode as themselves.

  // Encoded addresses of HeapObjects always have 'HeapObject' tags.
  ASSERT(o->IsHeapObject());
  switch (GetSpace(encoded)) {
    // For Map space and Old space, we cache the known Pages in map_pages,
    // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
    // of page addresses, we don't rely on it since GetObject uses AllocateRaw,
    // and that appears not to update the page list.
    case MAP_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::map_space(), &map_pages_);
    case CELL_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::cell_space(), &cell_pages_);
    case OLD_POINTER_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::old_pointer_space(), &old_pointer_pages_);
    case OLD_DATA_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::old_data_space(), &old_data_pages_);
    case CODE_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::code_space(), &code_pages_);
    case NEW_SPACE:
      // New-space encodings are plain offsets from the space start.
      return HeapObject::FromAddress(Heap::NewSpaceStart() +
                                     NewSpaceOffset(encoded));
    case LO_SPACE:
      // Cache the known large_objects, allocated one per 'page'.
      // Newly allocated large objects appear at the front of the iterator,
      // so they are appended to the cache in reverse iteration order.
      int index = LargeObjectIndex(encoded);
      if (index >= large_objects_.length()) {
        int new_object_count =
          Heap::lo_space()->PageCount() - large_objects_.length();
        List<Object*> new_objects(new_object_count);
        LargeObjectIterator it(Heap::lo_space());
        for (int i = 0; i < new_object_count; i++) {
          new_objects.Add(it.next());
        }
#ifdef DEBUG
        // The remainder of the iteration must match the cached tail.
        for (int i = large_objects_.length() - 1; i >= 0; i--) {
          ASSERT(it.next() == large_objects_[i]);
        }
#endif
        ConcatReversed(&large_objects_, new_objects);
        ASSERT(index < large_objects_.length());
      }
      return large_objects_[index];  // s.page_offset() is ignored.
  }
  UNREACHABLE();
  return NULL;
}
1741
1742
1743} } // namespace v8::internal