// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SNAPSHOT_SERIALIZE_H_
#define V8_SNAPSHOT_SERIALIZE_H_

#include "src/address-map.h"
#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/snapshot/snapshot-source-sink.h"

namespace v8 {
namespace internal {

class Isolate;
class ScriptData;

static const int kDeoptTableSerializeEntryCount = 64;

// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  static ExternalReferenceTable* instance(Isolate* isolate);

  int size() const { return refs_.length(); }
  Address address(int i) { return refs_[i].address; }
  const char* name(int i) { return refs_[i].name; }

  inline static Address NotAvailable() { return NULL; }

 private:
  struct ExternalReferenceEntry {
    Address address;
    const char* name;
  };

  explicit ExternalReferenceTable(Isolate* isolate);

  void Add(Address address, const char* name) {
    ExternalReferenceEntry entry = {address, name};
    refs_.Add(entry);
  }

  List<ExternalReferenceEntry> refs_;

  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};


class ExternalReferenceEncoder {
 public:
  explicit ExternalReferenceEncoder(Isolate* isolate);

  uint32_t Encode(Address key) const;

  const char* NameOfAddress(Isolate* isolate, Address address) const;

 private:
  static uint32_t Hash(Address key) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
                                 kPointerSizeLog2);
  }

  HashMap* map_;

  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
};
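
// Note: Hash() shifts away the low kPointerSizeLog2 bits, which are zero for
// pointer-aligned addresses, so addresses hash densely. Encode() maps an
// external reference address to a stable id (presumably its index in the
// ExternalReferenceTable) that the snapshot can store instead of a raw
// pointer; the exact encoding lives in the .cc file.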


class PartialCacheIndexMap : public AddressMapBase {
 public:
  PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}

  static const int kInvalidIndex = -1;

  // Look up the object in the map. Return its index if found, or create
  // a new entry with new_index as value, and return kInvalidIndex.
  int LookupOrInsert(HeapObject* obj, int new_index) {
    HashMap::Entry* entry = LookupEntry(&map_, obj, false);
    if (entry != NULL) return GetValue(entry);
    SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
    return kInvalidIndex;
  }

 private:
  HashMap map_;

  DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
};
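
// Usage sketch:
//   PartialCacheIndexMap map;
//   map.LookupOrInsert(obj, 7);  // First call: stores 7, returns
//                                // kInvalidIndex.
//   map.LookupOrInsert(obj, 9);  // Later calls: return the stored 7.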


class HotObjectsList {
 public:
  HotObjectsList() : index_(0) {
    for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
  }

  void Add(HeapObject* object) {
    circular_queue_[index_] = object;
    index_ = (index_ + 1) & kSizeMask;
  }

  HeapObject* Get(int index) {
    DCHECK_NOT_NULL(circular_queue_[index]);
    return circular_queue_[index];
  }

  static const int kNotFound = -1;

  int Find(HeapObject* object) {
    for (int i = 0; i < kSize; i++) {
      if (circular_queue_[i] == object) return i;
    }
    return kNotFound;
  }

  static const int kSize = 8;

 private:
  STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
  static const int kSizeMask = kSize - 1;
  HeapObject* circular_queue_[kSize];
  int index_;

  DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
};
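
// Example: with kSize == 8 the list is a fixed-size ring; adding a ninth
// object overwrites the oldest slot, so only the eight most recently added
// objects can be found. This lets the serializer refer to a recently emitted
// object with a single kHotObject byte code (defined below) instead of a
// full back reference.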


// SerializerDeserializer is a common superclass for Serializer and
// Deserializer; it holds the constants and methods shared by both.
class SerializerDeserializer: public ObjectVisitor {
 public:
  static void Iterate(Isolate* isolate, ObjectVisitor* visitor);

  // No reservation for large object space necessary.
  static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
  static const int kNumberOfSpaces = LAST_SPACE + 1;

 protected:
  static bool CanBeDeferred(HeapObject* o);

  // ---------- byte code range 0x00..0x7f ----------
  // Byte codes in this range represent Where, HowToCode and WhereToPoint.
  // Where the pointed-to object can be found:
  // The static assert below will trigger when the number of preallocated
  // spaces changes. If that happens, update the bytecode ranges in the
  // comments below.
  STATIC_ASSERT(5 == kNumberOfSpaces);
  enum Where {
    // 0x00..0x04  Allocate new object, in specified space.
    kNewObject = 0,
    // 0x05        Unused (including 0x25, 0x45, 0x65).
    // 0x06        Unused (including 0x26, 0x46, 0x66).
    // 0x07        Unused (including 0x27, 0x47, 0x67).
    // 0x08..0x0c  Reference to previous object from space.
    kBackref = 0x08,
    // 0x0d        Unused (including 0x2d, 0x4d, 0x6d).
    // 0x0e        Unused (including 0x2e, 0x4e, 0x6e).
    // 0x0f        Unused (including 0x2f, 0x4f, 0x6f).
    // 0x10..0x14  Reference to previous object from space after skip.
    kBackrefWithSkip = 0x10,
    // 0x15        Unused (including 0x35, 0x55, 0x75).
    // 0x16        Unused (including 0x36, 0x56, 0x76).
    // 0x17        Misc (including 0x37, 0x57, 0x77).
    // 0x18        Root array item.
    kRootArray = 0x18,
    // 0x19        Object in the partial snapshot cache.
    kPartialSnapshotCache = 0x19,
    // 0x1a        External reference referenced by id.
    kExternalReference = 0x1a,
    // 0x1b        Object provided in the attached list.
    kAttachedReference = 0x1b,
    // 0x1c        Builtin code referenced by index.
    kBuiltin = 0x1c
    // 0x1d..0x1f  Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
  };

  static const int kWhereMask = 0x1f;
  static const int kSpaceMask = 7;
  STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);

  // How to code the pointer to the object.
  enum HowToCode {
    // Straight pointer.
    kPlain = 0,
    // A pointer inlined in code. What this means depends on the architecture.
    kFromCode = 0x20
  };

  static const int kHowToCodeMask = 0x20;

  // Where to point within the object.
  enum WhereToPoint {
    // Points to start of object.
    kStartOfObject = 0,
    // Points to instruction in code object or payload of cell.
    kInnerPointer = 0x40
  };

  static const int kWhereToPointMask = 0x40;
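
  // Example: a byte code in this range is the OR of the three fields, with
  // kPlain and kStartOfObject contributing zero. Assuming OLD_SPACE has
  // space index 1, a back reference to an old-space object, inlined in code
  // and pointing at an inner pointer, is encoded as
  //   (kBackref + 1) | kFromCode | kInnerPointer == 0x09 | 0x20 | 0x40
  //                                              == 0x69.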

  // ---------- Misc ----------
  // Skip.
  static const int kSkip = 0x1d;
  // Internal reference encoded as offsets of pc and target from code entry.
  static const int kInternalReference = 0x1e;
  static const int kInternalReferenceEncoded = 0x1f;
  // Do nothing, used for padding.
  static const int kNop = 0x3d;
  // Move to next reserved chunk.
  static const int kNextChunk = 0x3e;
  // Deferring object content.
  static const int kDeferred = 0x3f;
  // Used for the source code of the natives, which is in the executable, but
  // is referred to from external strings in the snapshot.
  static const int kNativesStringResource = 0x5d;
  // Used for the source code for compiled stubs, which is in the executable,
  // but is referred to from external strings in the snapshot.
  static const int kExtraNativesStringResource = 0x5e;
  // A tag emitted at strategic points in the snapshot to delineate sections.
  // If the deserializer does not find these at the expected moments then it
  // is an indication that the snapshot and the VM do not fit together.
  // Examine the build process for architecture, version or configuration
  // mismatches.
  static const int kSynchronize = 0x17;
  // Repeats of variable length.
  static const int kVariableRepeat = 0x37;
  // Raw data of variable length.
  static const int kVariableRawData = 0x57;
  // Alignment prefixes 0x7d..0x7f.
  static const int kAlignmentPrefix = 0x7d;

  // 0x77 unused

  // ---------- byte code range 0x80..0xff ----------
  // First 32 root array items.
  static const int kNumberOfRootArrayConstants = 0x20;
  // 0x80..0x9f
  static const int kRootArrayConstants = 0x80;
  // 0xa0..0xbf
  static const int kRootArrayConstantsWithSkip = 0xa0;
  static const int kRootArrayConstantsMask = 0x1f;
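
  // Example: root array constant opcodes encode the root index in the low
  // bits: byte 0x80 + i refers to root i (0 <= i < 0x20), 0xa0 + i is the
  // same with a preceding skip, and the decoder recovers i as
  // (data & kRootArrayConstantsMask).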

  // 8 hot (recently seen or back-referenced) objects with optional skip.
  static const int kNumberOfHotObjects = 0x08;
  // 0xc0..0xc7
  static const int kHotObject = 0xc0;
  // 0xc8..0xcf
  static const int kHotObjectWithSkip = 0xc8;
  static const int kHotObjectMask = 0x07;

  // 32 common raw data lengths.
  static const int kNumberOfFixedRawData = 0x20;
  // 0xd0..0xef
  static const int kFixedRawData = 0xd0;
  static const int kOnePointerRawData = kFixedRawData;
  static const int kFixedRawDataStart = kFixedRawData - 1;

  // 16 repeat lengths.
  static const int kNumberOfFixedRepeat = 0x10;
  // 0xf0..0xff
  static const int kFixedRepeat = 0xf0;
  static const int kFixedRepeatStart = kFixedRepeat - 1;
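
  // Example: a run of n pointer-sized raw words (1 <= n <= 0x20) is emitted
  // as the single byte kFixedRawDataStart + n, so 0xd0 encodes one word and
  // 0xef encodes 32. Likewise, a repeat of n identical values
  // (1 <= n <= 0x10) is emitted as kFixedRepeatStart + n, covering
  // 0xf0..0xff.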

  // ---------- special values ----------
  static const int kAnyOldSpace = -1;

  // Sentinel after a new object to indicate that double alignment is needed.
  static const int kDoubleAlignmentSentinel = 0;

  // Used as index for the attached reference representing the source object.
  static const int kSourceObjectReference = 0;

  // Used as index for the attached reference representing the global proxy.
  static const int kGlobalProxyReference = 0;

  // ---------- member variable ----------
  HotObjectsList hot_objects_;
};


class SerializedData {
 public:
  class Reservation {
   public:
    explicit Reservation(uint32_t size)
        : reservation_(ChunkSizeBits::encode(size)) {}

    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
    bool is_last() const { return IsLastChunkBits::decode(reservation_); }

    void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }

   private:
    uint32_t reservation_;
  };

  SerializedData(byte* data, int size)
      : data_(data), size_(size), owns_data_(false) {}
  SerializedData() : data_(NULL), size_(0), owns_data_(false) {}

  ~SerializedData() {
    if (owns_data_) DeleteArray<byte>(data_);
  }

  uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }

  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
  class IsLastChunkBits : public BitField<bool, 31, 1> {};
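
  // Example: Reservation(0x1000) stores the word 0x00001000; after
  // mark_as_last() it becomes 0x80001000, so chunk_size() still decodes
  // 0x1000 while is_last() decodes true (bit 31 is the last-chunk flag,
  // bits 0..30 hold the chunk size).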

  static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
    uint32_t external_refs = table->size();
    return 0xC0DE0000 ^ external_refs;
  }
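
  // Example: with 0x123 external references registered, the magic number is
  // 0xC0DE0000 ^ 0x123 == 0xC0DE0123. A snapshot whose stored magic number
  // differs from the one computed for the current binary was built against a
  // different external reference table and cannot be used.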

 protected:
  void SetHeaderValue(int offset, uint32_t value) {
    // memcpy instead of a direct uint32_t store: data_ + offset may not be
    // suitably aligned.
    memcpy(data_ + offset, &value, sizeof(value));
  }

  uint32_t GetHeaderValue(int offset) const {
    uint32_t value;
    memcpy(&value, data_ + offset, sizeof(value));
    return value;
  }

  void AllocateData(int size);

  static uint32_t ComputeMagicNumber(Isolate* isolate) {
    return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
  }

  void SetMagicNumber(Isolate* isolate) {
    SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
  }

  static const int kMagicNumberOffset = 0;

  byte* data_;
  int size_;
  bool owns_data_;
};


// A Deserializer reads a snapshot and reconstructs the Object graph it
// defines.
class Deserializer: public SerializerDeserializer {
 public:
  // Create a deserializer from a snapshot byte source.
  template <class Data>
  explicit Deserializer(Data* data)
      : isolate_(NULL),
        source_(data->Payload()),
        magic_number_(data->GetMagicNumber()),
        external_reference_table_(NULL),
        deserialized_large_objects_(0),
        deserializing_user_code_(false),
        next_alignment_(kWordAligned) {
    DecodeReservation(data->Reservations());
  }

  ~Deserializer() override;

  // Deserialize the snapshot into an empty heap.
  void Deserialize(Isolate* isolate);

  // Deserialize a single object and the objects reachable from it.
  MaybeHandle<Object> DeserializePartial(Isolate* isolate,
                                         Handle<JSGlobalProxy> global_proxy);

  // Deserialize a shared function info. Fail gracefully.
  MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);

  // Pass a vector of externally provided objects referenced by the snapshot.
  // Ownership of the backing store is handed over as well.
  void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
    attached_objects_ = attached_objects;
  }

 private:
  void VisitPointers(Object** start, Object** end) override;

  void Synchronize(VisitorSynchronization::SyncTag tag) override;

  void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }

  void Initialize(Isolate* isolate);

  bool deserializing_user_code() { return deserializing_user_code_; }

  void DecodeReservation(Vector<const SerializedData::Reservation> res);

  bool ReserveSpace();

  void UnalignedCopy(Object** dest, Object** src) {
    memcpy(dest, src, sizeof(*src));
  }
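
  // memcpy is used instead of a plain pointer store because dest may not be
  // pointer-aligned (e.g. a slot inside raw data copied from the byte
  // stream); an unaligned store through an Object** would be undefined
  // behavior on some architectures.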

  void SetAlignment(byte data) {
    DCHECK_EQ(kWordAligned, next_alignment_);
    int alignment = data - (kAlignmentPrefix - 1);
    DCHECK_LE(kWordAligned, alignment);
    DCHECK_LE(alignment, kSimd128Unaligned);
    next_alignment_ = static_cast<AllocationAlignment>(alignment);
  }
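
  // Example: alignment prefix bytes 0x7d..0x7f decode to
  // data - (kAlignmentPrefix - 1), i.e. the values 1..3. Assuming the
  // AllocationAlignment order kWordAligned = 0, kDoubleAligned = 1,
  // kDoubleUnaligned = 2, kSimd128Unaligned = 3, byte 0x7d requests double
  // alignment for the next allocated object.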

  void DeserializeDeferredObjects();

  void FlushICacheForNewIsolate();
  void FlushICacheForNewCodeObjects();

  void CommitPostProcessedObjects(Isolate* isolate);

  // Fills in some heap data in an area from start to end (non-inclusive). The
  // space id is used for the write barrier. The object_address is the address
  // of the object we are writing into, or NULL if we are not writing into an
  // object, i.e. if we are writing a series of tagged values that are not on
  // the heap. Return false if the object content has been deferred.
  bool ReadData(Object** start, Object** end, int space,
                Address object_address);
  void ReadObject(int space_number, Object** write_back);
  Address Allocate(int space_index, int size);

  // Special handling for serialized code like hooking up internalized strings.
  HeapObject* PostProcessNewObject(HeapObject* obj, int space);

  // This returns the address of an object that has been described in the
  // snapshot by chunk index and offset.
  HeapObject* GetBackReferencedObject(int space);

  Object** CopyInNativesSource(Vector<const char> source_vector,
                               Object** current);

  // Cached current isolate.
  Isolate* isolate_;

  // Objects from the attached object descriptions in the serialized user code.
  Vector<Handle<Object> > attached_objects_;

  SnapshotByteSource source_;
  uint32_t magic_number_;

  // The address of the next object that will be allocated in each space.
  // Each space has a number of chunks reserved by the GC, with each chunk
  // fitting into a page. Deserialized objects are allocated into the
  // current chunk of the target space by bumping up the high water mark.
  Heap::Reservation reservations_[kNumberOfSpaces];
  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
  Address high_water_[kNumberOfPreallocatedSpaces];

  ExternalReferenceTable* external_reference_table_;

  List<HeapObject*> deserialized_large_objects_;
  List<Code*> new_code_objects_;
  List<Handle<String> > new_internalized_strings_;
  List<Handle<Script> > new_scripts_;

  bool deserializing_user_code_;

  AllocationAlignment next_alignment_;

  DISALLOW_COPY_AND_ASSIGN(Deserializer);
};


class CodeAddressMap;

// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
 public:
  Serializer(Isolate* isolate, SnapshotByteSink* sink);
  ~Serializer() override;

  void EncodeReservations(List<SerializedData::Reservation>* out) const;

  void SerializeDeferredObjects();

  Isolate* isolate() const { return isolate_; }

  BackReferenceMap* back_reference_map() { return &back_reference_map_; }
  RootIndexMap* root_index_map() { return &root_index_map_; }

#ifdef OBJECT_PRINT
  void CountInstanceType(Map* map, int size);
#endif  // OBJECT_PRINT

 protected:
  class ObjectSerializer;
  class RecursionScope {
   public:
    explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
      serializer_->recursion_depth_++;
    }
    ~RecursionScope() { serializer_->recursion_depth_--; }
    bool ExceedsMaximum() {
      return serializer_->recursion_depth_ >= kMaxRecursionDepth;
    }

   private:
    static const int kMaxRecursionDepth = 32;
    Serializer* serializer_;
  };

  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) = 0;

  void PutRoot(int index, HeapObject* object, HowToCode how,
               WhereToPoint where, int skip);

  void PutBackReference(HeapObject* object, BackReference reference);

  // Emit an alignment prefix if necessary; return the required padding space
  // in bytes.
  int PutAlignmentPrefix(HeapObject* object);

  // Returns true if the object was successfully serialized.
  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                            WhereToPoint where_to_point, int skip);

  inline void FlushSkip(int skip) {
    if (skip != 0) {
      sink_->Put(kSkip, "SkipFromSerializeObject");
      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
    }
  }

  bool BackReferenceIsAlreadyAllocated(BackReference back_reference);

  // Reserve space for an object and return a back reference to where it will
  // be allocated.
  BackReference AllocateLargeObject(int size);
  BackReference Allocate(AllocationSpace space, int size);
  int EncodeExternalReference(Address addr) {
    return external_reference_encoder_.Encode(addr);
  }

  // GetInt reads 4 bytes at once, requiring padding at the end.
  void Pad();

  // Some roots should not be serialized, because their actual value depends
  // on absolute addresses and they are reset after deserialization anyway.
  bool ShouldBeSkipped(Object** current);

  // Not every instance of the serializer needs the code address map for
  // logging. Initialize it on demand.
  void InitializeCodeAddressMap();

  Code* CopyCode(Code* code);

  inline uint32_t max_chunk_size(int space) const {
    DCHECK_LE(0, space);
    DCHECK_LT(space, kNumberOfSpaces);
    return max_chunk_size_[space];
  }

  SnapshotByteSink* sink() const { return sink_; }

  void QueueDeferredObject(HeapObject* obj) {
    DCHECK(back_reference_map_.Lookup(obj).is_valid());
    deferred_objects_.Add(obj);
  }

  void OutputStatistics(const char* name);

  Isolate* isolate_;

  SnapshotByteSink* sink_;
  ExternalReferenceEncoder external_reference_encoder_;

  BackReferenceMap back_reference_map_;
  RootIndexMap root_index_map_;

  int recursion_depth_;

  friend class Deserializer;
  friend class ObjectSerializer;
  friend class RecursionScope;
  friend class SnapshotData;

 private:
  void VisitPointers(Object** start, Object** end) override;

  CodeAddressMap* code_address_map_;
  // Objects from the same space are put into chunks for bulk allocation
  // when deserializing. We have to make sure that each chunk fits into a
  // page, so we track the pending chunk size per space in pending_chunk_;
  // when it would exceed a page, we complete the current chunk and start a
  // new one.
  uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
  List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
  uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];

  // We map serialized large objects to indexes for back-referencing.
  uint32_t large_objects_total_size_;
  uint32_t seen_large_objects_index_;

  List<byte> code_buffer_;

  // To handle stack overflow.
  List<HeapObject*> deferred_objects_;

#ifdef OBJECT_PRINT
  static const int kInstanceTypes = 256;
  int* instance_type_count_;
  size_t* instance_type_size_;
#endif  // OBJECT_PRINT

  DISALLOW_COPY_AND_ASSIGN(Serializer);
};


class PartialSerializer : public Serializer {
 public:
  PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
                    SnapshotByteSink* sink)
      : Serializer(isolate, sink),
        startup_serializer_(startup_snapshot_serializer),
        global_object_(NULL) {
    InitializeCodeAddressMap();
  }

  ~PartialSerializer() override { OutputStatistics("PartialSerializer"); }

  // Serialize the objects reachable from a single object pointer.
  void Serialize(Object** o);

 private:
  void SerializeObject(HeapObject* o, HowToCode how_to_code,
                       WhereToPoint where_to_point, int skip) override;

  int PartialSnapshotCacheIndex(HeapObject* o);
  bool ShouldBeInThePartialSnapshotCache(HeapObject* o);

  Serializer* startup_serializer_;
  Object* global_object_;
  PartialCacheIndexMap partial_cache_index_map_;
  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};


class StartupSerializer : public Serializer {
 public:
  StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
  ~StartupSerializer() override { OutputStatistics("StartupSerializer"); }

  // Serialize the current state of the heap. The order is:
  // 1) Strong references.
  // 2) Partial snapshot cache.
  // 3) Weak references (e.g. the string table).
  void SerializeStrongReferences();
  void SerializeWeakReferencesAndDeferred();

 private:
  // The StartupSerializer has to serialize the root array, which is slightly
  // different.
  void VisitPointers(Object** start, Object** end) override;
  void SerializeObject(HeapObject* o, HowToCode how_to_code,
                       WhereToPoint where_to_point, int skip) override;
  void Synchronize(VisitorSynchronization::SyncTag tag) override;

  intptr_t root_index_wave_front_;
  bool serializing_builtins_;
  DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};


class CodeSerializer : public Serializer {
 public:
  static ScriptData* Serialize(Isolate* isolate,
                               Handle<SharedFunctionInfo> info,
                               Handle<String> source);

  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
      Isolate* isolate, ScriptData* cached_data, Handle<String> source);

  static const int kSourceObjectIndex = 0;
  STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);

  static const int kCodeStubsBaseIndex = 1;

  String* source() const {
    DCHECK(!AllowHeapAllocation::IsAllowed());
    return source_;
  }

  const List<uint32_t>* stub_keys() const { return &stub_keys_; }

 private:
  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
      : Serializer(isolate, sink), source_(source) {
    back_reference_map_.AddSourceString(source);
  }

  ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }

  void SerializeObject(HeapObject* o, HowToCode how_to_code,
                       WhereToPoint where_to_point, int skip) override;

  void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  void SerializeIC(Code* ic, HowToCode how_to_code,
                   WhereToPoint where_to_point);
  void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
                         WhereToPoint where_to_point);
  void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  int AddCodeStubKey(uint32_t stub_key);

  DisallowHeapAllocation no_gc_;
  String* source_;
  List<uint32_t> stub_keys_;
  DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};


// Wrapper around reservation sizes and the serialization payload.
class SnapshotData : public SerializedData {
 public:
  // Used when producing.
  explicit SnapshotData(const Serializer& ser);

  // Used when consuming.
  explicit SnapshotData(const Vector<const byte> snapshot)
      : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
    CHECK(IsSane());
  }

  Vector<const Reservation> Reservations() const;
  Vector<const byte> Payload() const;

  Vector<const byte> RawData() const {
    return Vector<const byte>(data_, size_);
  }

 private:
  bool IsSane();

  // The data header consists of uint32_t-sized entries:
  // [0] magic number and external reference count
  // [1] version hash
  // [2] number of reservation size entries
  // [3] payload length
  // ... reservations
  // ... serialized payload
  static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
  static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
  static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
  static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
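
  // With four uint32_t header entries, kHeaderSize is 4 * kInt32Size == 16
  // bytes; the reservation list starts at offset kHeaderSize and the
  // serialized payload follows the reservations.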
};


// Wrapper around ScriptData to provide code-serializer-specific functionality.
class SerializedCodeData : public SerializedData {
 public:
  // Used when consuming.
  static SerializedCodeData* FromCachedData(Isolate* isolate,
                                            ScriptData* cached_data,
                                            String* source);

  // Used when producing.
  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);

  // Return the ScriptData object, relinquishing ownership of it to the
  // caller.
  ScriptData* GetScriptData();

  Vector<const Reservation> Reservations() const;
  Vector<const byte> Payload() const;

  Vector<const uint32_t> CodeStubKeys() const;

 private:
  explicit SerializedCodeData(ScriptData* data);

  enum SanityCheckResult {
    CHECK_SUCCESS = 0,
    MAGIC_NUMBER_MISMATCH = 1,
    VERSION_MISMATCH = 2,
    SOURCE_MISMATCH = 3,
    CPU_FEATURES_MISMATCH = 4,
    FLAGS_MISMATCH = 5,
    CHECKSUM_MISMATCH = 6
  };

  SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;

  uint32_t SourceHash(String* source) const;

  // The data header consists of uint32_t-sized entries:
  // [0] magic number and external reference count
  // [1] version hash
  // [2] source hash
  // [3] cpu features
  // [4] flag hash
  // [5] number of code stub keys
  // [6] number of reservation size entries
  // [7] payload length
  // [8] payload checksum part 1
  // [9] payload checksum part 2
  // ... reservations
  // ... code stub keys
  // ... serialized payload
  static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
  static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
  static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
  static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
  static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
  static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
  static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
  static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
  static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
  static const int kHeaderSize = kChecksum2Offset + kInt32Size;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SNAPSHOT_SERIALIZE_H_