// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/serializer.h"

#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"

namespace v8 {
namespace internal {

Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
    : isolate_(isolate),
      sink_(sink),
      external_reference_encoder_(isolate),
      root_index_map_(isolate),
      recursion_depth_(0),
      code_address_map_(NULL),
      large_objects_total_size_(0),
      seen_large_objects_index_(0) {
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
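  // Chunk bookkeeping: pending_chunk_[i] tracks the fill of the chunk
  // currently being allocated into for space i; a chunk may grow up to the
  // page area size of its space before a new one is started (see Allocate).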
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    pending_chunk_[i] = 0;
    max_chunk_size_[i] = static_cast<uint32_t>(
        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    instance_type_count_ = NewArray<int>(kInstanceTypes);
    instance_type_size_ = NewArray<size_t>(kInstanceTypes);
    for (int i = 0; i < kInstanceTypes; i++) {
      instance_type_count_[i] = 0;
      instance_type_size_[i] = 0;
    }
  } else {
    instance_type_count_ = NULL;
    instance_type_size_ = NULL;
  }
#endif  // OBJECT_PRINT
}

Serializer::~Serializer() {
  if (code_address_map_ != NULL) delete code_address_map_;
#ifdef OBJECT_PRINT
  if (instance_type_count_ != NULL) {
    DeleteArray(instance_type_count_);
    DeleteArray(instance_type_size_);
  }
#endif  // OBJECT_PRINT
}

#ifdef OBJECT_PRINT
void Serializer::CountInstanceType(Map* map, int size) {
  int instance_type = map->instance_type();
  instance_type_count_[instance_type]++;
  instance_type_size_[instance_type] += size;
}
#endif  // OBJECT_PRINT

void Serializer::OutputStatistics(const char* name) {
  if (!FLAG_serialization_statistics) return;
  PrintF("%s:\n", name);
  PrintF("  Spaces (bytes):\n");
  for (int space = 0; space < kNumberOfSpaces; space++) {
    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
  }
  PrintF("\n");
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    size_t s = pending_chunk_[space];
    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
    PrintF("%16" PRIuS, s);
  }
  PrintF("%16d\n", large_objects_total_size_);
#ifdef OBJECT_PRINT
  PrintF("  Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name)                                \
  if (instance_type_count_[Name]) {                              \
    PrintF("%10d %10" PRIuS " %s\n", instance_type_count_[Name], \
           instance_type_size_[Name], #Name);                    \
  }
  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
  PrintF("\n");
#endif  // OBJECT_PRINT
}

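// Serializes the content of objects whose serialization was deferred to
// limit recursion depth (see ObjectSerializer::Serialize).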
void Serializer::SerializeDeferredObjects() {
  while (deferred_objects_.length() > 0) {
    HeapObject* obj = deferred_objects_.RemoveLast();
    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
    obj_serializer.SerializeDeferred();
  }
  sink_->Put(kSynchronize, "Finished with deferred objects");
}

void Serializer::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsSmi()) {
      PutSmi(Smi::cast(*current));
    } else {
      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
    }
  }
}

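// Emits the memory reservations the deserializer has to make: for each
// preallocated space, the sizes of all completed chunks followed by the
// still-pending chunk (the last reservation per space is marked), and
// finally the total size of the large object space.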
void Serializer::EncodeReservations(
    List<SerializedData::Reservation>* out) const {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    for (int j = 0; j < completed_chunks_[i].length(); j++) {
      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
    }

    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
      out->Add(SerializedData::Reservation(pending_chunk_[i]));
    }
    out->last().mark_as_last();
  }

  out->Add(SerializedData::Reservation(large_objects_total_size_));
  out->last().mark_as_last();
}

#ifdef DEBUG
bool Serializer::BackReferenceIsAlreadyAllocated(
    SerializerReference reference) {
  DCHECK(reference.is_back_reference());
  AllocationSpace space = reference.space();
  int chunk_index = reference.chunk_index();
  if (space == LO_SPACE) {
    return chunk_index == 0 &&
           reference.large_object_index() < seen_large_objects_index_;
  } else if (chunk_index == completed_chunks_[space].length()) {
    return reference.chunk_offset() < pending_chunk_[space];
  } else {
    return chunk_index < completed_chunks_[space].length() &&
           reference.chunk_offset() < completed_chunks_[space][chunk_index];
  }
}
#endif  // DEBUG

bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                                      WhereToPoint where_to_point, int skip) {
  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
    // Encode a reference to a hot object by its index in the working set.
    int index = hot_objects_.Find(obj);
    if (index != HotObjectsList::kNotFound) {
      DCHECK(index >= 0 && index < kNumberOfHotObjects);
      if (FLAG_trace_serializer) {
        PrintF(" Encoding hot object %d:", index);
        obj->ShortPrint();
        PrintF("\n");
      }
      if (skip != 0) {
        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
        sink_->PutInt(skip, "HotObjectSkipDistance");
      } else {
        sink_->Put(kHotObject + index, "HotObject");
      }
      return true;
    }
  }
  SerializerReference reference = reference_map_.Lookup(obj);
  if (reference.is_valid()) {
    // Encode the location of an already deserialized object in order to write
    // its location into a later object. We can encode the location as an
    // offset from the start of the deserialized objects or as an offset
    // backwards from the current allocation pointer.
    if (reference.is_attached_reference()) {
      FlushSkip(skip);
      if (FLAG_trace_serializer) {
        PrintF(" Encoding attached reference %d\n",
               reference.attached_reference_index());
      }
      PutAttachedReference(reference, how_to_code, where_to_point);
    } else {
      DCHECK(reference.is_back_reference());
      if (FLAG_trace_serializer) {
        PrintF(" Encoding back reference to: ");
        obj->ShortPrint();
        PrintF("\n");
      }

      PutAlignmentPrefix(obj);
      AllocationSpace space = reference.space();
      if (skip == 0) {
        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
      } else {
        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
                   "BackRefWithSkip");
        sink_->PutInt(skip, "BackRefSkipDistance");
      }
      PutBackReference(obj, reference);
    }
    return true;
  }
  return false;
}

void Serializer::PutRoot(int root_index, HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point,
                         int skip) {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding root %d:", root_index);
    object->ShortPrint();
    PrintF("\n");
  }

  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
      root_index < kNumberOfRootArrayConstants &&
      !isolate()->heap()->InNewSpace(object)) {
    if (skip == 0) {
      sink_->Put(kRootArrayConstants + root_index, "RootConstant");
    } else {
      sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
      sink_->PutInt(skip, "SkipInPutRoot");
    }
  } else {
    FlushSkip(skip);
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
  }
}

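// A Smi is an immediate value, so its raw pointer-sized bits can be emitted
// verbatim; the deserializer copies them back without any relocation.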
void Serializer::PutSmi(Smi* smi) {
  sink_->Put(kOnePointerRawData, "Smi");
  byte* bytes = reinterpret_cast<byte*>(&smi);
  for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
}

void Serializer::PutBackReference(HeapObject* object,
                                  SerializerReference reference) {
  DCHECK(BackReferenceIsAlreadyAllocated(reference));
  sink_->PutInt(reference.back_reference(), "BackRefValue");
  hot_objects_.Add(object);
}

void Serializer::PutAttachedReference(SerializerReference reference,
                                      HowToCode how_to_code,
                                      WhereToPoint where_to_point) {
  DCHECK(reference.is_attached_reference());
  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
         (how_to_code == kFromCode && where_to_point == kInnerPointer));
  sink_->Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
  sink_->PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}

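// If the object requires stronger than word alignment, emits an alignment
// prefix byte and returns the maximum number of filler bytes the
// deserializer may have to insert before the object.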
int Serializer::PutAlignmentPrefix(HeapObject* object) {
  AllocationAlignment alignment = object->RequiredAlignment();
  if (alignment != kWordAligned) {
    DCHECK(1 <= alignment && alignment <= 3);
    byte prefix = (kAlignmentPrefix - 1) + alignment;
    sink_->Put(prefix, "Alignment");
    return Heap::GetMaximumFillToAlign(alignment);
  }
  return 0;
}

SerializerReference Serializer::AllocateLargeObject(int size) {
  // Large objects are allocated one-by-one when deserializing. We do not
  // have to keep track of multiple chunks.
  large_objects_total_size_ += size;
  return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}

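// Simulates the deserializer's bump-pointer allocation: fills up the pending
// chunk of the given space, completing it and starting a new chunk whenever
// the next object would no longer fit on a single page.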
SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
  uint32_t new_chunk_size = pending_chunk_[space] + size;
  if (new_chunk_size > max_chunk_size(space)) {
    // The new chunk size would not fit onto a single page. Complete the
    // current chunk and start a new one.
    sink_->Put(kNextChunk, "NextChunk");
    sink_->Put(space, "NextChunkSpace");
    completed_chunks_[space].Add(pending_chunk_[space]);
    pending_chunk_[space] = 0;
    new_chunk_size = size;
  }
  uint32_t offset = pending_chunk_[space];
  pending_chunk_[space] = new_chunk_size;
  return SerializerReference::BackReference(
      space, completed_chunks_[space].length(), offset);
}

void Serializer::Pad() {
  // The non-branching GetInt will read up to 3 bytes too far, so we need
  // to pad the snapshot to make sure we don't read over the end.
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
    sink_->Put(kNop, "Padding");
  }
  // Pad up to pointer size for checksum.
  while (!IsAligned(sink_->Position(), kPointerAlignment)) {
    sink_->Put(kNop, "Padding");
  }
}

void Serializer::InitializeCodeAddressMap() {
  isolate_->InitializeLoggingAndCounters();
  code_address_map_ = new CodeAddressMap(isolate_);
}

Code* Serializer::CopyCode(Code* code) {
  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
  int size = code->CodeSize();
  code_buffer_.AddAll(Vector<byte>(code->address(), size));
  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
}

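// True as long as no chunk has been completed in any preallocated space,
// i.e. everything serialized so far still fits on the first page of each
// space.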
bool Serializer::HasNotExceededFirstPageOfEachSpace() {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    if (!completed_chunks_[i].is_empty()) return false;
  }
  return true;
}

void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
                                                     int size, Map* map) {
  if (serializer_->code_address_map_) {
    const char* code_name =
        serializer_->code_address_map_->Lookup(object_->address());
    LOG(serializer_->isolate_,
        CodeNameEvent(object_->address(), sink_->Position(), code_name));
  }

  SerializerReference back_reference;
  if (space == LO_SPACE) {
    sink_->Put(kNewObject + reference_representation_ + space,
               "NewLargeObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
    if (object_->IsCode()) {
      sink_->Put(EXECUTABLE, "executable large object");
    } else {
      sink_->Put(NOT_EXECUTABLE, "not executable large object");
    }
    back_reference = serializer_->AllocateLargeObject(size);
  } else {
    int fill = serializer_->PutAlignmentPrefix(object_);
    back_reference = serializer_->Allocate(space, size + fill);
    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    serializer_->CountInstanceType(map, size);
  }
#endif  // OBJECT_PRINT

  // Mark this object as already serialized.
  serializer_->reference_map()->Add(object_, back_reference);

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}

void Serializer::ObjectSerializer::SerializeExternalString() {
  // Instead of serializing this as an external string, we serialize
  // an imaginary sequential string with the same content.
  Isolate* isolate = serializer_->isolate();
  DCHECK(object_->IsExternalString());
  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
  ExternalString* string = ExternalString::cast(object_);
  int length = string->length();
  Map* map;
  int content_size;
  int allocation_size;
  const byte* resource;
  // Find the map and size for the imaginary sequential string.
  bool internalized = object_->IsInternalizedString();
  if (object_->IsExternalOneByteString()) {
    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
                       : isolate->heap()->one_byte_string_map();
    allocation_size = SeqOneByteString::SizeFor(length);
    content_size = length * kCharSize;
    resource = reinterpret_cast<const byte*>(
        ExternalOneByteString::cast(string)->resource()->data());
  } else {
    map = internalized ? isolate->heap()->internalized_string_map()
                       : isolate->heap()->string_map();
    allocation_size = SeqTwoByteString::SizeFor(length);
    content_size = length * kShortSize;
    resource = reinterpret_cast<const byte*>(
        ExternalTwoByteString::cast(string)->resource()->data());
  }

  AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
                              ? LO_SPACE
                              : OLD_SPACE;
  SerializePrologue(space, allocation_size, map);

  // Output the rest of the imaginary string.
  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;

  // Output raw data header. Do not bother with common raw length cases here.
  sink_->Put(kVariableRawData, "RawDataForString");
  sink_->PutInt(bytes_to_output, "length");

  // Serialize string header (except for map).
  Address string_start = string->address();
  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
    sink_->PutSection(string_start[i], "StringHeader");
  }

  // Serialize string content.
  sink_->PutRaw(resource, content_size, "StringContent");

  // Since the allocation size is rounded up to object alignment, there
  // may be left-over bytes that need to be padded.
  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");

  sink_->Put(kSkip, "SkipAfterString");
  sink_->PutInt(bytes_to_output, "SkipDistance");
}


// Clear and later restore the next link in the weak cell or allocation site.
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
 public:
  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
    if (object->IsWeakCell()) {
      object_ = object;
      next_ = WeakCell::cast(object)->next();
      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
    } else if (object->IsAllocationSite()) {
      object_ = object;
      next_ = AllocationSite::cast(object)->weak_next();
      AllocationSite::cast(object)->set_weak_next(
          object->GetHeap()->undefined_value());
    }
  }

  ~UnlinkWeakNextScope() {
    if (object_ != nullptr) {
      if (object_->IsWeakCell()) {
        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
      } else {
        AllocationSite::cast(object_)->set_weak_next(next_,
                                                     UPDATE_WEAK_WRITE_BARRIER);
      }
    }
  }

 private:
  HeapObject* object_;
  Object* next_;
  DisallowHeapAllocation no_gc_;
};

void Serializer::ObjectSerializer::Serialize() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  // We cannot serialize typed array objects correctly.
  DCHECK(!object_->IsJSTypedArray());

  // We don't expect fillers.
  DCHECK(!object_->IsFiller());

  if (object_->IsScript()) {
    // Clear cached line ends.
    Object* undefined = serializer_->isolate()->heap()->undefined_value();
    Script::cast(object_)->set_line_ends(undefined);
  }

  if (object_->IsExternalString()) {
    Heap* heap = serializer_->isolate()->heap();
    if (object_->map() != heap->native_source_string_map()) {
      // Usually we cannot recreate resources for external strings. To work
      // around this, external strings are serialized to look like ordinary
      // sequential strings.
      // The exceptions are native source code strings, since we can recreate
      // their resources. In that case we fall through and leave it to
      // VisitExternalOneByteString further down.
      SerializeExternalString();
      return;
    }
  }

  int size = object_->Size();
  Map* map = object_->map();
  AllocationSpace space =
      MemoryChunk::FromAddress(object_->address())->owner()->identity();
  SerializePrologue(space, size, map);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  RecursionScope recursion(serializer_);
  // Objects that are immediately post processed during deserialization
  // cannot be deferred, since post processing requires the object content.
  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
    serializer_->QueueDeferredObject(object_);
    sink_->Put(kDeferred, "Deferring object content");
    return;
  }

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

void Serializer::ObjectSerializer::SerializeDeferred() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding deferred heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  int size = object_->Size();
  Map* map = object_->map();
  SerializerReference back_reference =
      serializer_->reference_map()->Lookup(object_);
  DCHECK(back_reference.is_back_reference());

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  serializer_->PutAlignmentPrefix(object_);
  sink_->Put(kNewObject + back_reference.space(), "deferred object");
  serializer_->PutBackReference(object_, back_reference);
  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

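// Visits the object's pointer fields, alternating between runs of Smis
// (left in place and later emitted as raw data) and runs of heap pointers
// (serialized individually). Consecutive repeats of an immortal immovable
// root are compressed into repeat opcodes.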
void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->root_index_map()->Lookup(current_contents);
      // Repeats are not subject to the write barrier so we can only use
      // immortal immovable root members. They are never in new space.
      if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
          Heap::RootIsImmortalImmovable(root_index) &&
          current_contents == current[-1]) {
        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
        int repeat_count = 1;
        while (&current[repeat_count] < end - 1 &&
               current[repeat_count] == current_contents) {
          repeat_count++;
        }
        current += repeat_count;
        bytes_processed_so_far_ += repeat_count * kPointerSize;
        if (repeat_count > kNumberOfFixedRepeat) {
          sink_->Put(kVariableRepeat, "VariableRepeat");
          sink_->PutInt(repeat_count, "repeat count");
        } else {
          sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
        }
      } else {
        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
                                     0);
        bytes_processed_so_far_ += kPointerSize;
        current++;
      }
    }
  }
}

void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  Object* object = rinfo->target_object();
  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
                               kStartOfObject, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
  int skip = OutputRawData(reinterpret_cast<Address>(p),
                           kCanReturnSkipInsteadOfSkipping);
  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = *p;
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_external_reference();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
  // We can only refer to internal references of code that has been output.
  DCHECK(object_->IsCode() && code_has_been_output_);
  // We do not use skip from last patched pc to find the pc to patch, since
  // target_address_address may not return addresses in ascending order when
  // used for internal references. External references may be stored at the
  // end of the code in the constant pool, whereas internal references are
  // inline. That would cause the skip to be negative. Instead, we store the
  // offset from code entry.
  Address entry = Code::cast(object_)->entry();
  intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
  intptr_t target_offset = rinfo->target_internal_reference() - entry;
  DCHECK(0 <= pc_offset &&
         pc_offset <= Code::cast(object_)->instruction_size());
  DCHECK(0 <= target_offset &&
         target_offset <= Code::cast(object_)->instruction_size());
  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
                 ? kInternalReference
                 : kInternalReferenceEncoded,
             "InternalRef");
  sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
  sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}

void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_address();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
  Cell* object = Cell::cast(rinfo->target_cell());
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
    int builtin_count,
    v8::String::ExternalOneByteStringResource** resource_pointer,
    FixedArray* source_cache, int resource_index) {
  for (int i = 0; i < builtin_count; i++) {
    Object* source = source_cache->get(i);
    if (!source->IsUndefined()) {
      ExternalOneByteString* string = ExternalOneByteString::cast(source);
      typedef v8::String::ExternalOneByteStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(resource_index, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return true;
      }
    }
  }
  return false;
}

void Serializer::ObjectSerializer::VisitExternalOneByteString(
    v8::String::ExternalOneByteStringResource** resource_pointer) {
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  if (SerializeExternalNativeSourceString(
          Natives::GetBuiltinsCount(), resource_pointer,
          Natives::GetSourceCache(serializer_->isolate()->heap()),
          kNativesStringResource)) {
    return;
  }
  if (SerializeExternalNativeSourceString(
          ExtraNatives::GetBuiltinsCount(), resource_pointer,
          ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
          kExtraNativesStringResource)) {
    return;
  }
  // One of the strings in the natives cache should match the resource. We
  // don't expect any other kinds of external strings here.
  UNREACHABLE();
}

Address Serializer::ObjectSerializer::PrepareCode() {
  // To make snapshots reproducible, we make a copy of the code object
  // and wipe all pointers in the copy, which we then serialize.
  Code* original = Code::cast(object_);
  Code* code = serializer_->CopyCode(original);
  // Code age headers are not serializable.
  code->MakeYoung(serializer_->isolate());
  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
    RelocInfo* rinfo = it.rinfo();
    rinfo->WipeOut();
  }
  // We need to wipe out the header fields *after* wiping out the
  // relocations, because some of these fields are needed for the latter.
  code->WipeOutHeader();
  return code->address();
}

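// Emits the object's raw bytes from the last output position up to |up_to|.
// For code objects the entire body is emitted at once, after making it
// reproducible via PrepareCode. If the caller passes
// kCanReturnSkipInsteadOfSkipping, any remaining skip distance is returned
// so it can be folded into the caller's own opcode instead of a kSkip.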
int Serializer::ObjectSerializer::OutputRawData(
    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
  Address object_start = object_->address();
  int base = bytes_processed_so_far_;
  int up_to_offset = static_cast<int>(up_to - object_start);
  int to_skip = up_to_offset - bytes_processed_so_far_;
  int bytes_to_output = to_skip;
  bytes_processed_so_far_ += to_skip;
  // This assert will fail if the reloc info gives us the
  // target_address_address locations in a non-ascending order. Luckily that
  // doesn't happen.
  DCHECK(to_skip >= 0);
  bool outputting_code = false;
  bool is_code_object = object_->IsCode();
  if (to_skip != 0 && is_code_object && !code_has_been_output_) {
    // Output the code all at once and fix later.
    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
    outputting_code = true;
    code_has_been_output_ = true;
  }
  if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
    if (!outputting_code && bytes_to_output == to_skip &&
        IsAligned(bytes_to_output, kPointerAlignment) &&
        bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
      int size_in_words = bytes_to_output >> kPointerSizeLog2;
      sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
      to_skip = 0;  // This instruction includes skip.
    } else {
      // We always end up here if we are outputting the code of a code object.
      sink_->Put(kVariableRawData, "VariableRawData");
      sink_->PutInt(bytes_to_output, "length");
    }

    if (is_code_object) object_start = PrepareCode();

    const char* description = is_code_object ? "Code" : "Byte";
    sink_->PutRaw(object_start + base, bytes_to_output, description);
  }
  if (to_skip != 0 && return_skip == kIgnoringReturn) {
    sink_->Put(kSkip, "Skip");
    sink_->PutInt(to_skip, "SkipDistance");
    to_skip = 0;
  }
  return to_skip;
}

}  // namespace internal
}  // namespace v8