// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/serializer.h"

#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"

namespace v8 {
namespace internal {

Serializer::Serializer(Isolate* isolate)
    : isolate_(isolate),
      external_reference_encoder_(isolate),
      root_index_map_(isolate),
      recursion_depth_(0),
      code_address_map_(NULL),
      large_objects_total_size_(0),
      seen_large_objects_index_(0) {
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    pending_chunk_[i] = 0;
    max_chunk_size_[i] = static_cast<uint32_t>(
        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    instance_type_count_ = NewArray<int>(kInstanceTypes);
    instance_type_size_ = NewArray<size_t>(kInstanceTypes);
    for (int i = 0; i < kInstanceTypes; i++) {
      instance_type_count_[i] = 0;
      instance_type_size_[i] = 0;
    }
  } else {
    instance_type_count_ = NULL;
    instance_type_size_ = NULL;
  }
#endif  // OBJECT_PRINT
}

Serializer::~Serializer() {
  if (code_address_map_ != NULL) delete code_address_map_;
#ifdef OBJECT_PRINT
  if (instance_type_count_ != NULL) {
    DeleteArray(instance_type_count_);
    DeleteArray(instance_type_size_);
  }
#endif  // OBJECT_PRINT
}

#ifdef OBJECT_PRINT
void Serializer::CountInstanceType(Map* map, int size) {
  int instance_type = map->instance_type();
  instance_type_count_[instance_type]++;
  instance_type_size_[instance_type] += size;
}
#endif  // OBJECT_PRINT

void Serializer::OutputStatistics(const char* name) {
  if (!FLAG_serialization_statistics) return;
  PrintF("%s:\n", name);
  PrintF("  Spaces (bytes):\n");
  for (int space = 0; space < kNumberOfSpaces; space++) {
    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
  }
  PrintF("\n");
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    size_t s = pending_chunk_[space];
    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
    PrintF("%16" PRIuS, s);
  }
  PrintF("%16d\n", large_objects_total_size_);
#ifdef OBJECT_PRINT
  PrintF("  Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name)                                 \
  if (instance_type_count_[Name]) {                               \
    PrintF("%10d %10" PRIuS "  %s\n", instance_type_count_[Name], \
           instance_type_size_[Name], #Name);                     \
  }
  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
  PrintF("\n");
#endif  // OBJECT_PRINT
}

void Serializer::SerializeDeferredObjects() {
  while (deferred_objects_.length() > 0) {
    HeapObject* obj = deferred_objects_.RemoveLast();
    ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
    obj_serializer.SerializeDeferred();
  }
  sink_.Put(kSynchronize, "Finished with deferred objects");
}

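// Visit a range of object slots in the root list (the serializer acts as a
// root visitor). Smis carry their value in the slot itself, so they are
// written out verbatim; everything else is a heap object and goes through
// the full SerializeObject() machinery.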
void Serializer::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsSmi()) {
      PutSmi(Smi::cast(*current));
    } else {
      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
    }
  }
}

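// Record the chunk sizes the deserializer must reserve per space before it
// starts reading the snapshot. The last reservation of each preallocated
// space (and the single large-object reservation) is tagged so the reader
// knows where one space's list ends and the next begins.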
void Serializer::EncodeReservations(
    List<SerializedData::Reservation>* out) const {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    for (int j = 0; j < completed_chunks_[i].length(); j++) {
      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
    }

    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
      out->Add(SerializedData::Reservation(pending_chunk_[i]));
    }
    out->last().mark_as_last();
  }

  out->Add(SerializedData::Reservation(large_objects_total_size_));
  out->last().mark_as_last();
}

#ifdef DEBUG
bool Serializer::BackReferenceIsAlreadyAllocated(
    SerializerReference reference) {
  DCHECK(reference.is_back_reference());
  AllocationSpace space = reference.space();
  int chunk_index = reference.chunk_index();
  if (space == LO_SPACE) {
    return chunk_index == 0 &&
           reference.large_object_index() < seen_large_objects_index_;
  } else if (chunk_index == completed_chunks_[space].length()) {
    return reference.chunk_offset() < pending_chunk_[space];
  } else {
    return chunk_index < completed_chunks_[space].length() &&
           reference.chunk_offset() < completed_chunks_[space][chunk_index];
  }
}
#endif  // DEBUG

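// Try to encode the object as a reference into the small working set of
// recently serialized objects ("hot objects"). This is only possible for
// plain pointers to object starts; on success a single opcode (plus an
// optional skip) replaces a full back reference.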
bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
                                    WhereToPoint where_to_point, int skip) {
  if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
  // Encode a reference to a hot object by its index in the working set.
  int index = hot_objects_.Find(obj);
  if (index == HotObjectsList::kNotFound) return false;
  DCHECK(index >= 0 && index < kNumberOfHotObjects);
  if (FLAG_trace_serializer) {
    PrintF(" Encoding hot object %d:", index);
    obj->ShortPrint();
    PrintF("\n");
  }
  if (skip != 0) {
    sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
    sink_.PutInt(skip, "HotObjectSkipDistance");
  } else {
    sink_.Put(kHotObject + index, "HotObject");
  }
  return true;
}

bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
                                        WhereToPoint where_to_point, int skip) {
  SerializerReference reference = reference_map_.Lookup(obj);
  if (!reference.is_valid()) return false;
  // Encode the location of an already deserialized object in order to write
  // its location into a later object. We can encode the location as an
  // offset from the start of the deserialized objects or as an offset
  // backwards from the current allocation pointer.
  if (reference.is_attached_reference()) {
    FlushSkip(skip);
    if (FLAG_trace_serializer) {
      PrintF(" Encoding attached reference %d\n",
             reference.attached_reference_index());
    }
    PutAttachedReference(reference, how_to_code, where_to_point);
  } else {
    DCHECK(reference.is_back_reference());
    if (FLAG_trace_serializer) {
      PrintF(" Encoding back reference to: ");
      obj->ShortPrint();
      PrintF("\n");
    }

    PutAlignmentPrefix(obj);
    AllocationSpace space = reference.space();
    if (skip == 0) {
      sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
    } else {
      sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
                "BackRefWithSkip");
      sink_.PutInt(skip, "BackRefSkipDistance");
    }
    PutBackReference(obj, reference);
  }
  return true;
}

void Serializer::PutRoot(int root_index, HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point,
                         int skip) {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding root %d:", root_index);
    object->ShortPrint();
    PrintF("\n");
  }

  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
      root_index < kNumberOfRootArrayConstants &&
      !isolate()->heap()->InNewSpace(object)) {
    if (skip == 0) {
      sink_.Put(kRootArrayConstants + root_index, "RootConstant");
    } else {
      sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
      sink_.PutInt(skip, "SkipInPutRoot");
    }
  } else {
    FlushSkip(skip);
    sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_.PutInt(root_index, "root_index");
    hot_objects_.Add(object);
  }
}

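// A Smi is immediate data rather than a heap object, so its kPointerSize
// bytes can simply be emitted as raw data and copied back verbatim on
// deserialization.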
void Serializer::PutSmi(Smi* smi) {
  sink_.Put(kOnePointerRawData, "Smi");
  byte* bytes = reinterpret_cast<byte*>(&smi);
  for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
}

void Serializer::PutBackReference(HeapObject* object,
                                  SerializerReference reference) {
  DCHECK(BackReferenceIsAlreadyAllocated(reference));
  sink_.PutInt(reference.back_reference(), "BackRefValue");
  hot_objects_.Add(object);
}

void Serializer::PutAttachedReference(SerializerReference reference,
                                      HowToCode how_to_code,
                                      WhereToPoint where_to_point) {
  DCHECK(reference.is_attached_reference());
  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
         (how_to_code == kFromCode && where_to_point == kInnerPointer));
  sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
  sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}

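// If the object needs stronger alignment than the default word alignment,
// emit an alignment prefix opcode and return the maximum number of fill
// bytes the deserializer may have to insert; returns 0 otherwise.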
int Serializer::PutAlignmentPrefix(HeapObject* object) {
  AllocationAlignment alignment = object->RequiredAlignment();
  if (alignment != kWordAligned) {
    DCHECK(1 <= alignment && alignment <= 3);
    byte prefix = (kAlignmentPrefix - 1) + alignment;
    sink_.Put(prefix, "Alignment");
    return Heap::GetMaximumFillToAlign(alignment);
  }
  return 0;
}

SerializerReference Serializer::AllocateLargeObject(int size) {
  // Large objects are allocated one-by-one when deserializing. We do not
  // have to keep track of multiple chunks.
  large_objects_total_size_ += size;
  return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
}

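// Simulate bump-pointer allocation in the deserializer: objects are packed
// into the pending chunk of the given space until a page would overflow, at
// which point the chunk is completed and a kNextChunk marker tells the
// deserializer to do the same.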
SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
  uint32_t new_chunk_size = pending_chunk_[space] + size;
  if (new_chunk_size > max_chunk_size(space)) {
    // The new chunk size would not fit onto a single page. Complete the
    // current chunk and start a new one.
    sink_.Put(kNextChunk, "NextChunk");
    sink_.Put(space, "NextChunkSpace");
    completed_chunks_[space].Add(pending_chunk_[space]);
    pending_chunk_[space] = 0;
    new_chunk_size = size;
  }
  uint32_t offset = pending_chunk_[space];
  pending_chunk_[space] = new_chunk_size;
  return SerializerReference::BackReference(
      space, completed_chunks_[space].length(), offset);
}

void Serializer::Pad() {
  // The non-branching GetInt will read up to 3 bytes too far, so we need
  // to pad the snapshot to make sure we don't read over the end.
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
    sink_.Put(kNop, "Padding");
  }
  // Pad up to pointer size for checksum.
  while (!IsAligned(sink_.Position(), kPointerAlignment)) {
    sink_.Put(kNop, "Padding");
  }
}

void Serializer::InitializeCodeAddressMap() {
  isolate_->InitializeLoggingAndCounters();
  code_address_map_ = new CodeAddressMap(isolate_);
}

Code* Serializer::CopyCode(Code* code) {
  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
  int size = code->CodeSize();
  code_buffer_.AddAll(Vector<byte>(code->address(), size));
  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
}

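// True as long as no space has completed its first chunk, i.e. everything
// serialized so far still fits on the first page of each space.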
bool Serializer::HasNotExceededFirstPageOfEachSpace() {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    if (!completed_chunks_[i].is_empty()) return false;
  }
  return true;
}

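// Emit the allocation instructions for a new object: optional alignment,
// a kNewObject opcode with the target space, and the object size. This also
// registers the object's back reference so later occurrences can be encoded
// cheaply, and then serializes the map, which is the first word of every
// heap object.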
void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
                                                     int size, Map* map) {
  if (serializer_->code_address_map_) {
    const char* code_name =
        serializer_->code_address_map_->Lookup(object_->address());
    LOG(serializer_->isolate_,
        CodeNameEvent(object_->address(), sink_->Position(), code_name));
  }

  SerializerReference back_reference;
  if (space == LO_SPACE) {
    sink_->Put(kNewObject + reference_representation_ + space,
               "NewLargeObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
    if (object_->IsCode()) {
      sink_->Put(EXECUTABLE, "executable large object");
    } else {
      sink_->Put(NOT_EXECUTABLE, "not executable large object");
    }
    back_reference = serializer_->AllocateLargeObject(size);
  } else {
    int fill = serializer_->PutAlignmentPrefix(object_);
    back_reference = serializer_->Allocate(space, size + fill);
    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    serializer_->CountInstanceType(map, size);
  }
#endif  // OBJECT_PRINT

  // Mark this object as already serialized.
  serializer_->reference_map()->Add(object_, back_reference);

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}

void Serializer::ObjectSerializer::SerializeExternalString() {
  // Instead of serializing this as an external string, we serialize
  // an imaginary sequential string with the same content.
  Isolate* isolate = serializer_->isolate();
  DCHECK(object_->IsExternalString());
  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
  ExternalString* string = ExternalString::cast(object_);
  int length = string->length();
  Map* map;
  int content_size;
  int allocation_size;
  const byte* resource;
  // Find the map and size for the imaginary sequential string.
  bool internalized = object_->IsInternalizedString();
  if (object_->IsExternalOneByteString()) {
    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
                       : isolate->heap()->one_byte_string_map();
    allocation_size = SeqOneByteString::SizeFor(length);
    content_size = length * kCharSize;
    resource = reinterpret_cast<const byte*>(
        ExternalOneByteString::cast(string)->resource()->data());
  } else {
    map = internalized ? isolate->heap()->internalized_string_map()
                       : isolate->heap()->string_map();
    allocation_size = SeqTwoByteString::SizeFor(length);
    content_size = length * kShortSize;
    resource = reinterpret_cast<const byte*>(
        ExternalTwoByteString::cast(string)->resource()->data());
  }

  AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
                              ? LO_SPACE
                              : OLD_SPACE;
  SerializePrologue(space, allocation_size, map);

  // Output the rest of the imaginary string.
  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;

  // Output raw data header. Do not bother with common raw length cases here.
  sink_->Put(kVariableRawData, "RawDataForString");
  sink_->PutInt(bytes_to_output, "length");

  // Serialize string header (except for map).
  Address string_start = string->address();
  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
    sink_->PutSection(string_start[i], "StringHeader");
  }

  // Serialize string content.
  sink_->PutRaw(resource, content_size, "StringContent");

  // Since the allocation size is rounded up to object alignment, there
  // may be left-over bytes that need to be padded.
  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");

  sink_->Put(kSkip, "SkipAfterString");
  sink_->PutInt(bytes_to_output, "SkipDistance");
}

// Clear and later restore the next link in the weak cell or allocation site.
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
 public:
  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
    if (object->IsWeakCell()) {
      object_ = object;
      next_ = WeakCell::cast(object)->next();
      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
    } else if (object->IsAllocationSite()) {
      object_ = object;
      next_ = AllocationSite::cast(object)->weak_next();
      AllocationSite::cast(object)->set_weak_next(
          object->GetHeap()->undefined_value());
    }
  }

  ~UnlinkWeakNextScope() {
    if (object_ != nullptr) {
      if (object_->IsWeakCell()) {
        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
      } else {
        AllocationSite::cast(object_)->set_weak_next(next_,
                                                     UPDATE_WEAK_WRITE_BARRIER);
      }
    }
  }

 private:
  HeapObject* object_;
  Object* next_;
  DisallowHeapAllocation no_gc_;
};

void Serializer::ObjectSerializer::Serialize() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  // We cannot serialize typed array objects correctly.
  DCHECK(!object_->IsJSTypedArray());

  // We don't expect fillers.
  DCHECK(!object_->IsFiller());

  if (object_->IsScript()) {
    // Clear cached line ends.
    Object* undefined = serializer_->isolate()->heap()->undefined_value();
    Script::cast(object_)->set_line_ends(undefined);
  }

  if (object_->IsExternalString()) {
    Heap* heap = serializer_->isolate()->heap();
    if (object_->map() != heap->native_source_string_map()) {
      // Usually we cannot recreate resources for external strings. To work
      // around this, external strings are serialized to look like ordinary
      // sequential strings.
      // The exceptions are native source code strings, since we can recreate
      // their resources. In that case we fall through and leave it to
      // VisitExternalOneByteString further down.
      SerializeExternalString();
      return;
    }
  }

  int size = object_->Size();
  Map* map = object_->map();
  AllocationSpace space =
      MemoryChunk::FromAddress(object_->address())->owner()->identity();
  SerializePrologue(space, size, map);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  RecursionScope recursion(serializer_);
  // Objects that are immediately post processed during deserialization
  // cannot be deferred, since post processing requires the object content.
  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
    serializer_->QueueDeferredObject(object_);
    sink_->Put(kDeferred, "Deferring object content");
    return;
  }

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

void Serializer::ObjectSerializer::SerializeDeferred() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding deferred heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  int size = object_->Size();
  Map* map = object_->map();
  SerializerReference back_reference =
      serializer_->reference_map()->Lookup(object_);
  DCHECK(back_reference.is_back_reference());

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  serializer_->PutAlignmentPrefix(object_);
  sink_->Put(kNewObject + back_reference.space(), "deferred object");
  serializer_->PutBackReference(object_, back_reference);
  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

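// Walk the object's pointer fields. Runs of Smis are passed through as raw
// data, and consecutive occurrences of the same immortal immovable root are
// compressed with a repeat opcode instead of being serialized one by one.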
void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->root_index_map()->Lookup(current_contents);
      // Repeats are not subject to the write barrier so we can only use
      // immortal immovable root members. They are never in new space.
      if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
          Heap::RootIsImmortalImmovable(root_index) &&
          current_contents == current[-1]) {
        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
        int repeat_count = 1;
        while (&current[repeat_count] < end - 1 &&
               current[repeat_count] == current_contents) {
          repeat_count++;
        }
        current += repeat_count;
        bytes_processed_so_far_ += repeat_count * kPointerSize;
        if (repeat_count > kNumberOfFixedRepeat) {
          sink_->Put(kVariableRepeat, "VariableRepeat");
          sink_->PutInt(repeat_count, "repeat count");
        } else {
          sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
        }
      } else {
        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
                                     0);
        bytes_processed_so_far_ += kPointerSize;
        current++;
      }
    }
  }
}

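// For the reloc-info visitors below, OutputRawData() first flushes all raw
// bytes up to the slot being visited and returns a skip distance, which is
// then folded into the next instruction instead of being emitted as a
// separate kSkip.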
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  Object* object = rinfo->target_object();
  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
                               kStartOfObject, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
  int skip = OutputRawData(reinterpret_cast<Address>(p),
                           kCanReturnSkipInsteadOfSkipping);
  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = *p;
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_external_reference();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
  // We can only refer to internal references of code that has been output.
  DCHECK(object_->IsCode() && code_has_been_output_);
  // We do not use skip from last patched pc to find the pc to patch, since
  // target_address_address may not return addresses in ascending order when
  // used for internal references. External references may be stored at the
  // end of the code in the constant pool, whereas internal references are
  // inline. That would cause the skip to be negative. Instead, we store the
  // offset from code entry.
  Address entry = Code::cast(object_)->entry();
  intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
  intptr_t target_offset = rinfo->target_internal_reference() - entry;
  DCHECK(0 <= pc_offset &&
         pc_offset <= Code::cast(object_)->instruction_size());
  DCHECK(0 <= target_offset &&
         target_offset <= Code::cast(object_)->instruction_size());
  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
                 ? kInternalReference
                 : kInternalReferenceEncoded,
             "InternalRef");
  sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
  sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}

void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_address();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
  Cell* object = Cell::cast(rinfo->target_cell());
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
    int builtin_count,
    v8::String::ExternalOneByteStringResource** resource_pointer,
    FixedArray* source_cache, int resource_index) {
  Isolate* isolate = serializer_->isolate();
  for (int i = 0; i < builtin_count; i++) {
    Object* source = source_cache->get(i);
    if (!source->IsUndefined(isolate)) {
      ExternalOneByteString* string = ExternalOneByteString::cast(source);
      typedef v8::String::ExternalOneByteStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(resource_index, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return true;
      }
    }
  }
  return false;
}

void Serializer::ObjectSerializer::VisitExternalOneByteString(
    v8::String::ExternalOneByteStringResource** resource_pointer) {
  DCHECK_EQ(serializer_->isolate()->heap()->native_source_string_map(),
            object_->map());
  DCHECK(ExternalOneByteString::cast(object_)->is_short());
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  if (SerializeExternalNativeSourceString(
          Natives::GetBuiltinsCount(), resource_pointer,
          Natives::GetSourceCache(serializer_->isolate()->heap()),
          kNativesStringResource)) {
    return;
  }
  if (SerializeExternalNativeSourceString(
          ExtraNatives::GetBuiltinsCount(), resource_pointer,
          ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
          kExtraNativesStringResource)) {
    return;
  }
  // One of the strings in the natives cache should match the resource. We
  // don't expect any other kinds of external strings here.
  UNREACHABLE();
}

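// Return the address from which the raw bytes of the code object should be
// read. Under --predictable we serialize a copy whose relocation targets and
// header fields have been wiped, so that snapshots are reproducible.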
Address Serializer::ObjectSerializer::PrepareCode() {
  Code* code = Code::cast(object_);
  if (FLAG_predictable) {
    // To make snapshots reproducible, we make a copy of the code object
    // and wipe all pointers in the copy, which we then serialize.
    code = serializer_->CopyCode(code);
    int mode_mask = RelocInfo::kCodeTargetMask |
                    RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                    RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
    for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
      RelocInfo* rinfo = it.rinfo();
      rinfo->WipeOut();
    }
    // We need to wipe out the header fields *after* wiping out the
    // relocations, because some of these fields are needed for the latter.
    code->WipeOutHeader();
  }
  // Code age headers are not serializable.
  code->MakeYoung(serializer_->isolate());
  return code->address();
}

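// Copy the raw bytes between the last processed offset and up_to into the
// sink. Short pointer-aligned runs use a one-byte fixed-length opcode;
// longer runs are prefixed with kVariableRawData and an explicit length.
// Code objects are output in one piece the first time raw data is needed.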
int Serializer::ObjectSerializer::OutputRawData(
    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
  Address object_start = object_->address();
  int base = bytes_processed_so_far_;
  int up_to_offset = static_cast<int>(up_to - object_start);
  int to_skip = up_to_offset - bytes_processed_so_far_;
  int bytes_to_output = to_skip;
  bytes_processed_so_far_ += to_skip;
  // This assert will fail if the reloc info gives us the
  // target_address_address locations in a non-ascending order. Luckily that
  // doesn't happen.
  DCHECK(to_skip >= 0);
  bool outputting_code = false;
  bool is_code_object = object_->IsCode();
  if (to_skip != 0 && is_code_object && !code_has_been_output_) {
    // Output the code all at once and fix later.
    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
    outputting_code = true;
    code_has_been_output_ = true;
  }
  if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
    if (!outputting_code && bytes_to_output == to_skip &&
        IsAligned(bytes_to_output, kPointerAlignment) &&
        bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
      int size_in_words = bytes_to_output >> kPointerSizeLog2;
      sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
      to_skip = 0;  // This instruction includes skip.
    } else {
      // We always end up here if we are outputting the code of a code object.
      sink_->Put(kVariableRawData, "VariableRawData");
      sink_->PutInt(bytes_to_output, "length");
    }

    if (is_code_object) object_start = PrepareCode();

    const char* description = is_code_object ? "Code" : "Byte";
    sink_->PutRaw(object_start + base, bytes_to_output, description);
  }
  if (to_skip != 0 && return_skip == kIgnoringReturn) {
    sink_->Put(kSkip, "Skip");
    sink_->PutInt(to_skip, "SkipDistance");
    to_skip = 0;
  }
  return to_skip;
}

}  // namespace internal
}  // namespace v8