// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/serializer.h"

#include "src/macro-assembler.h"
#include "src/snapshot/natives.h"

namespace v8 {
namespace internal {

Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
    : isolate_(isolate),
      sink_(sink),
      external_reference_encoder_(isolate),
      root_index_map_(isolate),
      recursion_depth_(0),
      code_address_map_(NULL),
      large_objects_total_size_(0),
      seen_large_objects_index_(0) {
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    pending_chunk_[i] = 0;
    max_chunk_size_[i] = static_cast<uint32_t>(
        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    instance_type_count_ = NewArray<int>(kInstanceTypes);
    instance_type_size_ = NewArray<size_t>(kInstanceTypes);
    for (int i = 0; i < kInstanceTypes; i++) {
      instance_type_count_[i] = 0;
      instance_type_size_[i] = 0;
    }
  } else {
    instance_type_count_ = NULL;
    instance_type_size_ = NULL;
  }
#endif  // OBJECT_PRINT
}

Serializer::~Serializer() {
  if (code_address_map_ != NULL) delete code_address_map_;
#ifdef OBJECT_PRINT
  if (instance_type_count_ != NULL) {
    DeleteArray(instance_type_count_);
    DeleteArray(instance_type_size_);
  }
#endif  // OBJECT_PRINT
}

#ifdef OBJECT_PRINT
void Serializer::CountInstanceType(Map* map, int size) {
  int instance_type = map->instance_type();
  instance_type_count_[instance_type]++;
  instance_type_size_[instance_type] += size;
}
#endif  // OBJECT_PRINT

void Serializer::OutputStatistics(const char* name) {
  if (!FLAG_serialization_statistics) return;
  PrintF("%s:\n", name);
  PrintF("  Spaces (bytes):\n");
  for (int space = 0; space < kNumberOfSpaces; space++) {
    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
  }
  PrintF("\n");
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    size_t s = pending_chunk_[space];
    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
    PrintF("%16" V8_SIZET_PREFIX V8_PTR_PREFIX "d", s);
  }
  PrintF("%16d\n", large_objects_total_size_);
#ifdef OBJECT_PRINT
  PrintF("  Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name)                                         \
  if (instance_type_count_[Name]) {                                       \
    PrintF("%10d %10" V8_SIZET_PREFIX V8_PTR_PREFIX "d %s\n",             \
           instance_type_count_[Name], instance_type_size_[Name], #Name); \
  }
  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
  PrintF("\n");
#endif  // OBJECT_PRINT
}

void Serializer::SerializeDeferredObjects() {
  while (deferred_objects_.length() > 0) {
    HeapObject* obj = deferred_objects_.RemoveLast();
    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
    obj_serializer.SerializeDeferred();
  }
  sink_->Put(kSynchronize, "Finished with deferred objects");
}

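// Visiting the root list: Smis are serialized as raw one-word data, heap
// objects are serialized recursively.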
void Serializer::VisitPointers(Object** start, Object** end) {
  for (Object** current = start; current < end; current++) {
    if ((*current)->IsSmi()) {
      PutSmi(Smi::cast(*current));
    } else {
      SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
    }
  }
}

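// Encode one reservation per chunk so that the deserializer can reserve
// matching memory up front. The pending chunk of each space is included, and
// the last reservation per space is marked to delimit the spaces.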
void Serializer::EncodeReservations(
    List<SerializedData::Reservation>* out) const {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    for (int j = 0; j < completed_chunks_[i].length(); j++) {
      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
    }

    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
      out->Add(SerializedData::Reservation(pending_chunk_[i]));
    }
    out->last().mark_as_last();
  }

  out->Add(SerializedData::Reservation(large_objects_total_size_));
  out->last().mark_as_last();
}

#ifdef DEBUG
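// Sanity check: a valid back reference must point into memory that the
// serializer has already accounted for in its chunk bookkeeping.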
bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
  DCHECK(reference.is_valid());
  DCHECK(!reference.is_source());
  DCHECK(!reference.is_global_proxy());
  AllocationSpace space = reference.space();
  int chunk_index = reference.chunk_index();
  if (space == LO_SPACE) {
    return chunk_index == 0 &&
           reference.large_object_index() < seen_large_objects_index_;
  } else if (chunk_index == completed_chunks_[space].length()) {
    return reference.chunk_offset() < pending_chunk_[space];
  } else {
    return chunk_index < completed_chunks_[space].length() &&
           reference.chunk_offset() < completed_chunks_[space][chunk_index];
  }
}
#endif  // DEBUG

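// Try to encode the object as a hot object, an attached reference, or a back
// reference to an already serialized object. Returns false if the object
// still needs to be serialized in full.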
bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                                      WhereToPoint where_to_point, int skip) {
  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
    // Encode a reference to a hot object by its index in the working set.
    int index = hot_objects_.Find(obj);
    if (index != HotObjectsList::kNotFound) {
      DCHECK(index >= 0 && index < kNumberOfHotObjects);
      if (FLAG_trace_serializer) {
        PrintF(" Encoding hot object %d:", index);
        obj->ShortPrint();
        PrintF("\n");
      }
      if (skip != 0) {
        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
        sink_->PutInt(skip, "HotObjectSkipDistance");
      } else {
        sink_->Put(kHotObject + index, "HotObject");
      }
      return true;
    }
  }
  BackReference back_reference = back_reference_map_.Lookup(obj);
  if (back_reference.is_valid()) {
    // Encode the location of an already deserialized object in order to write
    // its location into a later object. We can encode the location as an
    // offset from the start of the deserialized objects or as an offset
    // backwards from the current allocation pointer.
    if (back_reference.is_source()) {
      FlushSkip(skip);
      if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
      sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
    } else if (back_reference.is_global_proxy()) {
      FlushSkip(skip);
      if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
      sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
      sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
    } else {
      if (FLAG_trace_serializer) {
        PrintF(" Encoding back reference to: ");
        obj->ShortPrint();
        PrintF("\n");
      }

      PutAlignmentPrefix(obj);
      AllocationSpace space = back_reference.space();
      if (skip == 0) {
        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
      } else {
        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
                   "BackRefWithSkip");
        sink_->PutInt(skip, "BackRefSkipDistance");
      }
      PutBackReference(obj, back_reference);
    }
    return true;
  }
  return false;
}

void Serializer::PutRoot(int root_index, HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point,
                         int skip) {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding root %d:", root_index);
    object->ShortPrint();
    PrintF("\n");
  }

  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
      root_index < kNumberOfRootArrayConstants &&
      !isolate()->heap()->InNewSpace(object)) {
    if (skip == 0) {
      sink_->Put(kRootArrayConstants + root_index, "RootConstant");
    } else {
      sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
      sink_->PutInt(skip, "SkipInPutRoot");
    }
  } else {
    FlushSkip(skip);
    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_->PutInt(root_index, "root_index");
  }
}

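// A Smi is emitted as one word of raw data; the value is already encoded in
// the tagged word itself.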
void Serializer::PutSmi(Smi* smi) {
  sink_->Put(kOnePointerRawData, "Smi");
  byte* bytes = reinterpret_cast<byte*>(&smi);
  for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
}

void Serializer::PutBackReference(HeapObject* object, BackReference reference) {
  DCHECK(BackReferenceIsAlreadyAllocated(reference));
  sink_->PutInt(reference.reference(), "BackRefValue");
  hot_objects_.Add(object);
}

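// Emit an alignment prefix if the object requires more than word alignment,
// and return the maximum number of fill bytes that alignment may need.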
int Serializer::PutAlignmentPrefix(HeapObject* object) {
  AllocationAlignment alignment = object->RequiredAlignment();
  if (alignment != kWordAligned) {
    DCHECK(1 <= alignment && alignment <= 3);
    byte prefix = (kAlignmentPrefix - 1) + alignment;
    sink_->Put(prefix, "Alignment");
    return Heap::GetMaximumFillToAlign(alignment);
  }
  return 0;
}

BackReference Serializer::AllocateLargeObject(int size) {
  // Large objects are allocated one-by-one when deserializing. We do not
  // have to keep track of multiple chunks.
  large_objects_total_size_ += size;
  return BackReference::LargeObjectReference(seen_large_objects_index_++);
}

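// Bump-allocate into the pending chunk of the given space. If the object
// does not fit on the current page, complete the chunk and start a new one,
// mirroring the page layout the deserializer will create.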
BackReference Serializer::Allocate(AllocationSpace space, int size) {
  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
  uint32_t new_chunk_size = pending_chunk_[space] + size;
  if (new_chunk_size > max_chunk_size(space)) {
    // The new chunk size would not fit onto a single page. Complete the
    // current chunk and start a new one.
    sink_->Put(kNextChunk, "NextChunk");
    sink_->Put(space, "NextChunkSpace");
    completed_chunks_[space].Add(pending_chunk_[space]);
    DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
    pending_chunk_[space] = 0;
    new_chunk_size = size;
  }
  uint32_t offset = pending_chunk_[space];
  pending_chunk_[space] = new_chunk_size;
  return BackReference::Reference(space, completed_chunks_[space].length(),
                                  offset);
}

void Serializer::Pad() {
  // The non-branching GetInt will read up to 3 bytes too far, so we need
  // to pad the snapshot to make sure we don't read over the end.
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
    sink_->Put(kNop, "Padding");
  }
  // Pad up to pointer size for checksum.
  while (!IsAligned(sink_->Position(), kPointerAlignment)) {
    sink_->Put(kNop, "Padding");
  }
}

void Serializer::InitializeCodeAddressMap() {
  isolate_->InitializeLoggingAndCounters();
  code_address_map_ = new CodeAddressMap(isolate_);
}

Code* Serializer::CopyCode(Code* code) {
  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
  int size = code->CodeSize();
  code_buffer_.AddAll(Vector<byte>(code->address(), size));
  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
}

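// True while no space has completed a chunk yet, i.e. everything serialized
// so far fits into the first page of each preallocated space.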
bool Serializer::HasNotExceededFirstPageOfEachSpace() {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    if (!completed_chunks_[i].is_empty()) return false;
  }
  return true;
}

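// Emit the allocation instructions for the object (space, size in words and,
// for large objects, executability), register its back reference, and
// serialize its map, which occupies the first word of the object.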
void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
                                                     int size, Map* map) {
  if (serializer_->code_address_map_) {
    const char* code_name =
        serializer_->code_address_map_->Lookup(object_->address());
    LOG(serializer_->isolate_,
        CodeNameEvent(object_->address(), sink_->Position(), code_name));
  }

  BackReference back_reference;
  if (space == LO_SPACE) {
    sink_->Put(kNewObject + reference_representation_ + space,
               "NewLargeObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
    if (object_->IsCode()) {
      sink_->Put(EXECUTABLE, "executable large object");
    } else {
      sink_->Put(NOT_EXECUTABLE, "not executable large object");
    }
    back_reference = serializer_->AllocateLargeObject(size);
  } else {
    int fill = serializer_->PutAlignmentPrefix(object_);
    back_reference = serializer_->Allocate(space, size + fill);
    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    serializer_->CountInstanceType(map, size);
  }
#endif  // OBJECT_PRINT

  // Mark this object as already serialized.
  serializer_->back_reference_map()->Add(object_, back_reference);

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}

void Serializer::ObjectSerializer::SerializeExternalString() {
  // Instead of serializing this as an external string, we serialize
  // an imaginary sequential string with the same content.
  Isolate* isolate = serializer_->isolate();
  DCHECK(object_->IsExternalString());
  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
  ExternalString* string = ExternalString::cast(object_);
  int length = string->length();
  Map* map;
  int content_size;
  int allocation_size;
  const byte* resource;
  // Find the map and size for the imaginary sequential string.
  bool internalized = object_->IsInternalizedString();
  if (object_->IsExternalOneByteString()) {
    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
                       : isolate->heap()->one_byte_string_map();
    allocation_size = SeqOneByteString::SizeFor(length);
    content_size = length * kCharSize;
    resource = reinterpret_cast<const byte*>(
        ExternalOneByteString::cast(string)->resource()->data());
  } else {
    map = internalized ? isolate->heap()->internalized_string_map()
                       : isolate->heap()->string_map();
    allocation_size = SeqTwoByteString::SizeFor(length);
    content_size = length * kShortSize;
    resource = reinterpret_cast<const byte*>(
        ExternalTwoByteString::cast(string)->resource()->data());
  }

  AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
                              ? LO_SPACE
                              : OLD_SPACE;
  SerializePrologue(space, allocation_size, map);

  // Output the rest of the imaginary string.
  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;

  // Output raw data header. Do not bother with common raw length cases here.
  sink_->Put(kVariableRawData, "RawDataForString");
  sink_->PutInt(bytes_to_output, "length");

  // Serialize string header (except for map).
  Address string_start = string->address();
  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
    sink_->PutSection(string_start[i], "StringHeader");
  }

  // Serialize string content.
  sink_->PutRaw(resource, content_size, "StringContent");

  // Since the allocation size is rounded up to object alignment, there
  // may be left-over bytes that need to be padded.
  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");

  sink_->Put(kSkip, "SkipAfterString");
  sink_->PutInt(bytes_to_output, "SkipDistance");
}

// Clear and later restore the next link in the weak cell or allocation site.
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
 public:
  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
    if (object->IsWeakCell()) {
      object_ = object;
      next_ = WeakCell::cast(object)->next();
      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
    } else if (object->IsAllocationSite()) {
      object_ = object;
      next_ = AllocationSite::cast(object)->weak_next();
      AllocationSite::cast(object)->set_weak_next(
          object->GetHeap()->undefined_value());
    }
  }

  ~UnlinkWeakNextScope() {
    if (object_ != nullptr) {
      if (object_->IsWeakCell()) {
        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
      } else {
        AllocationSite::cast(object_)->set_weak_next(next_,
                                                     UPDATE_WEAK_WRITE_BARRIER);
      }
    }
  }

 private:
  HeapObject* object_;
  Object* next_;
  DisallowHeapAllocation no_gc_;
};

void Serializer::ObjectSerializer::Serialize() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  // We cannot serialize typed array objects correctly.
  DCHECK(!object_->IsJSTypedArray());

  // We don't expect fillers.
  DCHECK(!object_->IsFiller());

  if (object_->IsScript()) {
    // Clear cached line ends.
    Object* undefined = serializer_->isolate()->heap()->undefined_value();
    Script::cast(object_)->set_line_ends(undefined);
  }

  if (object_->IsExternalString()) {
    Heap* heap = serializer_->isolate()->heap();
    if (object_->map() != heap->native_source_string_map()) {
      // Usually we cannot recreate resources for external strings. To work
      // around this, external strings are serialized to look like ordinary
      // sequential strings.
      // The exceptions are native source code strings, since we can recreate
      // their resources. In that case we fall through and leave it to
      // VisitExternalOneByteString further down.
      SerializeExternalString();
      return;
    }
  }

  int size = object_->Size();
  Map* map = object_->map();
  AllocationSpace space =
      MemoryChunk::FromAddress(object_->address())->owner()->identity();
  SerializePrologue(space, size, map);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  RecursionScope recursion(serializer_);
  // Objects that are immediately post processed during deserialization
  // cannot be deferred, since post processing requires the object content.
  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
    serializer_->QueueDeferredObject(object_);
    sink_->Put(kDeferred, "Deferring object content");
    return;
  }

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

void Serializer::ObjectSerializer::SerializeDeferred() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding deferred heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  int size = object_->Size();
  Map* map = object_->map();
  BackReference reference = serializer_->back_reference_map()->Lookup(object_);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kPointerSize;

  serializer_->PutAlignmentPrefix(object_);
  sink_->Put(kNewObject + reference.space(), "deferred object");
  serializer_->PutBackReference(object_, reference);
  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");

  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  OutputRawData(object_->address() + size);
}

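// Serialize a pointer region of the current object. Runs of identical
// pointers to immortal immovable roots are compressed into repeat
// instructions; all other heap object pointers are serialized individually.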
void Serializer::ObjectSerializer::VisitPointers(Object** start,
                                                 Object** end) {
  Object** current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) current++;
    if (current < end) OutputRawData(reinterpret_cast<Address>(current));

    while (current < end && !(*current)->IsSmi()) {
      HeapObject* current_contents = HeapObject::cast(*current);
      int root_index = serializer_->root_index_map()->Lookup(current_contents);
      // Repeats are not subject to the write barrier so we can only use
      // immortal immovable root members. They are never in new space.
      if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
          Heap::RootIsImmortalImmovable(root_index) &&
          current_contents == current[-1]) {
        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
        int repeat_count = 1;
        while (&current[repeat_count] < end - 1 &&
               current[repeat_count] == current_contents) {
          repeat_count++;
        }
        current += repeat_count;
        bytes_processed_so_far_ += repeat_count * kPointerSize;
        if (repeat_count > kNumberOfFixedRepeat) {
          sink_->Put(kVariableRepeat, "VariableRepeat");
          sink_->PutInt(repeat_count, "repeat count");
        } else {
          sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
        }
      } else {
        serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
                                     0);
        bytes_processed_so_far_ += kPointerSize;
        current++;
      }
    }
  }
}

void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  Object* object = rinfo->target_object();
  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
                               kStartOfObject, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
  int skip = OutputRawData(reinterpret_cast<Address>(p),
                           kCanReturnSkipInsteadOfSkipping);
  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = *p;
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_external_reference();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
  // We can only serialize internal references of code that has already been
  // output.
  DCHECK(object_->IsCode() && code_has_been_output_);
  // We do not use skip from last patched pc to find the pc to patch, since
  // target_address_address may not return addresses in ascending order when
  // used for internal references. External references may be stored at the
  // end of the code in the constant pool, whereas internal references are
  // inline. That would cause the skip to be negative. Instead, we store the
  // offset from code entry.
  Address entry = Code::cast(object_)->entry();
  intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
  intptr_t target_offset = rinfo->target_internal_reference() - entry;
  DCHECK(0 <= pc_offset &&
         pc_offset <= Code::cast(object_)->instruction_size());
  DCHECK(0 <= target_offset &&
         target_offset <= Code::cast(object_)->instruction_size());
  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
                 ? kInternalReference
                 : kInternalReferenceEncoded,
             "InternalRef");
  sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
  sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}

void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_address();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
  serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}

void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
  Cell* object = Cell::cast(rinfo->target_cell());
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}

bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
    int builtin_count,
    v8::String::ExternalOneByteStringResource** resource_pointer,
    FixedArray* source_cache, int resource_index) {
  for (int i = 0; i < builtin_count; i++) {
    Object* source = source_cache->get(i);
    if (!source->IsUndefined()) {
      ExternalOneByteString* string = ExternalOneByteString::cast(source);
      typedef v8::String::ExternalOneByteStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        sink_->Put(resource_index, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return true;
      }
    }
  }
  return false;
}

void Serializer::ObjectSerializer::VisitExternalOneByteString(
    v8::String::ExternalOneByteStringResource** resource_pointer) {
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  if (SerializeExternalNativeSourceString(
          Natives::GetBuiltinsCount(), resource_pointer,
          Natives::GetSourceCache(serializer_->isolate()->heap()),
          kNativesStringResource)) {
    return;
  }
  if (SerializeExternalNativeSourceString(
          ExtraNatives::GetBuiltinsCount(), resource_pointer,
          ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
          kExtraNativesStringResource)) {
    return;
  }
  // One of the strings in the natives cache should match the resource. We
  // don't expect any other kinds of external strings here.
  UNREACHABLE();
}

Address Serializer::ObjectSerializer::PrepareCode() {
  // To make snapshots reproducible, we make a copy of the code object
  // and wipe all pointers in the copy, which we then serialize.
  Code* original = Code::cast(object_);
  Code* code = serializer_->CopyCode(original);
  // Code age headers are not serializable.
  code->MakeYoung(serializer_->isolate());
  int mode_mask = RelocInfo::kCodeTargetMask |
                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
    RelocInfo* rinfo = it.rinfo();
    rinfo->WipeOut();
  }
  // We need to wipe out the header fields *after* wiping out the
  // relocations, because some of these fields are needed for the latter.
  code->WipeOutHeader();
  return code->address();
}

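// Output raw bytes from the last output position up to |up_to|. Short
// pointer-aligned runs use the single-byte fixed raw data instructions. Code
// objects are output in one piece, after being copied and wiped of pointers
// by PrepareCode(). May return the remaining skip distance instead of
// emitting a kSkip, if the caller requested it.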
int Serializer::ObjectSerializer::OutputRawData(
    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
  Address object_start = object_->address();
  int base = bytes_processed_so_far_;
  int up_to_offset = static_cast<int>(up_to - object_start);
  int to_skip = up_to_offset - bytes_processed_so_far_;
  int bytes_to_output = to_skip;
  bytes_processed_so_far_ += to_skip;
  // This assert will fail if the reloc info gives us the
  // target_address_address locations in a non-ascending order. Luckily that
  // doesn't happen.
  DCHECK(to_skip >= 0);
  bool outputting_code = false;
  bool is_code_object = object_->IsCode();
  if (to_skip != 0 && is_code_object && !code_has_been_output_) {
    // Output the code all at once and fix later.
    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
    outputting_code = true;
    code_has_been_output_ = true;
  }
  if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
    if (!outputting_code && bytes_to_output == to_skip &&
        IsAligned(bytes_to_output, kPointerAlignment) &&
        bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
      int size_in_words = bytes_to_output >> kPointerSizeLog2;
      sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
      to_skip = 0;  // This instruction includes skip.
    } else {
      // We always end up here if we are outputting the code of a code object.
      sink_->Put(kVariableRawData, "VariableRawData");
      sink_->PutInt(bytes_to_output, "length");
    }

    if (is_code_object) object_start = PrepareCode();

    const char* description = is_code_object ? "Code" : "Byte";
    sink_->PutRaw(object_start + base, bytes_to_output, description);
  }
  if (to_skip != 0 && return_skip == kIgnoringReturn) {
    sink_->Put(kSkip, "Skip");
    sink_->PutInt(to_skip, "SkipDistance");
    to_skip = 0;
  }
  return to_skip;
}

}  // namespace internal
}  // namespace v8