Merge V8 5.3.332.45.  DO NOT MERGE

Test: Manual

FPIIM-449

Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
diff --git a/src/snapshot/code-serializer.cc b/src/snapshot/code-serializer.cc
index 1a2e077..4229607 100644
--- a/src/snapshot/code-serializer.cc
+++ b/src/snapshot/code-serializer.cc
@@ -7,7 +7,6 @@
 #include "src/code-stubs.h"
 #include "src/log.h"
 #include "src/macro-assembler.h"
-#include "src/profiler/cpu-profiler.h"
 #include "src/snapshot/deserializer.h"
 #include "src/version.h"
 
@@ -27,15 +26,14 @@
   }
 
   // Serialize code object.
-  SnapshotByteSink sink(info->code()->CodeSize() * 2);
-  CodeSerializer cs(isolate, &sink, *source);
+  CodeSerializer cs(isolate, *source);
   DisallowHeapAllocation no_gc;
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
   cs.SerializeDeferredObjects();
   cs.Pad();
 
-  SerializedCodeData data(sink.data(), cs);
+  SerializedCodeData data(cs.sink()->data(), &cs);
   ScriptData* script_data = data.GetScriptData();
 
   if (FLAG_profile_deserialization) {
@@ -49,13 +47,15 @@
 
 void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                      WhereToPoint where_to_point, int skip) {
+  if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
   int root_index = root_index_map_.Lookup(obj);
   if (root_index != RootIndexMap::kInvalidRootIndex) {
     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
     return;
   }
 
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+  if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
 
   FlushSkip(skip);
 
@@ -106,7 +106,7 @@
                                       HowToCode how_to_code,
                                       WhereToPoint where_to_point) {
   // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
+  ObjectSerializer serializer(this, heap_object, &sink_, how_to_code,
                               where_to_point);
   serializer.Serialize();
 }
@@ -124,8 +124,8 @@
            isolate()->builtins()->name(builtin_index));
   }
 
-  sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
-  sink_->PutInt(builtin_index, "builtin_index");
+  sink_.Put(kBuiltin + how_to_code + where_to_point, "Builtin");
+  sink_.PutInt(builtin_index, "builtin_index");
 }
 
 void CodeSerializer::SerializeCodeStub(Code* code_stub, HowToCode how_to_code,
@@ -185,15 +185,14 @@
   }
   result->set_deserialized(true);
 
-  if (isolate->logger()->is_logging_code_events() ||
-      isolate->cpu_profiler()->is_profiling()) {
+  if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
     String* name = isolate->heap()->empty_string();
     if (result->script()->IsScript()) {
       Script* script = Script::cast(result->script());
       if (script->name()->IsString()) name = String::cast(script->name());
     }
-    isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG,
-                                       result->abstract_code(), *result, name);
+    PROFILE(isolate, CodeCreateEvent(CodeEventListener::SCRIPT_TAG,
+                                     result->abstract_code(), *result, name));
   }
   return scope.CloseAndEscape(result);
 }
@@ -237,13 +236,13 @@
   DISALLOW_COPY_AND_ASSIGN(Checksum);
 };
 
-SerializedCodeData::SerializedCodeData(const List<byte>& payload,
-                                       const CodeSerializer& cs) {
+SerializedCodeData::SerializedCodeData(const List<byte>* payload,
+                                       const CodeSerializer* cs) {
   DisallowHeapAllocation no_gc;
-  const List<uint32_t>* stub_keys = cs.stub_keys();
+  const List<uint32_t>* stub_keys = cs->stub_keys();
 
   List<Reservation> reservations;
-  cs.EncodeReservations(&reservations);
+  cs->EncodeReservations(&reservations);
 
   // Calculate sizes.
   int reservation_size = reservations.length() * kInt32Size;
@@ -251,23 +250,23 @@
   int stub_keys_size = stub_keys->length() * kInt32Size;
   int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
   int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
-  int size = padded_payload_offset + payload.length();
+  int size = padded_payload_offset + payload->length();
 
   // Allocate backing store and create result data.
   AllocateData(size);
 
   // Set header values.
-  SetMagicNumber(cs.isolate());
+  SetMagicNumber(cs->isolate());
   SetHeaderValue(kVersionHashOffset, Version::Hash());
-  SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
+  SetHeaderValue(kSourceHashOffset, SourceHash(cs->source()));
   SetHeaderValue(kCpuFeaturesOffset,
                  static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
   SetHeaderValue(kFlagHashOffset, FlagList::Hash());
   SetHeaderValue(kNumReservationsOffset, reservations.length());
   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
-  SetHeaderValue(kPayloadLengthOffset, payload.length());
+  SetHeaderValue(kPayloadLengthOffset, payload->length());
 
-  Checksum checksum(payload.ToConstVector());
+  Checksum checksum(payload->ToConstVector());
   SetHeaderValue(kChecksum1Offset, checksum.a());
   SetHeaderValue(kChecksum2Offset, checksum.b());
 
@@ -282,8 +281,8 @@
   memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
 
   // Copy serialized data.
-  CopyBytes(data_ + padded_payload_offset, payload.begin(),
-            static_cast<size_t>(payload.length()));
+  CopyBytes(data_ + padded_payload_offset, payload->begin(),
+            static_cast<size_t>(payload->length()));
 }
 
 SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
diff --git a/src/snapshot/code-serializer.h b/src/snapshot/code-serializer.h
index 8ed4cf6..1948939 100644
--- a/src/snapshot/code-serializer.h
+++ b/src/snapshot/code-serializer.h
@@ -28,8 +28,8 @@
   const List<uint32_t>* stub_keys() const { return &stub_keys_; }
 
  private:
-  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
-      : Serializer(isolate, sink), source_(source) {
+  CodeSerializer(Isolate* isolate, String* source)
+      : Serializer(isolate), source_(source) {
     reference_map_.AddAttachedReference(source);
   }
 
@@ -60,7 +60,7 @@
                                             String* source);
 
   // Used when producing.
-  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
+  SerializedCodeData(const List<byte>* payload, const CodeSerializer* cs);
 
   // Return ScriptData object and relinquish ownership over it to the caller.
   ScriptData* GetScriptData();
diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc
index 88820ae..68d3489 100644
--- a/src/snapshot/deserializer.cc
+++ b/src/snapshot/deserializer.cc
@@ -31,9 +31,7 @@
 void Deserializer::FlushICacheForNewIsolate() {
   DCHECK(!deserializing_user_code_);
   // The entire isolate is newly deserialized. Simply flush all code pages.
-  PageIterator it(isolate_->heap()->code_space());
-  while (it.has_next()) {
-    Page* p = it.next();
+  for (Page* p : *isolate_->heap()->code_space()) {
     Assembler::FlushICache(isolate_, p->area_start(),
                            p->area_end() - p->area_start());
   }
@@ -101,10 +99,6 @@
         isolate_->heap()->undefined_value());
   }
 
-  // Update data pointers to the external strings containing natives sources.
-  Natives::UpdateSourceCache(isolate_->heap());
-  ExtraNatives::UpdateSourceCache(isolate_->heap());
-
   // Issue code events for newly deserialized code objects.
   LOG_CODE_EVENT(isolate_, LogCodeObjects());
   LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
@@ -481,6 +475,7 @@
         Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
         new_object = isolate->heap()->root(root_index);                        \
         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+        hot_objects_.Add(HeapObject::cast(new_object));                        \
       } else if (where == kPartialSnapshotCache) {                             \
         int cache_index = source_.GetInt();                                    \
         new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
@@ -507,12 +502,11 @@
         emit_write_barrier = false;                                            \
       }                                                                        \
       if (within == kInnerPointer) {                                           \
-        if (space_number != CODE_SPACE || new_object->IsCode()) {              \
-          Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
+        if (new_object->IsCode()) {                                            \
+          Code* new_code_object = Code::cast(new_object);                      \
           new_object =                                                         \
               reinterpret_cast<Object*>(new_code_object->instruction_start()); \
         } else {                                                               \
-          DCHECK(space_number == CODE_SPACE);                                  \
           Cell* cell = Cell::cast(new_object);                                 \
           new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
         }                                                                      \
@@ -579,6 +573,9 @@
       // pointer because it points at the entry point, not at the start of the
       // code object.
       SINGLE_CASE(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
+      // Support for pointers into a cell. It's an inner pointer because it
+      // points directly at the value field, not the start of the cell object.
+      SINGLE_CASE(kNewObject, kPlain, kInnerPointer, OLD_SPACE)
       // Deserialize a new code object and write a pointer to its first
       // instruction to the current code object.
       ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
@@ -605,8 +602,12 @@
       // object.
       ALL_SPACES(kBackref, kFromCode, kInnerPointer)
       ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
-      ALL_SPACES(kBackref, kPlain, kInnerPointer)
-      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
+      // Support for direct instruction pointers in functions.
+      SINGLE_CASE(kBackref, kPlain, kInnerPointer, CODE_SPACE)
+      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, CODE_SPACE)
+      // Support for pointers into a cell.
+      SINGLE_CASE(kBackref, kPlain, kInnerPointer, OLD_SPACE)
+      SINGLE_CASE(kBackrefWithSkip, kPlain, kInnerPointer, OLD_SPACE)
       // Find an object in the roots array and write a pointer to it to the
       // current object.
       SINGLE_CASE(kRootArray, kPlain, kStartOfObject, 0)
@@ -767,9 +768,8 @@
         int index = data & kHotObjectMask;
         Object* hot_object = hot_objects_.Get(index);
         UnalignedCopy(current, &hot_object);
-        if (write_barrier_needed) {
+        if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
           Address current_address = reinterpret_cast<Address>(current);
-          SLOW_DCHECK(isolate->heap()->ContainsSlow(current_object_address));
           isolate->heap()->RecordWrite(
               HeapObject::FromAddress(current_object_address),
               static_cast<int>(current_address - current_object_address),
diff --git a/src/snapshot/mksnapshot.cc b/src/snapshot/mksnapshot.cc
index 9fe611a..f4362e5 100644
--- a/src/snapshot/mksnapshot.cc
+++ b/src/snapshot/mksnapshot.cc
@@ -79,7 +79,7 @@
   }
 
   void WriteData(const i::Vector<const i::byte>& blob) const {
-    fprintf(fp_, "static const byte blob_data[] __attribute__((aligned(8))) = {\n");
+    fprintf(fp_, "static const byte blob_data[] = {\n");
     WriteSnapshotData(blob);
     fprintf(fp_, "};\n");
     fprintf(fp_, "static const int blob_size = %d;\n", blob.length());
@@ -150,7 +150,7 @@
   }
 
   i::CpuFeatures::Probe(true);
-  V8::InitializeICU();
+  V8::InitializeICUDefaultLocation(argv[0]);
   v8::Platform* platform = v8::platform::CreateDefaultPlatform();
   v8::V8::InitializePlatform(platform);
   v8::V8::Initialize();
diff --git a/src/snapshot/natives-common.cc b/src/snapshot/natives-common.cc
index f30e794..338b92b 100644
--- a/src/snapshot/natives-common.cc
+++ b/src/snapshot/natives-common.cc
@@ -34,24 +34,5 @@
   return heap->experimental_extra_natives_source_cache();
 }
 
-
-template <NativeType type>
-void NativesCollection<type>::UpdateSourceCache(Heap* heap) {
-  for (int i = 0; i < GetBuiltinsCount(); i++) {
-    Object* source = GetSourceCache(heap)->get(i);
-    if (!source->IsUndefined()) {
-      ExternalOneByteString::cast(source)->update_data_cache();
-    }
-  }
-}
-
-
-// Explicit template instantiations.
-template void NativesCollection<CORE>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXPERIMENTAL>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXTRAS>::UpdateSourceCache(Heap* heap);
-template void NativesCollection<EXPERIMENTAL_EXTRAS>::UpdateSourceCache(
-    Heap* heap);
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/snapshot/natives.h b/src/snapshot/natives.h
index 07f6b1a..e447515 100644
--- a/src/snapshot/natives.h
+++ b/src/snapshot/natives.h
@@ -44,7 +44,6 @@
   // The following methods are implemented in natives-common.cc:
 
   static FixedArray* GetSourceCache(Heap* heap);
-  static void UpdateSourceCache(Heap* heap);
 };
 
 typedef NativesCollection<CORE> Natives;
diff --git a/src/snapshot/partial-serializer.cc b/src/snapshot/partial-serializer.cc
index 34defb4..b46f675 100644
--- a/src/snapshot/partial-serializer.cc
+++ b/src/snapshot/partial-serializer.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/snapshot/partial-serializer.h"
+#include "src/snapshot/startup-serializer.h"
 
 #include "src/objects-inl.h"
 
@@ -10,11 +11,8 @@
 namespace internal {
 
 PartialSerializer::PartialSerializer(Isolate* isolate,
-                                     Serializer* startup_snapshot_serializer,
-                                     SnapshotByteSink* sink)
-    : Serializer(isolate, sink),
-      startup_serializer_(startup_snapshot_serializer),
-      next_partial_cache_index_(0) {
+                                     StartupSerializer* startup_serializer)
+    : Serializer(isolate), startup_serializer_(startup_serializer) {
   InitializeCodeAddressMap();
 }
 
@@ -34,7 +32,7 @@
     if (context->IsNativeContext()) {
       context->set(Context::NEXT_CONTEXT_LINK,
                    isolate_->heap()->undefined_value());
-      DCHECK(!context->global_object()->IsUndefined());
+      DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
     }
   }
   VisitPointer(o);
@@ -53,19 +51,23 @@
   // Replace typed arrays by undefined.
   if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
 
+  if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
   int root_index = root_index_map_.Lookup(obj);
   if (root_index != RootIndexMap::kInvalidRootIndex) {
     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
     return;
   }
 
+  if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
+
   if (ShouldBeInThePartialSnapshotCache(obj)) {
     FlushSkip(skip);
 
-    int cache_index = PartialSnapshotCacheIndex(obj);
-    sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
-               "PartialSnapshotCache");
-    sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+    int cache_index = startup_serializer_->PartialSnapshotCacheIndex(obj);
+    sink_.Put(kPartialSnapshotCache + how_to_code + where_to_point,
+              "PartialSnapshotCache");
+    sink_.PutInt(cache_index, "partial_snapshot_cache_index");
     return;
   }
 
@@ -76,35 +78,26 @@
   // All the internalized strings that the partial snapshot needs should be
   // either in the root table or in the partial snapshot cache.
   DCHECK(!obj->IsInternalizedString());
-
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+  // Function and object templates are not context specific.
+  DCHECK(!obj->IsTemplateInfo());
 
   FlushSkip(skip);
 
   // Clear literal boilerplates.
   if (obj->IsJSFunction()) {
-    FixedArray* literals = JSFunction::cast(obj)->literals();
-    for (int i = 0; i < literals->length(); i++) literals->set_undefined(i);
+    JSFunction* function = JSFunction::cast(obj);
+    LiteralsArray* literals = function->literals();
+    for (int i = 0; i < literals->literals_count(); i++) {
+      literals->set_literal_undefined(i);
+    }
+    function->ClearTypeFeedbackInfo();
   }
 
   // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
+  ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
   serializer.Serialize();
 }
 
-int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
-  int index = partial_cache_index_map_.LookupOrInsert(
-      heap_object, next_partial_cache_index_);
-  if (index == PartialCacheIndexMap::kInvalidIndex) {
-    // This object is not part of the partial snapshot cache yet. Add it to the
-    // startup snapshot so we can refer to it via partial snapshot index from
-    // the partial snapshot.
-    startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
-    return next_partial_cache_index_++;
-  }
-  return index;
-}
-
 bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
   // Scripts should be referred only through shared function infos.  We can't
   // allow them to be part of the partial snapshot because they contain a
diff --git a/src/snapshot/partial-serializer.h b/src/snapshot/partial-serializer.h
index ddaba5f..282f76e 100644
--- a/src/snapshot/partial-serializer.h
+++ b/src/snapshot/partial-serializer.h
@@ -11,10 +11,11 @@
 namespace v8 {
 namespace internal {
 
+class StartupSerializer;
+
 class PartialSerializer : public Serializer {
  public:
-  PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
-                    SnapshotByteSink* sink);
+  PartialSerializer(Isolate* isolate, StartupSerializer* startup_serializer);
 
   ~PartialSerializer() override;
 
@@ -22,36 +23,12 @@
   void Serialize(Object** o);
 
  private:
-  class PartialCacheIndexMap : public AddressMapBase {
-   public:
-    PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
-
-    static const int kInvalidIndex = -1;
-
-    // Lookup object in the map. Return its index if found, or create
-    // a new entry with new_index as value, and return kInvalidIndex.
-    int LookupOrInsert(HeapObject* obj, int new_index) {
-      HashMap::Entry* entry = LookupEntry(&map_, obj, false);
-      if (entry != NULL) return GetValue(entry);
-      SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
-      return kInvalidIndex;
-    }
-
-   private:
-    HashMap map_;
-
-    DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
-  };
-
   void SerializeObject(HeapObject* o, HowToCode how_to_code,
                        WhereToPoint where_to_point, int skip) override;
 
-  int PartialSnapshotCacheIndex(HeapObject* o);
   bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
 
-  Serializer* startup_serializer_;
-  PartialCacheIndexMap partial_cache_index_map_;
-  int next_partial_cache_index_;
+  StartupSerializer* startup_serializer_;
   DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
 };
 
diff --git a/src/snapshot/serializer-common.cc b/src/snapshot/serializer-common.cc
index 4afaa20..41c68e8 100644
--- a/src/snapshot/serializer-common.cc
+++ b/src/snapshot/serializer-common.cc
@@ -14,7 +14,7 @@
 ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
   map_ = isolate->external_reference_map();
   if (map_ != NULL) return;
-  map_ = new HashMap(HashMap::PointersMatch);
+  map_ = new base::HashMap(base::HashMap::PointersMatch);
   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
   for (int i = 0; i < table->size(); ++i) {
     Address addr = table->address(i);
@@ -31,16 +31,16 @@
 
 uint32_t ExternalReferenceEncoder::Encode(Address address) const {
   DCHECK_NOT_NULL(address);
-  HashMap::Entry* entry =
-      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+  base::HashMap::Entry* entry =
+      const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
   DCHECK_NOT_NULL(entry);
   return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
 }
 
 const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
                                                     Address address) const {
-  HashMap::Entry* entry =
-      const_cast<HashMap*>(map_)->Lookup(address, Hash(address));
+  base::HashMap::Entry* entry =
+      const_cast<base::HashMap*>(map_)->Lookup(address, Hash(address));
   if (entry == NULL) return "<unknown>";
   uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
   return ExternalReferenceTable::instance(isolate)->name(i);
@@ -67,7 +67,7 @@
     // During deserialization, the visitor populates the partial snapshot cache
     // and eventually terminates the cache with undefined.
     visitor->VisitPointer(&cache->at(i));
-    if (cache->at(i)->IsUndefined()) break;
+    if (cache->at(i)->IsUndefined(isolate)) break;
   }
 }
 
diff --git a/src/snapshot/serializer-common.h b/src/snapshot/serializer-common.h
index 1ce5ced..bdd2b51 100644
--- a/src/snapshot/serializer-common.h
+++ b/src/snapshot/serializer-common.h
@@ -28,7 +28,7 @@
                                  kPointerSizeLog2);
   }
 
-  HashMap* map_;
+  base::HashMap* map_;
 
   DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
 };
@@ -94,31 +94,26 @@
   STATIC_ASSERT(5 == kNumberOfSpaces);
   enum Where {
     // 0x00..0x04  Allocate new object, in specified space.
-    kNewObject = 0,
-    // 0x05        Unused (including 0x25, 0x45, 0x65).
-    // 0x06        Unused (including 0x26, 0x46, 0x66).
-    // 0x07        Unused (including 0x27, 0x47, 0x67).
+    kNewObject = 0x00,
     // 0x08..0x0c  Reference to previous object from space.
     kBackref = 0x08,
-    // 0x0d        Unused (including 0x2d, 0x4d, 0x6d).
-    // 0x0e        Unused (including 0x2e, 0x4e, 0x6e).
-    // 0x0f        Unused (including 0x2f, 0x4f, 0x6f).
     // 0x10..0x14  Reference to previous object from space after skip.
     kBackrefWithSkip = 0x10,
-    // 0x15        Unused (including 0x35, 0x55, 0x75).
-    // 0x16        Unused (including 0x36, 0x56, 0x76).
-    // 0x17        Misc (including 0x37, 0x57, 0x77).
-    // 0x18        Root array item.
-    kRootArray = 0x18,
-    // 0x19        Object in the partial snapshot cache.
-    kPartialSnapshotCache = 0x19,
-    // 0x1a        External reference referenced by id.
-    kExternalReference = 0x1a,
-    // 0x1b        Object provided in the attached list.
-    kAttachedReference = 0x1b,
-    // 0x1c        Builtin code referenced by index.
-    kBuiltin = 0x1c
-    // 0x1d..0x1f  Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
+
+    // 0x05       Root array item.
+    kRootArray = 0x05,
+    // 0x06        Object in the partial snapshot cache.
+    kPartialSnapshotCache = 0x06,
+    // 0x07        External reference referenced by id.
+    kExternalReference = 0x07,
+
+    // 0x0d        Object provided in the attached list.
+    kAttachedReference = 0x0d,
+    // 0x0e        Builtin code referenced by index.
+    kBuiltin = 0x0e,
+
+    // 0x0f        Misc, see below (incl. 0x2f, 0x4f, 0x6f).
+    // 0x15..0x1f  Misc, see below (incl. 0x35..0x3f, 0x55..0x5f, 0x75..0x7f).
   };
 
   static const int kWhereMask = 0x1f;
@@ -147,36 +142,45 @@
 
   // ---------- Misc ----------
   // Skip.
-  static const int kSkip = 0x1d;
-  // Internal reference encoded as offsets of pc and target from code entry.
-  static const int kInternalReference = 0x1e;
-  static const int kInternalReferenceEncoded = 0x1f;
+  static const int kSkip = 0x0f;
   // Do nothing, used for padding.
-  static const int kNop = 0x3d;
+  static const int kNop = 0x2f;
   // Move to next reserved chunk.
-  static const int kNextChunk = 0x3e;
+  static const int kNextChunk = 0x4f;
   // Deferring object content.
-  static const int kDeferred = 0x3f;
-  // Used for the source code of the natives, which is in the executable, but
-  // is referred to from external strings in the snapshot.
-  static const int kNativesStringResource = 0x5d;
-  // Used for the source code for compiled stubs, which is in the executable,
-  // but is referred to from external strings in the snapshot.
-  static const int kExtraNativesStringResource = 0x5e;
+  static const int kDeferred = 0x6f;
+  // Alignment prefixes 0x15..0x17
+  static const int kAlignmentPrefix = 0x15;
   // A tag emitted at strategic points in the snapshot to delineate sections.
   // If the deserializer does not find these at the expected moments then it
   // is an indication that the snapshot and the VM do not fit together.
   // Examine the build process for architecture, version or configuration
   // mismatches.
-  static const int kSynchronize = 0x17;
+  static const int kSynchronize = 0x18;
   // Repeats of variable length.
-  static const int kVariableRepeat = 0x37;
+  static const int kVariableRepeat = 0x19;
   // Raw data of variable length.
-  static const int kVariableRawData = 0x57;
-  // Alignment prefixes 0x7d..0x7f
-  static const int kAlignmentPrefix = 0x7d;
+  static const int kVariableRawData = 0x1a;
+  // Internal reference encoded as offsets of pc and target from code entry.
+  static const int kInternalReference = 0x1b;
+  static const int kInternalReferenceEncoded = 0x1c;
+  // Used for the source code of the natives, which is in the executable, but
+  // is referred to from external strings in the snapshot.
+  static const int kNativesStringResource = 0x1d;
+  // Used for the source code for compiled stubs, which is in the executable,
+  // but is referred to from external strings in the snapshot.
+  static const int kExtraNativesStringResource = 0x1e;
 
-  // 0x77 unused
+  // 8 hot (recently seen or back-referenced) objects with optional skip.
+  static const int kNumberOfHotObjects = 8;
+  STATIC_ASSERT(kNumberOfHotObjects == HotObjectsList::kSize);
+  // 0x38..0x3f
+  static const int kHotObject = 0x38;
+  // 0x58..0x5f
+  static const int kHotObjectWithSkip = 0x58;
+  static const int kHotObjectMask = 0x07;
+
+  // 0x1f, 0x35..0x37, 0x55..0x57, 0x75..0x7f unused.
 
   // ---------- byte code range 0x80..0xff ----------
   // First 32 root array items.
@@ -187,27 +191,21 @@
   static const int kRootArrayConstantsWithSkip = 0xa0;
   static const int kRootArrayConstantsMask = 0x1f;
 
-  // 8 hot (recently seen or back-referenced) objects with optional skip.
-  static const int kNumberOfHotObjects = 0x08;
-  // 0xc0..0xc7
-  static const int kHotObject = 0xc0;
-  // 0xc8..0xcf
-  static const int kHotObjectWithSkip = 0xc8;
-  static const int kHotObjectMask = 0x07;
-
   // 32 common raw data lengths.
   static const int kNumberOfFixedRawData = 0x20;
-  // 0xd0..0xef
-  static const int kFixedRawData = 0xd0;
+  // 0xc0..0xdf
+  static const int kFixedRawData = 0xc0;
   static const int kOnePointerRawData = kFixedRawData;
   static const int kFixedRawDataStart = kFixedRawData - 1;
 
   // 16 repeats lengths.
   static const int kNumberOfFixedRepeat = 0x10;
-  // 0xf0..0xff
-  static const int kFixedRepeat = 0xf0;
+  // 0xe0..0xef
+  static const int kFixedRepeat = 0xe0;
   static const int kFixedRepeatStart = kFixedRepeat - 1;
 
+  // 0xf0..0xff unused.
+
   // ---------- special values ----------
   static const int kAnyOldSpace = -1;
 
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
index f6f2200..b6a75ff 100644
--- a/src/snapshot/serializer.cc
+++ b/src/snapshot/serializer.cc
@@ -10,9 +10,8 @@
 namespace v8 {
 namespace internal {
 
-Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
+Serializer::Serializer(Isolate* isolate)
     : isolate_(isolate),
-      sink_(sink),
       external_reference_encoder_(isolate),
       root_index_map_(isolate),
       recursion_depth_(0),
@@ -90,10 +89,10 @@
 void Serializer::SerializeDeferredObjects() {
   while (deferred_objects_.length() > 0) {
     HeapObject* obj = deferred_objects_.RemoveLast();
-    ObjectSerializer obj_serializer(this, obj, sink_, kPlain, kStartOfObject);
+    ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
     obj_serializer.SerializeDeferred();
   }
-  sink_->Put(kSynchronize, "Finished with deferred objects");
+  sink_.Put(kSynchronize, "Finished with deferred objects");
 }
 
 void Serializer::VisitPointers(Object** start, Object** end) {
@@ -141,62 +140,61 @@
 }
 #endif  // DEBUG
 
-bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
-                                      WhereToPoint where_to_point, int skip) {
-  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
-    // Encode a reference to a hot object by its index in the working set.
-    int index = hot_objects_.Find(obj);
-    if (index != HotObjectsList::kNotFound) {
-      DCHECK(index >= 0 && index < kNumberOfHotObjects);
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding hot object %d:", index);
-        obj->ShortPrint();
-        PrintF("\n");
-      }
-      if (skip != 0) {
-        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
-        sink_->PutInt(skip, "HotObjectSkipDistance");
-      } else {
-        sink_->Put(kHotObject + index, "HotObject");
-      }
-      return true;
-    }
+bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
+                                    WhereToPoint where_to_point, int skip) {
+  if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
+  // Encode a reference to a hot object by its index in the working set.
+  int index = hot_objects_.Find(obj);
+  if (index == HotObjectsList::kNotFound) return false;
+  DCHECK(index >= 0 && index < kNumberOfHotObjects);
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding hot object %d:", index);
+    obj->ShortPrint();
+    PrintF("\n");
   }
+  if (skip != 0) {
+    sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
+    sink_.PutInt(skip, "HotObjectSkipDistance");
+  } else {
+    sink_.Put(kHotObject + index, "HotObject");
+  }
+  return true;
+}
+bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
+                                        WhereToPoint where_to_point, int skip) {
   SerializerReference reference = reference_map_.Lookup(obj);
-  if (reference.is_valid()) {
-    // Encode the location of an already deserialized object in order to write
-    // its location into a later object.  We can encode the location as an
-    // offset fromthe start of the deserialized objects or as an offset
-    // backwards from thecurrent allocation pointer.
-    if (reference.is_attached_reference()) {
-      FlushSkip(skip);
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding attached reference %d\n",
-               reference.attached_reference_index());
-      }
-      PutAttachedReference(reference, how_to_code, where_to_point);
-    } else {
-      DCHECK(reference.is_back_reference());
-      if (FLAG_trace_serializer) {
-        PrintF(" Encoding back reference to: ");
-        obj->ShortPrint();
-        PrintF("\n");
-      }
-
-      PutAlignmentPrefix(obj);
-      AllocationSpace space = reference.space();
-      if (skip == 0) {
-        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
-      } else {
-        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
-                   "BackRefWithSkip");
-        sink_->PutInt(skip, "BackRefSkipDistance");
-      }
-      PutBackReference(obj, reference);
+  if (!reference.is_valid()) return false;
+  // Encode the location of an already deserialized object in order to write
+  // its location into a later object.  We can encode the location as an
+  // offset from the start of the deserialized objects or as an offset
+  // backwards from the current allocation pointer.
+  if (reference.is_attached_reference()) {
+    FlushSkip(skip);
+    if (FLAG_trace_serializer) {
+      PrintF(" Encoding attached reference %d\n",
+             reference.attached_reference_index());
     }
-    return true;
+    PutAttachedReference(reference, how_to_code, where_to_point);
+  } else {
+    DCHECK(reference.is_back_reference());
+    if (FLAG_trace_serializer) {
+      PrintF(" Encoding back reference to: ");
+      obj->ShortPrint();
+      PrintF("\n");
+    }
+
+    PutAlignmentPrefix(obj);
+    AllocationSpace space = reference.space();
+    if (skip == 0) {
+      sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
+    } else {
+      sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
+                "BackRefWithSkip");
+      sink_.PutInt(skip, "BackRefSkipDistance");
+    }
+    PutBackReference(obj, reference);
   }
-  return false;
+  return true;
 }
 
 void Serializer::PutRoot(int root_index, HeapObject* object,
@@ -213,28 +211,29 @@
       root_index < kNumberOfRootArrayConstants &&
       !isolate()->heap()->InNewSpace(object)) {
     if (skip == 0) {
-      sink_->Put(kRootArrayConstants + root_index, "RootConstant");
+      sink_.Put(kRootArrayConstants + root_index, "RootConstant");
     } else {
-      sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
-      sink_->PutInt(skip, "SkipInPutRoot");
+      sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
+      sink_.PutInt(skip, "SkipInPutRoot");
     }
   } else {
     FlushSkip(skip);
-    sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
-    sink_->PutInt(root_index, "root_index");
+    sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
+    sink_.PutInt(root_index, "root_index");
+    hot_objects_.Add(object);
   }
 }
 
 void Serializer::PutSmi(Smi* smi) {
-  sink_->Put(kOnePointerRawData, "Smi");
+  sink_.Put(kOnePointerRawData, "Smi");
   byte* bytes = reinterpret_cast<byte*>(&smi);
-  for (int i = 0; i < kPointerSize; i++) sink_->Put(bytes[i], "Byte");
+  for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
 }
 
 void Serializer::PutBackReference(HeapObject* object,
                                   SerializerReference reference) {
   DCHECK(BackReferenceIsAlreadyAllocated(reference));
-  sink_->PutInt(reference.back_reference(), "BackRefValue");
+  sink_.PutInt(reference.back_reference(), "BackRefValue");
   hot_objects_.Add(object);
 }
 
@@ -245,8 +244,8 @@
   DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
          (how_to_code == kPlain && where_to_point == kInnerPointer) ||
          (how_to_code == kFromCode && where_to_point == kInnerPointer));
-  sink_->Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
-  sink_->PutInt(reference.attached_reference_index(), "AttachedRefIndex");
+  sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
+  sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
 }
 
 int Serializer::PutAlignmentPrefix(HeapObject* object) {
@@ -254,7 +253,7 @@
   if (alignment != kWordAligned) {
     DCHECK(1 <= alignment && alignment <= 3);
     byte prefix = (kAlignmentPrefix - 1) + alignment;
-    sink_->Put(prefix, "Alignment");
+    sink_.Put(prefix, "Alignment");
     return Heap::GetMaximumFillToAlign(alignment);
   }
   return 0;
@@ -274,8 +273,8 @@
   if (new_chunk_size > max_chunk_size(space)) {
     // The new chunk size would not fit onto a single page. Complete the
     // current chunk and start a new one.
-    sink_->Put(kNextChunk, "NextChunk");
-    sink_->Put(space, "NextChunkSpace");
+    sink_.Put(kNextChunk, "NextChunk");
+    sink_.Put(space, "NextChunkSpace");
     completed_chunks_[space].Add(pending_chunk_[space]);
     pending_chunk_[space] = 0;
     new_chunk_size = size;
@@ -290,11 +289,11 @@
   // The non-branching GetInt will read up to 3 bytes too far, so we need
   // to pad the snapshot to make sure we don't read over the end.
   for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
-    sink_->Put(kNop, "Padding");
+    sink_.Put(kNop, "Padding");
   }
   // Pad up to pointer size for checksum.
-  while (!IsAligned(sink_->Position(), kPointerAlignment)) {
-    sink_->Put(kNop, "Padding");
+  while (!IsAligned(sink_.Position(), kPointerAlignment)) {
+    sink_.Put(kNop, "Padding");
   }
 }
 
@@ -668,9 +667,10 @@
     int builtin_count,
     v8::String::ExternalOneByteStringResource** resource_pointer,
     FixedArray* source_cache, int resource_index) {
+  Isolate* isolate = serializer_->isolate();
   for (int i = 0; i < builtin_count; i++) {
     Object* source = source_cache->get(i);
-    if (!source->IsUndefined()) {
+    if (!source->IsUndefined(isolate)) {
       ExternalOneByteString* string = ExternalOneByteString::cast(source);
       typedef v8::String::ExternalOneByteStringResource Resource;
       const Resource* resource = string->resource();
@@ -687,6 +687,9 @@
 
 void Serializer::ObjectSerializer::VisitExternalOneByteString(
     v8::String::ExternalOneByteStringResource** resource_pointer) {
+  DCHECK_EQ(serializer_->isolate()->heap()->native_source_string_map(),
+            object_->map());
+  DCHECK(ExternalOneByteString::cast(object_)->is_short());
   Address references_start = reinterpret_cast<Address>(resource_pointer);
   OutputRawData(references_start);
   if (SerializeExternalNativeSourceString(
@@ -707,25 +710,27 @@
 }
 
 Address Serializer::ObjectSerializer::PrepareCode() {
-  // To make snapshots reproducible, we make a copy of the code object
-  // and wipe all pointers in the copy, which we then serialize.
-  Code* original = Code::cast(object_);
-  Code* code = serializer_->CopyCode(original);
+  Code* code = Code::cast(object_);
+  if (FLAG_predictable) {
+    // To make snapshots reproducible, we make a copy of the code object
+    // and wipe all pointers in the copy, which we then serialize.
+    code = serializer_->CopyCode(code);
+    int mode_mask = RelocInfo::kCodeTargetMask |
+                    RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                    RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+    for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
+      RelocInfo* rinfo = it.rinfo();
+      rinfo->WipeOut();
+    }
+    // We need to wipe out the header fields *after* wiping out the
+    // relocations, because some of these fields are needed for the latter.
+    code->WipeOutHeader();
+  }
   // Code age headers are not serializable.
   code->MakeYoung(serializer_->isolate());
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
-                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
-  for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    rinfo->WipeOut();
-  }
-  // We need to wipe out the header fields *after* wiping out the
-  // relocations, because some of these fields are needed for the latter.
-  code->WipeOutHeader();
   return code->address();
 }
 
diff --git a/src/snapshot/serializer.h b/src/snapshot/serializer.h
index f99cd72..45f891e 100644
--- a/src/snapshot/serializer.h
+++ b/src/snapshot/serializer.h
@@ -38,28 +38,29 @@
  private:
   class NameMap {
    public:
-    NameMap() : impl_(HashMap::PointersMatch) {}
+    NameMap() : impl_(base::HashMap::PointersMatch) {}
 
     ~NameMap() {
-      for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
+      for (base::HashMap::Entry* p = impl_.Start(); p != NULL;
+           p = impl_.Next(p)) {
         DeleteArray(static_cast<const char*>(p->value));
       }
     }
 
     void Insert(Address code_address, const char* name, int name_size) {
-      HashMap::Entry* entry = FindOrCreateEntry(code_address);
+      base::HashMap::Entry* entry = FindOrCreateEntry(code_address);
       if (entry->value == NULL) {
         entry->value = CopyName(name, name_size);
       }
     }
 
     const char* Lookup(Address code_address) {
-      HashMap::Entry* entry = FindEntry(code_address);
+      base::HashMap::Entry* entry = FindEntry(code_address);
       return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
     }
 
     void Remove(Address code_address) {
-      HashMap::Entry* entry = FindEntry(code_address);
+      base::HashMap::Entry* entry = FindEntry(code_address);
       if (entry != NULL) {
         DeleteArray(static_cast<char*>(entry->value));
         RemoveEntry(entry);
@@ -68,11 +69,11 @@
 
     void Move(Address from, Address to) {
       if (from == to) return;
-      HashMap::Entry* from_entry = FindEntry(from);
+      base::HashMap::Entry* from_entry = FindEntry(from);
       DCHECK(from_entry != NULL);
       void* value = from_entry->value;
       RemoveEntry(from_entry);
-      HashMap::Entry* to_entry = FindOrCreateEntry(to);
+      base::HashMap::Entry* to_entry = FindOrCreateEntry(to);
       DCHECK(to_entry->value == NULL);
       to_entry->value = value;
     }
@@ -89,20 +90,20 @@
       return result;
     }
 
-    HashMap::Entry* FindOrCreateEntry(Address code_address) {
+    base::HashMap::Entry* FindOrCreateEntry(Address code_address) {
       return impl_.LookupOrInsert(code_address,
                                   ComputePointerHash(code_address));
     }
 
-    HashMap::Entry* FindEntry(Address code_address) {
+    base::HashMap::Entry* FindEntry(Address code_address) {
       return impl_.Lookup(code_address, ComputePointerHash(code_address));
     }
 
-    void RemoveEntry(HashMap::Entry* entry) {
+    void RemoveEntry(base::HashMap::Entry* entry) {
       impl_.Remove(entry->key, entry->hash);
     }
 
-    HashMap impl_;
+    base::HashMap impl_;
 
     DISALLOW_COPY_AND_ASSIGN(NameMap);
   };
@@ -119,7 +120,7 @@
 // There can be only one serializer per V8 process.
 class Serializer : public SerializerDeserializer {
  public:
-  Serializer(Isolate* isolate, SnapshotByteSink* sink);
+  explicit Serializer(Isolate* isolate);
   ~Serializer() override;
 
   void EncodeReservations(List<SerializedData::Reservation>* out) const;
@@ -170,14 +171,18 @@
   // Emit alignment prefix if necessary, return required padding space in bytes.
   int PutAlignmentPrefix(HeapObject* object);
 
-  // Returns true if the object was successfully serialized.
-  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
-                            WhereToPoint where_to_point, int skip);
+  // Returns true if the object was successfully serialized as a hot object.
+  bool SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
+                          WhereToPoint where_to_point, int skip);
+
+  // Returns true if the object was successfully serialized as a back reference.
+  bool SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
+                              WhereToPoint where_to_point, int skip);
 
   inline void FlushSkip(int skip) {
     if (skip != 0) {
-      sink_->Put(kSkip, "SkipFromSerializeObject");
-      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+      sink_.Put(kSkip, "SkipFromSerializeObject");
+      sink_.PutInt(skip, "SkipDistanceFromSerializeObject");
     }
   }
 
@@ -207,7 +212,7 @@
     return max_chunk_size_[space];
   }
 
-  SnapshotByteSink* sink() const { return sink_; }
+  const SnapshotByteSink* sink() const { return &sink_; }
 
   void QueueDeferredObject(HeapObject* obj) {
     DCHECK(reference_map_.Lookup(obj).is_back_reference());
@@ -218,7 +223,7 @@
 
   Isolate* isolate_;
 
-  SnapshotByteSink* sink_;
+  SnapshotByteSink sink_;
   ExternalReferenceEncoder external_reference_encoder_;
 
   SerializerReferenceMap reference_map_;
diff --git a/src/snapshot/snapshot-common.cc b/src/snapshot/snapshot-common.cc
index a951b0d..5eac4af 100644
--- a/src/snapshot/snapshot-common.cc
+++ b/src/snapshot/snapshot-common.cc
@@ -18,8 +18,7 @@
 
 #ifdef DEBUG
 bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
-  return !Snapshot::ExtractStartupData(snapshot_blob).is_empty() &&
-         !Snapshot::ExtractContextData(snapshot_blob).is_empty();
+  return Snapshot::ExtractNumContexts(snapshot_blob) > 0;
 }
 #endif  // DEBUG
 
@@ -31,12 +30,6 @@
 }
 
 
-bool Snapshot::EmbedsScript(Isolate* isolate) {
-  if (!isolate->snapshot_available()) return false;
-  return ExtractMetadata(isolate->snapshot_blob()).embeds_script();
-}
-
-
 uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
   DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
   if (!isolate->snapshot_available()) {
@@ -67,15 +60,16 @@
   return success;
 }
 
-
 MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
-    Isolate* isolate, Handle<JSGlobalProxy> global_proxy) {
+    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+    size_t context_index) {
   if (!isolate->snapshot_available()) return Handle<Context>();
   base::ElapsedTimer timer;
   if (FLAG_profile_deserialization) timer.Start();
 
   const v8::StartupData* blob = isolate->snapshot_blob();
-  Vector<const byte> context_data = ExtractContextData(blob);
+  Vector<const byte> context_data =
+      ExtractContextData(blob, static_cast<int>(context_index));
   SnapshotData snapshot_data(context_data);
   Deserializer deserializer(&snapshot_data);
 
@@ -87,178 +81,192 @@
   if (FLAG_profile_deserialization) {
     double ms = timer.Elapsed().InMillisecondsF();
     int bytes = context_data.length();
-    PrintF("[Deserializing context (%d bytes) took %0.3f ms]\n", bytes, ms);
+    PrintF("[Deserializing context #%zu (%d bytes) took %0.3f ms]\n",
+           context_index, bytes, ms);
   }
   return Handle<Context>::cast(result);
 }
 
+void UpdateMaxRequirementPerPage(
+    uint32_t* requirements,
+    Vector<const SerializedData::Reservation> reservations) {
+  int space = 0;
+  uint32_t current_requirement = 0;
+  for (const auto& reservation : reservations) {
+    current_requirement += reservation.chunk_size();
+    if (reservation.is_last()) {
+      requirements[space] = std::max(requirements[space], current_requirement);
+      current_requirement = 0;
+      space++;
+    }
+  }
+  DCHECK_EQ(i::Serializer::kNumberOfSpaces, space);
+}
 
-void CalculateFirstPageSizes(bool is_default_snapshot,
-                             const SnapshotData& startup_snapshot,
-                             const SnapshotData& context_snapshot,
+void CalculateFirstPageSizes(const SnapshotData* startup_snapshot,
+                             const List<SnapshotData*>* context_snapshots,
                              uint32_t* sizes_out) {
-  Vector<const SerializedData::Reservation> startup_reservations =
-      startup_snapshot.Reservations();
-  Vector<const SerializedData::Reservation> context_reservations =
-      context_snapshot.Reservations();
-  int startup_index = 0;
-  int context_index = 0;
-
   if (FLAG_profile_deserialization) {
     int startup_total = 0;
-    int context_total = 0;
-    for (auto& reservation : startup_reservations) {
+    PrintF("Deserialization will reserve:\n");
+    for (const auto& reservation : startup_snapshot->Reservations()) {
       startup_total += reservation.chunk_size();
     }
-    for (auto& reservation : context_reservations) {
-      context_total += reservation.chunk_size();
+    PrintF("%10d bytes per isolate\n", startup_total);
+    for (int i = 0; i < context_snapshots->length(); i++) {
+      int context_total = 0;
+      for (const auto& reservation : context_snapshots->at(i)->Reservations()) {
+        context_total += reservation.chunk_size();
+      }
+      PrintF("%10d bytes per context #%d\n", context_total, i);
     }
-    PrintF(
-        "Deserialization will reserve:\n"
-        "%10d bytes per isolate\n"
-        "%10d bytes per context\n",
-        startup_total, context_total);
+  }
+
+  uint32_t startup_requirements[i::Serializer::kNumberOfSpaces];
+  uint32_t context_requirements[i::Serializer::kNumberOfSpaces];
+  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
+    startup_requirements[space] = 0;
+    context_requirements[space] = 0;
+  }
+
+  UpdateMaxRequirementPerPage(startup_requirements,
+                              startup_snapshot->Reservations());
+  for (const auto& context_snapshot : *context_snapshots) {
+    UpdateMaxRequirementPerPage(context_requirements,
+                                context_snapshot->Reservations());
   }
 
   for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
-    bool single_chunk = true;
-    while (!startup_reservations[startup_index].is_last()) {
-      single_chunk = false;
-      startup_index++;
-    }
-    while (!context_reservations[context_index].is_last()) {
-      single_chunk = false;
-      context_index++;
-    }
-
-    uint32_t required = kMaxUInt32;
-    if (single_chunk) {
-      // If both the startup snapshot data and the context snapshot data on
-      // this space fit in a single page, then we consider limiting the size
-      // of the first page. For this, we add the chunk sizes and some extra
-      // allowance. This way we achieve a smaller startup memory footprint.
-      required = (startup_reservations[startup_index].chunk_size() +
-                  2 * context_reservations[context_index].chunk_size()) +
-                 Page::kObjectStartOffset;
-      // Add a small allowance to the code space for small scripts.
-      if (space == CODE_SPACE) required += 32 * KB;
-    } else if (!FLAG_debug_code) {
-      // We expect the vanilla snapshot to only require one page per space,
-      // unless we are emitting debug code.
-      DCHECK(!is_default_snapshot);
-    }
+    // If the space requirement for a page is less than a page size, we consider
+    // limiting the size of the first page in order to save memory on startup.
+    uint32_t required = startup_requirements[space] +
+                        2 * context_requirements[space] +
+                        Page::kObjectStartOffset;
+    // Add a small allowance to the code space for small scripts.
+    if (space == CODE_SPACE) required += 32 * KB;
 
     if (space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE) {
       uint32_t max_size =
           MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(space));
-      sizes_out[space - FIRST_PAGED_SPACE] = Min(required, max_size);
-    } else {
-      DCHECK(single_chunk);
+      sizes_out[space - FIRST_PAGED_SPACE] = std::min(required, max_size);
     }
-    startup_index++;
-    context_index++;
   }
-
-  DCHECK_EQ(startup_reservations.length(), startup_index);
-  DCHECK_EQ(context_reservations.length(), context_index);
 }
 
-
 v8::StartupData Snapshot::CreateSnapshotBlob(
-    const i::StartupSerializer& startup_ser,
-    const i::PartialSerializer& context_ser, Snapshot::Metadata metadata) {
-  SnapshotData startup_snapshot(startup_ser);
-  SnapshotData context_snapshot(context_ser);
-  Vector<const byte> startup_data = startup_snapshot.RawData();
-  Vector<const byte> context_data = context_snapshot.RawData();
+    const SnapshotData* startup_snapshot,
+    const List<SnapshotData*>* context_snapshots) {
+  int num_contexts = context_snapshots->length();
+  int startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
+  int total_length = startup_snapshot_offset;
+  total_length += startup_snapshot->RawData().length();
+  for (const auto& context_snapshot : *context_snapshots) {
+    total_length += context_snapshot->RawData().length();
+  }
 
   uint32_t first_page_sizes[kNumPagedSpaces];
+  CalculateFirstPageSizes(startup_snapshot, context_snapshots,
+                          first_page_sizes);
 
-  CalculateFirstPageSizes(!metadata.embeds_script(), startup_snapshot,
-                          context_snapshot, first_page_sizes);
-
-  int startup_length = startup_data.length();
-  int context_length = context_data.length();
-  int context_offset = ContextOffset(startup_length);
-
-  int length = context_offset + context_length;
-  char* data = new char[length];
-
-  memcpy(data + kMetadataOffset, &metadata.RawValue(), kInt32Size);
+  char* data = new char[total_length];
   memcpy(data + kFirstPageSizesOffset, first_page_sizes,
          kNumPagedSpaces * kInt32Size);
-  memcpy(data + kStartupLengthOffset, &startup_length, kInt32Size);
-  memcpy(data + kStartupDataOffset, startup_data.begin(), startup_length);
-  memcpy(data + context_offset, context_data.begin(), context_length);
-  v8::StartupData result = {data, length};
-
+  memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
+  int payload_offset = StartupSnapshotOffset(num_contexts);
+  int payload_length = startup_snapshot->RawData().length();
+  memcpy(data + payload_offset, startup_snapshot->RawData().start(),
+         payload_length);
   if (FLAG_profile_deserialization) {
-    PrintF(
-        "Snapshot blob consists of:\n"
-        "%10d bytes for startup\n"
-        "%10d bytes for context\n",
-        startup_length, context_length);
+    PrintF("Snapshot blob consists of:\n%10d bytes for startup\n",
+           payload_length);
   }
+  payload_offset += payload_length;
+  for (int i = 0; i < num_contexts; i++) {
+    memcpy(data + ContextSnapshotOffsetOffset(i), &payload_offset, kInt32Size);
+    SnapshotData* context_snapshot = context_snapshots->at(i);
+    payload_length = context_snapshot->RawData().length();
+    memcpy(data + payload_offset, context_snapshot->RawData().start(),
+           payload_length);
+    if (FLAG_profile_deserialization) {
+      PrintF("%10d bytes for context #%d\n", payload_length, i);
+    }
+    payload_offset += payload_length;
+  }
+
+  v8::StartupData result = {data, total_length};
   return result;
 }
 
-
-Snapshot::Metadata Snapshot::ExtractMetadata(const v8::StartupData* data) {
-  uint32_t raw;
-  memcpy(&raw, data->data + kMetadataOffset, kInt32Size);
-  return Metadata(raw);
+int Snapshot::ExtractNumContexts(const v8::StartupData* data) {
+  CHECK_LT(kNumberOfContextsOffset, data->raw_size);
+  int num_contexts;
+  memcpy(&num_contexts, data->data + kNumberOfContextsOffset, kInt32Size);
+  return num_contexts;
 }
 
-
 Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
-  DCHECK_LT(kIntSize, data->raw_size);
-  int startup_length;
-  memcpy(&startup_length, data->data + kStartupLengthOffset, kInt32Size);
-  DCHECK_LT(startup_length, data->raw_size);
+  int num_contexts = ExtractNumContexts(data);
+  int startup_offset = StartupSnapshotOffset(num_contexts);
+  CHECK_LT(startup_offset, data->raw_size);
+  int first_context_offset;
+  memcpy(&first_context_offset, data->data + ContextSnapshotOffsetOffset(0),
+         kInt32Size);
+  CHECK_LT(first_context_offset, data->raw_size);
+  int startup_length = first_context_offset - startup_offset;
   const byte* startup_data =
-      reinterpret_cast<const byte*>(data->data + kStartupDataOffset);
+      reinterpret_cast<const byte*>(data->data + startup_offset);
   return Vector<const byte>(startup_data, startup_length);
 }
 
+Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data,
+                                                int index) {
+  int num_contexts = ExtractNumContexts(data);
+  CHECK_LT(index, num_contexts);
 
-Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) {
-  DCHECK_LT(kIntSize, data->raw_size);
-  int startup_length;
-  memcpy(&startup_length, data->data + kStartupLengthOffset, kIntSize);
-  int context_offset = ContextOffset(startup_length);
+  int context_offset;
+  memcpy(&context_offset, data->data + ContextSnapshotOffsetOffset(index),
+         kInt32Size);
+  int next_context_offset;
+  if (index == num_contexts - 1) {
+    next_context_offset = data->raw_size;
+  } else {
+    memcpy(&next_context_offset,
+           data->data + ContextSnapshotOffsetOffset(index + 1), kInt32Size);
+    CHECK_LT(next_context_offset, data->raw_size);
+  }
+
   const byte* context_data =
       reinterpret_cast<const byte*>(data->data + context_offset);
-  DCHECK_LT(context_offset, data->raw_size);
-  int context_length = data->raw_size - context_offset;
+  int context_length = next_context_offset - context_offset;
   return Vector<const byte>(context_data, context_length);
 }
 
-SnapshotData::SnapshotData(const Serializer& ser) {
+SnapshotData::SnapshotData(const Serializer* serializer) {
   DisallowHeapAllocation no_gc;
   List<Reservation> reservations;
-  ser.EncodeReservations(&reservations);
-  const List<byte>& payload = ser.sink()->data();
+  serializer->EncodeReservations(&reservations);
+  const List<byte>* payload = serializer->sink()->data();
 
   // Calculate sizes.
   int reservation_size = reservations.length() * kInt32Size;
-  int size = kHeaderSize + reservation_size + payload.length();
+  int size = kHeaderSize + reservation_size + payload->length();
 
   // Allocate backing store and create result data.
   AllocateData(size);
 
   // Set header values.
-  SetMagicNumber(ser.isolate());
+  SetMagicNumber(serializer->isolate());
   SetHeaderValue(kCheckSumOffset, Version::Hash());
   SetHeaderValue(kNumReservationsOffset, reservations.length());
-  SetHeaderValue(kPayloadLengthOffset, payload.length());
+  SetHeaderValue(kPayloadLengthOffset, payload->length());
 
   // Copy reservation chunk sizes.
   CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
             reservation_size);
 
   // Copy serialized data.
-  CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
-            static_cast<size_t>(payload.length()));
+  CopyBytes(data_ + kHeaderSize + reservation_size, payload->begin(),
+            static_cast<size_t>(payload->length()));
 }
 
 bool SnapshotData::IsSane() {
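
For illustration only (not part of the patch): a minimal standalone sketch of
how a consumer of the new blob layout slices out one context payload. It
mirrors the ExtractContextData() logic above -- context i ends where context
i + 1 begins, and the last context runs to the end of the blob -- but uses
assumed constants and a hypothetical Slice/ContextSlice helper instead of
V8's types.

    #include <cstdint>
    #include <cstring>

    // Assumed values; the real offsets derive from kNumPagedSpaces and
    // kInt32Size in snapshot.h.
    static const int kInt32Size = 4;
    static const int kNumberOfContextsOffset = 6 * kInt32Size;
    static const int kFirstContextOffsetOffset =
        kNumberOfContextsOffset + kInt32Size;

    struct Slice { const char* data; int length; };

    // Returns the byte range of context snapshot |index| inside |blob|.
    static Slice ContextSlice(const char* blob, int blob_size, int index) {
      int32_t num_contexts;
      std::memcpy(&num_contexts, blob + kNumberOfContextsOffset, kInt32Size);
      int32_t begin, end;
      std::memcpy(&begin, blob + kFirstContextOffsetOffset + index * kInt32Size,
                  kInt32Size);
      if (index == num_contexts - 1) {
        end = blob_size;  // last context runs to the end of the blob
      } else {
        std::memcpy(&end,
                    blob + kFirstContextOffsetOffset + (index + 1) * kInt32Size,
                    kInt32Size);
      }
      return Slice{blob + begin, end - begin};
    }

The startup payload itself occupies the range from
StartupSnapshotOffset(num_contexts) up to the offset recorded for context 0,
which is exactly what ExtractStartupData() computes above.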
diff --git a/src/snapshot/snapshot-source-sink.h b/src/snapshot/snapshot-source-sink.h
index 360ec76..5d4c08d 100644
--- a/src/snapshot/snapshot-source-sink.h
+++ b/src/snapshot/snapshot-source-sink.h
@@ -94,7 +94,7 @@
   void PutRaw(const byte* data, int number_of_bytes, const char* description);
   int Position() { return data_.length(); }
 
-  const List<byte>& data() const { return data_; }
+  const List<byte>* data() const { return &data_; }
 
  private:
   List<byte> data_;
diff --git a/src/snapshot/snapshot.h b/src/snapshot/snapshot.h
index c648d75..e332967 100644
--- a/src/snapshot/snapshot.h
+++ b/src/snapshot/snapshot.h
@@ -16,84 +16,11 @@
 class PartialSerializer;
 class StartupSerializer;
 
-class Snapshot : public AllStatic {
- public:
-  class Metadata {
-   public:
-    explicit Metadata(uint32_t data = 0) : data_(data) {}
-    bool embeds_script() { return EmbedsScriptBits::decode(data_); }
-    void set_embeds_script(bool v) {
-      data_ = EmbedsScriptBits::update(data_, v);
-    }
-
-    uint32_t& RawValue() { return data_; }
-
-   private:
-    class EmbedsScriptBits : public BitField<bool, 0, 1> {};
-    uint32_t data_;
-  };
-
-  // Initialize the Isolate from the internal snapshot. Returns false if no
-  // snapshot could be found.
-  static bool Initialize(Isolate* isolate);
-  // Create a new context using the internal partial snapshot.
-  static MaybeHandle<Context> NewContextFromSnapshot(
-      Isolate* isolate, Handle<JSGlobalProxy> global_proxy);
-
-  static bool HaveASnapshotToStartFrom(Isolate* isolate);
-
-  static bool EmbedsScript(Isolate* isolate);
-
-  static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
-
-
-  // To be implemented by the snapshot source.
-  static const v8::StartupData* DefaultSnapshotBlob();
-
-  static v8::StartupData CreateSnapshotBlob(
-      const StartupSerializer& startup_ser,
-      const PartialSerializer& context_ser, Snapshot::Metadata metadata);
-
-#ifdef DEBUG
-  static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
-#endif  // DEBUG
-
- private:
-  static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
-  static Vector<const byte> ExtractContextData(const v8::StartupData* data);
-  static Metadata ExtractMetadata(const v8::StartupData* data);
-
-  // Snapshot blob layout:
-  // [0] metadata
-  // [1 - 6] pre-calculated first page sizes for paged spaces
-  // [7] serialized start up data length
-  // ... serialized start up data
-  // ... serialized context data
-
-  static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-
-  static const int kMetadataOffset = 0;
-  static const int kFirstPageSizesOffset = kMetadataOffset + kInt32Size;
-  static const int kStartupLengthOffset =
-      kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
-  static const int kStartupDataOffset = kStartupLengthOffset + kInt32Size;
-
-  static int ContextOffset(int startup_length) {
-    return kStartupDataOffset + startup_length;
-  }
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
-};
-
-#ifdef V8_USE_EXTERNAL_STARTUP_DATA
-void SetSnapshotFromFile(StartupData* snapshot_blob);
-#endif
-
 // Wrapper around reservation sizes and the serialization payload.
 class SnapshotData : public SerializedData {
  public:
   // Used when producing.
-  explicit SnapshotData(const Serializer& ser);
+  explicit SnapshotData(const Serializer* serializer);
 
   // Used when consuming.
   explicit SnapshotData(const Vector<const byte> snapshot)
@@ -124,6 +51,74 @@
   static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
 };
 
+class Snapshot : public AllStatic {
+ public:
+  // Initialize the Isolate from the internal snapshot. Returns false if no
+  // snapshot could be found.
+  static bool Initialize(Isolate* isolate);
+  // Create a new context using the internal partial snapshot.
+  static MaybeHandle<Context> NewContextFromSnapshot(
+      Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
+      size_t context_index);
+
+  static bool HaveASnapshotToStartFrom(Isolate* isolate);
+
+  static bool EmbedsScript(Isolate* isolate);
+
+  static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
+
+
+  // To be implemented by the snapshot source.
+  static const v8::StartupData* DefaultSnapshotBlob();
+
+  static v8::StartupData CreateSnapshotBlob(
+      const SnapshotData* startup_snapshot,
+      const List<SnapshotData*>* context_snapshots);
+
+#ifdef DEBUG
+  static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
+#endif  // DEBUG
+
+ private:
+  static int ExtractNumContexts(const v8::StartupData* data);
+  static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
+  static Vector<const byte> ExtractContextData(const v8::StartupData* data,
+                                               int index);
+
+  // Snapshot blob layout:
+  // [0 - 5] pre-calculated first page sizes for paged spaces
+  // [6] number of contexts N
+  // [7] offset to context 0
+  // [8] offset to context 1
+  // ...
+  // ... offset to context N - 1
+  // ... startup snapshot data
+  // ... context 0 snapshot data
+  // ... context 1 snapshot data
+
+  static const int kNumPagedSpaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+
+  static const int kFirstPageSizesOffset = 0;
+  static const int kNumberOfContextsOffset =
+      kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;
+  static const int kFirstContextOffsetOffset =
+      kNumberOfContextsOffset + kInt32Size;
+
+  static int StartupSnapshotOffset(int num_contexts) {
+    return kFirstContextOffsetOffset + num_contexts * kInt32Size;
+  }
+
+  static int ContextSnapshotOffsetOffset(int index) {
+    return kFirstContextOffsetOffset + index * kInt32Size;
+  }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
+};
+
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+void SetSnapshotFromFile(StartupData* snapshot_blob);
+#endif
+
 }  // namespace internal
 }  // namespace v8
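
A quick illustrative check on the header arithmetic above (the constant
values are assumptions chosen to match the "[0 - 5]" layout comment; the real
ones come from kNumPagedSpaces and kInt32Size). Because the offset table
holds one int32 entry per context, the startup payload begins exactly where a
hypothetical table entry N would sit, which is why StartupSnapshotOffset(N)
and ContextSnapshotOffsetOffset(N) share the same formula.

    // Illustrative constants only.
    constexpr int kInt32Size = 4;
    constexpr int kNumPagedSpaces = 6;
    constexpr int kFirstPageSizesOffset = 0;
    constexpr int kNumberOfContextsOffset =
        kFirstPageSizesOffset + kNumPagedSpaces * kInt32Size;  // 24
    constexpr int kFirstContextOffsetOffset =
        kNumberOfContextsOffset + kInt32Size;                  // 28

    constexpr int StartupSnapshotOffset(int num_contexts) {
      return kFirstContextOffsetOffset + num_contexts * kInt32Size;
    }
    constexpr int ContextSnapshotOffsetOffset(int index) {
      return kFirstContextOffsetOffset + index * kInt32Size;
    }

    // The startup payload starts right after the N-entry context offset table.
    static_assert(StartupSnapshotOffset(2) == ContextSnapshotOffsetOffset(2),
                  "startup data follows the context offset table");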
 
diff --git a/src/snapshot/startup-serializer.cc b/src/snapshot/startup-serializer.cc
index c3f9b3e..80598e8 100644
--- a/src/snapshot/startup-serializer.cc
+++ b/src/snapshot/startup-serializer.cc
@@ -11,10 +11,11 @@
 namespace internal {
 
 StartupSerializer::StartupSerializer(
-    Isolate* isolate, SnapshotByteSink* sink,
-    FunctionCodeHandling function_code_handling)
-    : Serializer(isolate, sink),
-      function_code_handling_(function_code_handling),
+    Isolate* isolate,
+    v8::SnapshotCreator::FunctionCodeHandling function_code_handling)
+    : Serializer(isolate),
+      clear_function_code_(function_code_handling ==
+                           v8::SnapshotCreator::FunctionCodeHandling::kClear),
       serializing_builtins_(false) {
   InitializeCodeAddressMap();
 }
@@ -27,21 +28,21 @@
                                         WhereToPoint where_to_point, int skip) {
   DCHECK(!obj->IsJSFunction());
 
-  if (function_code_handling_ == CLEAR_FUNCTION_CODE) {
+  if (clear_function_code_) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       // If the function code is compiled (either as native code or bytecode),
       // replace it with lazy-compile builtin. Only exception is when we are
       // serializing the canonical interpreter-entry-trampoline builtin.
       if (code->kind() == Code::FUNCTION ||
-          (!serializing_builtins_ && code->is_interpreter_entry_trampoline())) {
+          (!serializing_builtins_ &&
+           code->is_interpreter_trampoline_builtin())) {
         obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
       }
     } else if (obj->IsBytecodeArray()) {
       obj = isolate()->heap()->undefined_value();
     }
   } else if (obj->IsCode()) {
-    DCHECK_EQ(KEEP_FUNCTION_CODE, function_code_handling_);
     Code* code = Code::cast(obj);
     if (code->kind() == Code::FUNCTION) {
       code->ClearInlineCaches();
@@ -49,6 +50,8 @@
     }
   }
 
+  if (SerializeHotObject(obj, how_to_code, where_to_point, skip)) return;
+
   int root_index = root_index_map_.Lookup(obj);
   // We can only encode a root as such if it has already been serialized.
   // That applies to root indices below the wave front.
@@ -59,12 +62,12 @@
     }
   }
 
-  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+  if (SerializeBackReference(obj, how_to_code, where_to_point, skip)) return;
 
   FlushSkip(skip);
 
   // Object has not yet been serialized.  Serialize it here.
-  ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
+  ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
                                      where_to_point);
   object_serializer.Serialize();
 
@@ -89,11 +92,22 @@
   Pad();
 }
 
+int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+  int index;
+  if (!partial_cache_index_map_.LookupOrInsert(heap_object, &index)) {
+    // This object is not yet part of the partial snapshot cache. Serialize it
+    // into the startup snapshot so that partial snapshots can refer to it by
+    // its partial snapshot cache index.
+    VisitPointer(reinterpret_cast<Object**>(&heap_object));
+  }
+  return index;
+}
+
 void StartupSerializer::Synchronize(VisitorSynchronization::SyncTag tag) {
   // We expect the builtins tag after builtins have been serialized.
   DCHECK(!serializing_builtins_ || tag == VisitorSynchronization::kBuiltins);
   serializing_builtins_ = (tag == VisitorSynchronization::kHandleScope);
-  sink_->Put(kSynchronize, "Synchronize");
+  sink_.Put(kSynchronize, "Synchronize");
 }
 
 void StartupSerializer::SerializeStrongReferences() {
diff --git a/src/snapshot/startup-serializer.h b/src/snapshot/startup-serializer.h
index 71b8475..cc66f71 100644
--- a/src/snapshot/startup-serializer.h
+++ b/src/snapshot/startup-serializer.h
@@ -6,6 +6,7 @@
 #define V8_SNAPSHOT_STARTUP_SERIALIZER_H_
 
 #include <bitset>
+#include "include/v8.h"
 #include "src/snapshot/serializer.h"
 
 namespace v8 {
@@ -13,11 +14,9 @@
 
 class StartupSerializer : public Serializer {
  public:
-  enum FunctionCodeHandling { CLEAR_FUNCTION_CODE, KEEP_FUNCTION_CODE };
-
   StartupSerializer(
-      Isolate* isolate, SnapshotByteSink* sink,
-      FunctionCodeHandling function_code_handling = CLEAR_FUNCTION_CODE);
+      Isolate* isolate,
+      v8::SnapshotCreator::FunctionCodeHandling function_code_handling);
   ~StartupSerializer() override;
 
   // Serialize the current state of the heap.  The order is:
@@ -28,7 +27,34 @@
   void SerializeStrongReferences();
   void SerializeWeakReferencesAndDeferred();
 
+  int PartialSnapshotCacheIndex(HeapObject* o);
+
  private:
+  class PartialCacheIndexMap : public AddressMapBase {
+   public:
+    PartialCacheIndexMap()
+        : map_(base::HashMap::PointersMatch), next_index_(0) {}
+
+    // Look up the object. If found, set *index_out to its index and return true;
+    // otherwise assign it the next free index, set *index_out, and return false.
+    bool LookupOrInsert(HeapObject* obj, int* index_out) {
+      base::HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+      if (entry != NULL) {
+        *index_out = GetValue(entry);
+        return true;
+      }
+      *index_out = next_index_;
+      SetValue(LookupEntry(&map_, obj, true), next_index_++);
+      return false;
+    }
+
+   private:
+    base::HashMap map_;
+    int next_index_;
+
+    DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
+  };
+
   // The StartupSerializer has to serialize the root array, which is slightly
   // different.
   void VisitPointers(Object** start, Object** end) override;
@@ -42,10 +68,11 @@
   // roots. In the second pass, we serialize the rest.
   bool RootShouldBeSkipped(int root_index);
 
-  FunctionCodeHandling function_code_handling_;
+  bool clear_function_code_;
   bool serializing_builtins_;
   bool serializing_immortal_immovables_roots_;
   std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
+  PartialCacheIndexMap partial_cache_index_map_;
   DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
 };
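
The PartialCacheIndexMap added above is a lookup-or-insert table: the first
time PartialSnapshotCacheIndex() sees an object it hands out the next free
index and serializes the object into the startup snapshot; every later lookup
returns the same index, which is how partial (context) snapshots refer back
to startup objects. A minimal standalone sketch of the same pattern, using
std::unordered_map instead of V8's base::HashMap (hypothetical IndexMap
class, not the actual implementation):

    #include <unordered_map>

    class IndexMap {
     public:
      // Returns true and sets *index_out if |obj| was seen before; otherwise
      // assigns the next free index, sets *index_out, and returns false.
      bool LookupOrInsert(const void* obj, int* index_out) {
        auto it = map_.find(obj);
        if (it != map_.end()) {
          *index_out = it->second;
          return true;
        }
        *index_out = next_index_;
        map_.emplace(obj, next_index_++);
        return false;
      }

     private:
      std::unordered_map<const void*, int> map_;
      int next_index_ = 0;
    };

Because LookupOrInsert() returns false only on the first insertion, the
caller serializes each cached object exactly once.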