AAPT2: Fix up file IO

This also enables an AAPT behavior that CTS tests have
come to depend on.

Small files that compress negatively (i.e., end up larger after
compression) are now stored uncompressed. Some CTS tests rely on this
and open such files by mmapping them directly from the APK, which is
only possible when the entries are stored uncompressed.
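
For reference, a minimal standalone sketch (not part of this change;
the helper name is illustrative) of the store-vs-compress rule that the
new ZipFileWriter::WriteFile in flatten/Archive.cpp applies: a deflated
entry is kept only when compression shrinks it meaningfully (roughly
10% or more); otherwise the entry is rewound and stored uncompressed so
it remains mmap-able.

    #include <cstdint>

    // Mirrors the check in ZipFileWriter::WriteFile: returns true when the
    // deflated entry saved too little (compressed * 1.1 > uncompressed) and
    // should be discarded and re-written as a stored, uncompressed entry.
    static bool ShouldStoreUncompressed(uint64_t uncompressed_size,
                                        uint64_t compressed_size) {
      return compressed_size + (compressed_size / 10) > uncompressed_size;
    }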

Bug: 35461578
Test: make aapt2_tests
Change-Id: Id622a6150fe72477ad65d67d1bad897a8ee2ffb9
diff --git a/tools/aapt2/Android.bp b/tools/aapt2/Android.bp
index 57036aa..ef3797c 100644
--- a/tools/aapt2/Android.bp
+++ b/tools/aapt2/Android.bp
@@ -87,6 +87,7 @@
         "flatten/Archive.cpp",
         "flatten/TableFlattener.cpp",
         "flatten/XmlFlattener.cpp",
+        "io/BigBufferStreams.cpp",
         "io/File.cpp",
         "io/FileSystem.cpp",
         "io/Io.cpp",
diff --git a/tools/aapt2/LoadedApk.cpp b/tools/aapt2/LoadedApk.cpp
index 1d04b35..b855f8f 100644
--- a/tools/aapt2/LoadedApk.cpp
+++ b/tools/aapt2/LoadedApk.cpp
@@ -20,6 +20,7 @@
 #include "ValueVisitor.h"
 #include "flatten/Archive.h"
 #include "flatten/TableFlattener.h"
+#include "io/BigBufferInputStream.h"
 
 namespace aapt {
 
@@ -27,8 +28,7 @@
                                                       const android::StringPiece& path) {
   Source source(path);
   std::string error;
-  std::unique_ptr<io::ZipFileCollection> apk =
-      io::ZipFileCollection::Create(path, &error);
+  std::unique_ptr<io::ZipFileCollection> apk = io::ZipFileCollection::Create(path, &error);
   if (!apk) {
     context->GetDiagnostics()->Error(DiagMessage(source) << error);
     return {};
@@ -36,21 +36,18 @@
 
   io::IFile* file = apk->FindFile("resources.arsc");
   if (!file) {
-    context->GetDiagnostics()->Error(DiagMessage(source)
-                                     << "no resources.arsc found");
+    context->GetDiagnostics()->Error(DiagMessage(source) << "no resources.arsc found");
     return {};
   }
 
   std::unique_ptr<io::IData> data = file->OpenAsData();
   if (!data) {
-    context->GetDiagnostics()->Error(DiagMessage(source)
-                                     << "could not open resources.arsc");
+    context->GetDiagnostics()->Error(DiagMessage(source) << "could not open resources.arsc");
     return {};
   }
 
   std::unique_ptr<ResourceTable> table = util::make_unique<ResourceTable>();
-  BinaryResourceParser parser(context, table.get(), source, data->data(),
-                              data->size());
+  BinaryResourceParser parser(context, table.get(), source, data->data(), data->size());
   if (!parser.Parse()) {
     return {};
   }
@@ -92,9 +89,9 @@
       continue;
     }
 
-    // The resource table needs to be reserialized since it might have changed.
+    // The resource table needs to be re-serialized since it might have changed.
     if (path == "resources.arsc") {
-      BigBuffer buffer = BigBuffer(1024);
+      BigBuffer buffer(4096);
       // TODO(adamlesinski): How to determine if there were sparse entries (and if to encode
       // with sparse entries) b/35389232.
       TableFlattener flattener(options, &buffer);
@@ -102,8 +99,8 @@
         return false;
       }
 
-      if (!writer->StartEntry(path, ArchiveEntry::kAlign) || !writer->WriteEntry(buffer) ||
-          !writer->FinishEntry()) {
+      io::BigBufferInputStream input_stream(&buffer);
+      if (!writer->WriteFile(path, ArchiveEntry::kAlign, &input_stream)) {
         context->GetDiagnostics()->Error(DiagMessage()
                                          << "Error when writing file '" << path << "' in APK.");
         return false;
@@ -113,14 +110,12 @@
 
     std::unique_ptr<io::IData> data = file->OpenAsData();
     uint32_t compression_flags = file->WasCompressed() ? ArchiveEntry::kCompress : 0u;
-    if (!writer->StartEntry(path, compression_flags) ||
-        !writer->WriteEntry(data->data(), data->size()) || !writer->FinishEntry()) {
+    if (!writer->WriteFile(path, compression_flags, data.get())) {
       context->GetDiagnostics()->Error(DiagMessage()
                                        << "Error when writing file '" << path << "' in APK.");
       return false;
     }
   }
-
   return true;
 }
 
diff --git a/tools/aapt2/Main.cpp b/tools/aapt2/Main.cpp
index 456f686..5e9b81a 100644
--- a/tools/aapt2/Main.cpp
+++ b/tools/aapt2/Main.cpp
@@ -25,7 +25,7 @@
 static const char* sMajorVersion = "2";
 
 // Update minor version whenever a feature or flag is added.
-static const char* sMinorVersion = "10";
+static const char* sMinorVersion = "11";
 
 int PrintVersion() {
   std::cerr << "Android Asset Packaging Tool (aapt) " << sMajorVersion << "."
diff --git a/tools/aapt2/compile/Compile.cpp b/tools/aapt2/compile/Compile.cpp
index 8027f42..1fe30f0 100644
--- a/tools/aapt2/compile/Compile.cpp
+++ b/tools/aapt2/compile/Compile.cpp
@@ -37,6 +37,7 @@
 #include "compile/XmlIdCollector.h"
 #include "flatten/Archive.h"
 #include "flatten/XmlFlattener.h"
+#include "io/BigBufferOutputStream.h"
 #include "proto/ProtoSerialize.h"
 #include "util/Files.h"
 #include "util/Maybe.h"
@@ -46,7 +47,6 @@
 
 using android::StringPiece;
 using google::protobuf::io::CopyingOutputStreamAdaptor;
-using google::protobuf::io::ZeroCopyOutputStream;
 
 namespace aapt {
 
@@ -142,10 +142,10 @@
     IAaptContext* context, const CompileOptions& options,
     std::vector<ResourcePathData>* out_path_data) {
   const std::string& root_dir = options.res_dir.value();
-  std::unique_ptr<DIR, decltype(closedir)*> d(opendir(root_dir.data()),
-                                              closedir);
+  std::unique_ptr<DIR, decltype(closedir)*> d(opendir(root_dir.data()), closedir);
   if (!d) {
-    context->GetDiagnostics()->Error(DiagMessage() << strerror(errno));
+    context->GetDiagnostics()->Error(DiagMessage()
+                                     << android::base::SystemErrorCodeToString(errno));
     return false;
   }
 
@@ -161,10 +161,10 @@
       continue;
     }
 
-    std::unique_ptr<DIR, decltype(closedir)*> subdir(
-        opendir(prefix_path.data()), closedir);
+    std::unique_ptr<DIR, decltype(closedir)*> subdir(opendir(prefix_path.data()), closedir);
     if (!subdir) {
-      context->GetDiagnostics()->Error(DiagMessage() << strerror(errno));
+      context->GetDiagnostics()->Error(DiagMessage()
+                                       << android::base::SystemErrorCodeToString(errno));
       return false;
     }
 
@@ -177,8 +177,7 @@
       file::AppendPath(&full_path, leaf_entry->d_name);
 
       std::string err_str;
-      Maybe<ResourcePathData> path_data =
-          ExtractResourcePathData(full_path, &err_str);
+      Maybe<ResourcePathData> path_data = ExtractResourcePathData(full_path, &err_str);
       if (!path_data) {
         context->GetDiagnostics()->Error(DiagMessage() << err_str);
         return false;
@@ -199,7 +198,7 @@
     std::ifstream fin(path_data.source.path, std::ifstream::binary);
     if (!fin) {
       context->GetDiagnostics()->Error(DiagMessage(path_data.source)
-                                       << strerror(errno));
+                                       << android::base::SystemErrorCodeToString(errno));
       return false;
     }
 
@@ -249,8 +248,7 @@
 
   // Create the file/zip entry.
   if (!writer->StartEntry(output_path, 0)) {
-    context->GetDiagnostics()->Error(DiagMessage(output_path)
-                                     << "failed to open");
+    context->GetDiagnostics()->Error(DiagMessage(output_path) << "failed to open");
     return false;
   }
 
@@ -258,21 +256,18 @@
   // writer->FinishEntry().
   {
     // Wrap our IArchiveWriter with an adaptor that implements the
-    // ZeroCopyOutputStream
-    // interface.
+    // ZeroCopyOutputStream interface.
     CopyingOutputStreamAdaptor copying_adaptor(writer);
 
     std::unique_ptr<pb::ResourceTable> pb_table = SerializeTableToPb(&table);
     if (!pb_table->SerializeToZeroCopyStream(&copying_adaptor)) {
-      context->GetDiagnostics()->Error(DiagMessage(output_path)
-                                       << "failed to write");
+      context->GetDiagnostics()->Error(DiagMessage(output_path) << "failed to write");
       return false;
     }
   }
 
   if (!writer->FinishEntry()) {
-    context->GetDiagnostics()->Error(DiagMessage(output_path)
-                                     << "failed to finish entry");
+    context->GetDiagnostics()->Error(DiagMessage(output_path) << "failed to finish entry");
     return false;
   }
   return true;
@@ -293,16 +288,14 @@
   // writer->FinishEntry().
   {
     // Wrap our IArchiveWriter with an adaptor that implements the
-    // ZeroCopyOutputStream
-    // interface.
+    // ZeroCopyOutputStream interface.
     CopyingOutputStreamAdaptor copying_adaptor(writer);
     CompiledFileOutputStream output_stream(&copying_adaptor);
 
     // Number of CompiledFiles.
     output_stream.WriteLittleEndian32(1);
 
-    std::unique_ptr<pb::CompiledFile> compiled_file =
-        SerializeCompiledFileToPb(file);
+    std::unique_ptr<pb::CompiledFile> compiled_file = SerializeCompiledFileToPb(file);
     output_stream.WriteCompiledFile(compiled_file.get());
     output_stream.WriteData(&buffer);
 
@@ -371,14 +364,12 @@
     return false;
   }
 
-  std::unique_ptr<pb::CompiledFile> pb_compiled_file =
-      SerializeCompiledFileToPb(xmlres->file);
+  std::unique_ptr<pb::CompiledFile> pb_compiled_file = SerializeCompiledFileToPb(xmlres->file);
   out->WriteCompiledFile(pb_compiled_file.get());
   out->WriteData(&buffer);
 
   if (out->HadError()) {
-    context->GetDiagnostics()->Error(DiagMessage(output_path)
-                                     << "failed to write data");
+    context->GetDiagnostics()->Error(DiagMessage(output_path) << "failed to write data");
     return false;
   }
   return true;
@@ -388,8 +379,7 @@
                        const ResourcePathData& path_data,
                        IArchiveWriter* writer, const std::string& output_path) {
   if (context->IsVerbose()) {
-    context->GetDiagnostics()->Note(DiagMessage(path_data.source)
-                                    << "compiling XML");
+    context->GetDiagnostics()->Note(DiagMessage(path_data.source) << "compiling XML");
   }
 
   std::unique_ptr<xml::XmlResource> xmlres;
@@ -397,7 +387,7 @@
     std::ifstream fin(path_data.source.path, std::ifstream::binary);
     if (!fin) {
       context->GetDiagnostics()->Error(DiagMessage(path_data.source)
-                                       << strerror(errno));
+                                       << android::base::SystemErrorCodeToString(errno));
       return false;
     }
 
@@ -470,31 +460,6 @@
   return true;
 }
 
-class BigBufferOutputStream : public io::OutputStream {
- public:
-  explicit BigBufferOutputStream(BigBuffer* buffer) : buffer_(buffer) {}
-
-  bool Next(void** data, int* len) override {
-    size_t count;
-    *data = buffer_->NextBlock(&count);
-    *len = static_cast<int>(count);
-    return true;
-  }
-
-  void BackUp(int count) override { buffer_->BackUp(count); }
-
-  google::protobuf::int64 ByteCount() const override {
-    return buffer_->size();
-  }
-
-  bool HadError() const override { return false; }
-
- private:
-  BigBuffer* buffer_;
-
-  DISALLOW_COPY_AND_ASSIGN(BigBufferOutputStream);
-};
-
 static bool CompilePng(IAaptContext* context, const CompileOptions& options,
                        const ResourcePathData& path_data,
                        IArchiveWriter* writer, const std::string& output_path) {
@@ -520,7 +485,7 @@
     }
 
     BigBuffer crunched_png_buffer(4096);
-    BigBufferOutputStream crunched_png_buffer_out(&crunched_png_buffer);
+    io::BigBufferOutputStream crunched_png_buffer_out(&crunched_png_buffer);
 
     // Ensure that we only keep the chunks we care about if we end up
     // using the original PNG instead of the crunched one.
@@ -533,8 +498,7 @@
     std::unique_ptr<NinePatch> nine_patch;
     if (path_data.extension == "9.png") {
       std::string err;
-      nine_patch = NinePatch::Create(image->rows.get(), image->width,
-                                     image->height, &err);
+      nine_patch = NinePatch::Create(image->rows.get(), image->width, image->height, &err);
       if (!nine_patch) {
         context->GetDiagnostics()->Error(DiagMessage() << err);
         return false;
@@ -547,8 +511,7 @@
       // width - 2.
       image->width -= 2;
       image->height -= 2;
-      memmove(image->rows.get(), image->rows.get() + 1,
-              image->height * sizeof(uint8_t**));
+      memmove(image->rows.get(), image->rows.get() + 1, image->height * sizeof(uint8_t**));
       for (int32_t h = 0; h < image->height; h++) {
         memmove(image->rows[h], image->rows[h] + 4, image->width * 4);
       }
@@ -560,8 +523,7 @@
     }
 
     // Write the crunched PNG.
-    if (!WritePng(context, image.get(), nine_patch.get(),
-                  &crunched_png_buffer_out, {})) {
+    if (!WritePng(context, image.get(), nine_patch.get(), &crunched_png_buffer_out, {})) {
       return false;
     }
 
@@ -574,24 +536,21 @@
       // The re-encoded PNG is larger than the original, and there is
       // no mandatory transformation. Use the original.
       if (context->IsVerbose()) {
-        context->GetDiagnostics()->Note(
-            DiagMessage(path_data.source)
-            << "original PNG is smaller than crunched PNG"
-            << ", using original");
+        context->GetDiagnostics()->Note(DiagMessage(path_data.source)
+                                        << "original PNG is smaller than crunched PNG"
+                                        << ", using original");
       }
 
-      PngChunkFilter png_chunk_filter_again(content);
+      png_chunk_filter.Rewind();
       BigBuffer filtered_png_buffer(4096);
-      BigBufferOutputStream filtered_png_buffer_out(&filtered_png_buffer);
-      io::Copy(&filtered_png_buffer_out, &png_chunk_filter_again);
+      io::BigBufferOutputStream filtered_png_buffer_out(&filtered_png_buffer);
+      io::Copy(&filtered_png_buffer_out, &png_chunk_filter);
       buffer.AppendBuffer(std::move(filtered_png_buffer));
     }
 
     if (context->IsVerbose()) {
-      // For debugging only, use the legacy PNG cruncher and compare the
-      // resulting file sizes.
-      // This will help catch exotic cases where the new code may generate
-      // larger PNGs.
+      // For debugging only, use the legacy PNG cruncher and compare the resulting file sizes.
+      // This will help catch exotic cases where the new code may generate larger PNGs.
       std::stringstream legacy_stream(content);
       BigBuffer legacy_buffer(4096);
       Png png(context->GetDiagnostics());
diff --git a/tools/aapt2/compile/Png.cpp b/tools/aapt2/compile/Png.cpp
index 5e15c88..6d6147d 100644
--- a/tools/aapt2/compile/Png.cpp
+++ b/tools/aapt2/compile/Png.cpp
@@ -33,7 +33,6 @@
 namespace aapt {
 
 constexpr bool kDebug = false;
-constexpr size_t kPngSignatureSize = 8u;
 
 struct PngInfo {
   ~PngInfo() {
diff --git a/tools/aapt2/compile/Png.h b/tools/aapt2/compile/Png.h
index a820051..e4255e7 100644
--- a/tools/aapt2/compile/Png.h
+++ b/tools/aapt2/compile/Png.h
@@ -31,6 +31,9 @@
 
 namespace aapt {
 
+// Size in bytes of the PNG signature.
+constexpr size_t kPngSignatureSize = 8u;
+
 struct PngOptions {
   int grayscale_tolerance = 0;
 };
@@ -46,9 +49,9 @@
                const PngOptions& options);
 
  private:
-  IDiagnostics* mDiag;
-
   DISALLOW_COPY_AND_ASSIGN(Png);
+
+  IDiagnostics* mDiag;
 };
 
 /**
@@ -57,26 +60,26 @@
 class PngChunkFilter : public io::InputStream {
  public:
   explicit PngChunkFilter(const android::StringPiece& data);
+  virtual ~PngChunkFilter() = default;
 
-  bool Next(const void** buffer, int* len) override;
-  void BackUp(int count) override;
-  bool Skip(int count) override;
+  bool Next(const void** buffer, size_t* len) override;
+  void BackUp(size_t count) override;
 
-  google::protobuf::int64 ByteCount() const override {
-    return static_cast<google::protobuf::int64>(window_start_);
-  }
+  bool CanRewind() const override { return true; }
+  bool Rewind() override;
+  size_t ByteCount() const override { return window_start_; }
 
   bool HadError() const override { return error_; }
 
  private:
-  bool ConsumeWindow(const void** buffer, int* len);
+  DISALLOW_COPY_AND_ASSIGN(PngChunkFilter);
+
+  bool ConsumeWindow(const void** buffer, size_t* len);
 
   android::StringPiece data_;
   size_t window_start_ = 0;
   size_t window_end_ = 0;
   bool error_ = false;
-
-  DISALLOW_COPY_AND_ASSIGN(PngChunkFilter);
 };
 
 /**
diff --git a/tools/aapt2/compile/PngChunkFilter.cpp b/tools/aapt2/compile/PngChunkFilter.cpp
index edec123..f9043b5 100644
--- a/tools/aapt2/compile/PngChunkFilter.cpp
+++ b/tools/aapt2/compile/PngChunkFilter.cpp
@@ -71,16 +71,16 @@
 PngChunkFilter::PngChunkFilter(const StringPiece& data) : data_(data) {
   if (util::StartsWith(data_, kPngSignature)) {
     window_start_ = 0;
-    window_end_ = strlen(kPngSignature);
+    window_end_ = kPngSignatureSize;
   } else {
     error_ = true;
   }
 }
 
-bool PngChunkFilter::ConsumeWindow(const void** buffer, int* len) {
+bool PngChunkFilter::ConsumeWindow(const void** buffer, size_t* len) {
   if (window_start_ != window_end_) {
     // We have bytes to give from our window.
-    const int bytes_read = (int)(window_end_ - window_start_);
+    const size_t bytes_read = window_end_ - window_start_;
     *buffer = data_.data() + window_start_;
     *len = bytes_read;
     window_start_ = window_end_;
@@ -89,7 +89,7 @@
   return false;
 }
 
-bool PngChunkFilter::Next(const void** buffer, int* len) {
+bool PngChunkFilter::Next(const void** buffer, size_t* len) {
   if (error_) {
     return false;
   }
@@ -113,16 +113,14 @@
 
     // Verify the chunk length.
     const uint32_t chunk_len = Peek32LE(data_.data() + window_end_);
-    if (((uint64_t)chunk_len) + ((uint64_t)window_end_) + sizeof(uint32_t) >
-        data_.size()) {
+    if (((uint64_t)chunk_len) + ((uint64_t)window_end_) + sizeof(uint32_t) > data_.size()) {
       // Overflow.
       error_ = true;
       return false;
     }
 
     // Do we strip this chunk?
-    const uint32_t chunk_type =
-        Peek32LE(data_.data() + window_end_ + sizeof(uint32_t));
+    const uint32_t chunk_type = Peek32LE(data_.data() + window_end_ + sizeof(uint32_t));
     if (IsPngChunkWhitelisted(chunk_type)) {
       // Advance the window to include this chunk.
       window_end_ += kMinChunkHeaderSize + chunk_len;
@@ -146,31 +144,19 @@
   return false;
 }
 
-void PngChunkFilter::BackUp(int count) {
+void PngChunkFilter::BackUp(size_t count) {
   if (error_) {
     return;
   }
   window_start_ -= count;
 }
 
-bool PngChunkFilter::Skip(int count) {
+bool PngChunkFilter::Rewind() {
   if (error_) {
     return false;
   }
-
-  const void* buffer;
-  int len;
-  while (count > 0) {
-    if (!Next(&buffer, &len)) {
-      return false;
-    }
-    if (len > count) {
-      BackUp(len - count);
-      count = 0;
-    } else {
-      count -= len;
-    }
-  }
+  window_start_ = 0;
+  window_end_ = kPngSignatureSize;
   return true;
 }
 
diff --git a/tools/aapt2/compile/PngCrunch.cpp b/tools/aapt2/compile/PngCrunch.cpp
index 3b46d8b..ae98afc 100644
--- a/tools/aapt2/compile/PngCrunch.cpp
+++ b/tools/aapt2/compile/PngCrunch.cpp
@@ -29,12 +29,7 @@
 
 namespace aapt {
 
-// Size in bytes of the PNG signature.
-constexpr size_t kPngSignatureSize = 8u;
-
-/**
- * Custom deleter that destroys libpng read and info structs.
- */
+// Custom deleter that destroys libpng read and info structs.
 class PngReadStructDeleter {
  public:
   PngReadStructDeleter(png_structp read_ptr, png_infop info_ptr)
@@ -51,9 +46,7 @@
   DISALLOW_COPY_AND_ASSIGN(PngReadStructDeleter);
 };
 
-/**
- * Custom deleter that destroys libpng write and info structs.
- */
+// Custom deleter that destroys libpng write and info structs.
 class PngWriteStructDeleter {
  public:
   PngWriteStructDeleter(png_structp write_ptr, png_infop info_ptr)
@@ -82,12 +75,11 @@
   diag->Error(DiagMessage() << error_msg);
 }
 
-static void ReadDataFromStream(png_structp png_ptr, png_bytep buffer,
-                               png_size_t len) {
+static void ReadDataFromStream(png_structp png_ptr, png_bytep buffer, png_size_t len) {
   io::InputStream* in = (io::InputStream*)png_get_io_ptr(png_ptr);
 
   const void* in_buffer;
-  int in_len;
+  size_t in_len;
   if (!in->Next(&in_buffer, &in_len)) {
     if (in->HadError()) {
       std::string err = in->GetError();
@@ -96,19 +88,18 @@
     return;
   }
 
-  const size_t bytes_read = std::min(static_cast<size_t>(in_len), len);
+  const size_t bytes_read = std::min(in_len, len);
   memcpy(buffer, in_buffer, bytes_read);
-  if (bytes_read != static_cast<size_t>(in_len)) {
-    in->BackUp(in_len - static_cast<int>(bytes_read));
+  if (bytes_read != in_len) {
+    in->BackUp(in_len - bytes_read);
   }
 }
 
-static void WriteDataToStream(png_structp png_ptr, png_bytep buffer,
-                              png_size_t len) {
+static void WriteDataToStream(png_structp png_ptr, png_bytep buffer, png_size_t len) {
   io::OutputStream* out = (io::OutputStream*)png_get_io_ptr(png_ptr);
 
   void* out_buffer;
-  int out_len;
+  size_t out_len;
   while (len > 0) {
     if (!out->Next(&out_buffer, &out_len)) {
       if (out->HadError()) {
@@ -118,7 +109,7 @@
       return;
     }
 
-    const size_t bytes_written = std::min(static_cast<size_t>(out_len), len);
+    const size_t bytes_written = std::min(out_len, len);
     memcpy(out_buffer, buffer, bytes_written);
 
     // Advance the input buffer.
@@ -126,7 +117,7 @@
     len -= bytes_written;
 
     // Advance the output buffer.
-    out_len -= static_cast<int>(bytes_written);
+    out_len -= bytes_written;
   }
 
   // If the entire output buffer wasn't used, backup.
@@ -139,41 +130,35 @@
   // Read the first 8 bytes of the file looking for the PNG signature.
   // Bail early if it does not match.
   const png_byte* signature;
-  int buffer_size;
+  size_t buffer_size;
   if (!in->Next((const void**)&signature, &buffer_size)) {
-    context->GetDiagnostics()->Error(
-        DiagMessage() << android::base::SystemErrorCodeToString(errno));
+    context->GetDiagnostics()->Error(DiagMessage()
+                                     << android::base::SystemErrorCodeToString(errno));
     return {};
   }
 
-  if (static_cast<size_t>(buffer_size) < kPngSignatureSize ||
-      png_sig_cmp(signature, 0, kPngSignatureSize) != 0) {
-    context->GetDiagnostics()->Error(
-        DiagMessage() << "file signature does not match PNG signature");
+  if (buffer_size < kPngSignatureSize || png_sig_cmp(signature, 0, kPngSignatureSize) != 0) {
+    context->GetDiagnostics()->Error(DiagMessage()
+                                     << "file signature does not match PNG signature");
     return {};
   }
 
   // Start at the beginning of the first chunk.
-  in->BackUp(buffer_size - static_cast<int>(kPngSignatureSize));
+  in->BackUp(buffer_size - kPngSignatureSize);
 
-  // Create and initialize the png_struct with the default error and warning
-  // handlers.
-  // The header version is also passed in to ensure that this was built against
-  // the same
+  // Create and initialize the png_struct with the default error and warning handlers.
+  // The header version is also passed in to ensure that this was built against the same
   // version of libpng.
-  png_structp read_ptr =
-      png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
+  png_structp read_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
   if (read_ptr == nullptr) {
-    context->GetDiagnostics()->Error(
-        DiagMessage() << "failed to create libpng read png_struct");
+    context->GetDiagnostics()->Error(DiagMessage() << "failed to create libpng read png_struct");
     return {};
   }
 
   // Create and initialize the memory for image header and data.
   png_infop info_ptr = png_create_info_struct(read_ptr);
   if (info_ptr == nullptr) {
-    context->GetDiagnostics()->Error(
-        DiagMessage() << "failed to create libpng read png_info");
+    context->GetDiagnostics()->Error(DiagMessage() << "failed to create libpng read png_info");
     png_destroy_read_struct(&read_ptr, nullptr, nullptr);
     return {};
   }
@@ -189,8 +174,7 @@
   }
 
   // Handle warnings ourselves via IDiagnostics.
-  png_set_error_fn(read_ptr, (png_voidp)context->GetDiagnostics(), LogError,
-                   LogWarning);
+  png_set_error_fn(read_ptr, (png_voidp)context->GetDiagnostics(), LogError, LogWarning);
 
   // Set up the read functions which read from our custom data sources.
   png_set_read_fn(read_ptr, (png_voidp)in, ReadDataFromStream);
@@ -203,8 +187,7 @@
 
   // Extract image meta-data from the various chunk headers.
   uint32_t width, height;
-  int bit_depth, color_type, interlace_method, compression_method,
-      filter_method;
+  int bit_depth, color_type, interlace_method, compression_method, filter_method;
   png_get_IHDR(read_ptr, info_ptr, &width, &height, &bit_depth, &color_type,
                &interlace_method, &compression_method, &filter_method);
 
@@ -247,11 +230,9 @@
   // 9-patch uses int32_t to index images, so we cap the image dimensions to
   // something
   // that can always be represented by 9-patch.
-  if (width > std::numeric_limits<int32_t>::max() ||
-      height > std::numeric_limits<int32_t>::max()) {
-    context->GetDiagnostics()->Error(DiagMessage()
-                                     << "PNG image dimensions are too large: "
-                                     << width << "x" << height);
+  if (width > std::numeric_limits<int32_t>::max() || height > std::numeric_limits<int32_t>::max()) {
+    context->GetDiagnostics()->Error(
+        DiagMessage() << "PNG image dimensions are too large: " << width << "x" << height);
     return {};
   }
 
@@ -263,8 +244,7 @@
   CHECK(row_bytes == 4 * width);  // RGBA
 
   // Allocate one large block to hold the image.
-  output_image->data =
-      std::unique_ptr<uint8_t[]>(new uint8_t[height * row_bytes]);
+  output_image->data = std::unique_ptr<uint8_t[]>(new uint8_t[height * row_bytes]);
 
   // Create an array of rows that index into the data block.
   output_image->rows = std::unique_ptr<uint8_t* []>(new uint8_t*[height]);
@@ -281,19 +261,13 @@
   return output_image;
 }
 
-/**
- * Experimentally chosen constant to be added to the overhead of using color
- * type
- * PNG_COLOR_TYPE_PALETTE to account for the uncompressability of the palette
- * chunk.
- * Without this, many small PNGs encoded with palettes are larger after
- * compression than
- * the same PNGs encoded as RGBA.
- */
+// Experimentally chosen constant to be added to the overhead of using color type
+// PNG_COLOR_TYPE_PALETTE to account for the uncompressability of the palette chunk.
+// Without this, many small PNGs encoded with palettes are larger after compression than
+// the same PNGs encoded as RGBA.
 constexpr static const size_t kPaletteOverheadConstant = 1024u * 10u;
 
-// Pick a color type by which to encode the image, based on which color type
-// will take
+// Pick a color type by which to encode the image, based on which color type will take
 // the least amount of disk space.
 //
 // 9-patch images traditionally have not been encoded with palettes.
@@ -372,20 +346,17 @@
   return PNG_COLOR_TYPE_RGBA;
 }
 
-// Assigns indices to the color and alpha palettes, encodes them, and then
-// invokes
+// Assigns indices to the color and alpha palettes, encodes them, and then invokes
 // png_set_PLTE/png_set_tRNS.
 // This must be done before writing image data.
-// Image data must be transformed to use the indices assigned within the
-// palette.
+// Image data must be transformed to use the indices assigned within the palette.
 static void WritePalette(png_structp write_ptr, png_infop write_info_ptr,
                          std::unordered_map<uint32_t, int>* color_palette,
                          std::unordered_set<uint32_t>* alpha_palette) {
   CHECK(color_palette->size() <= 256);
   CHECK(alpha_palette->size() <= 256);
 
-  // Populate the PNG palette struct and assign indices to the color
-  // palette.
+  // Populate the PNG palette struct and assign indices to the color palette.
 
   // Colors in the alpha palette should have smaller indices.
   // This will ensure that we can truncate the alpha palette if it is
@@ -403,13 +374,11 @@
   }
 
   // Create the PNG color palette struct.
-  auto color_palette_bytes =
-      std::unique_ptr<png_color[]>(new png_color[color_palette->size()]);
+  auto color_palette_bytes = std::unique_ptr<png_color[]>(new png_color[color_palette->size()]);
 
   std::unique_ptr<png_byte[]> alpha_palette_bytes;
   if (!alpha_palette->empty()) {
-    alpha_palette_bytes =
-        std::unique_ptr<png_byte[]>(new png_byte[alpha_palette->size()]);
+    alpha_palette_bytes = std::unique_ptr<png_byte[]>(new png_byte[alpha_palette->size()]);
   }
 
   for (const auto& entry : *color_palette) {
@@ -433,23 +402,20 @@
   // The bytes get copied here, so it is safe to release color_palette_bytes at
   // the end of function
   // scope.
-  png_set_PLTE(write_ptr, write_info_ptr, color_palette_bytes.get(),
-               color_palette->size());
+  png_set_PLTE(write_ptr, write_info_ptr, color_palette_bytes.get(), color_palette->size());
 
   if (alpha_palette_bytes) {
-    png_set_tRNS(write_ptr, write_info_ptr, alpha_palette_bytes.get(),
-                 alpha_palette->size(), nullptr);
+    png_set_tRNS(write_ptr, write_info_ptr, alpha_palette_bytes.get(), alpha_palette->size(),
+                 nullptr);
   }
 }
 
 // Write the 9-patch custom PNG chunks to write_info_ptr. This must be done
-// before
-// writing image data.
+// before writing image data.
 static void WriteNinePatch(png_structp write_ptr, png_infop write_info_ptr,
                            const NinePatch* nine_patch) {
   // The order of the chunks is important.
-  // 9-patch code in older platforms expects the 9-patch chunk to
-  // be last.
+  // 9-patch code in older platforms expects the 9-patch chunk to be last.
 
   png_unknown_chunk unknown_chunks[3];
   memset(unknown_chunks, 0, sizeof(unknown_chunks));
@@ -475,8 +441,7 @@
     index++;
   }
 
-  std::unique_ptr<uint8_t[]> serialized_nine_patch =
-      nine_patch->SerializeBase(&chunk_len);
+  std::unique_ptr<uint8_t[]> serialized_nine_patch = nine_patch->SerializeBase(&chunk_len);
   strcpy((char*)unknown_chunks[index].name, "npTc");
   unknown_chunks[index].size = chunk_len;
   unknown_chunks[index].data = (png_bytep)serialized_nine_patch.get();
@@ -497,22 +462,18 @@
               const PngOptions& options) {
   // Create and initialize the write png_struct with the default error and
   // warning handlers.
-  // The header version is also passed in to ensure that this was built against
-  // the same
+  // The header version is also passed in to ensure that this was built against the same
   // version of libpng.
-  png_structp write_ptr =
-      png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
+  png_structp write_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr);
   if (write_ptr == nullptr) {
-    context->GetDiagnostics()->Error(
-        DiagMessage() << "failed to create libpng write png_struct");
+    context->GetDiagnostics()->Error(DiagMessage() << "failed to create libpng write png_struct");
     return false;
   }
 
   // Allocate memory to store image header data.
   png_infop write_info_ptr = png_create_info_struct(write_ptr);
   if (write_info_ptr == nullptr) {
-    context->GetDiagnostics()->Error(
-        DiagMessage() << "failed to create libpng write png_info");
+    context->GetDiagnostics()->Error(DiagMessage() << "failed to create libpng write png_info");
     png_destroy_write_struct(&write_ptr, nullptr);
     return false;
   }
@@ -527,8 +488,7 @@
   }
 
   // Handle warnings with our IDiagnostics.
-  png_set_error_fn(write_ptr, (png_voidp)context->GetDiagnostics(), LogError,
-                   LogWarning);
+  png_set_error_fn(write_ptr, (png_voidp)context->GetDiagnostics(), LogError, LogWarning);
 
   // Set up the write functions which write to our custom data sources.
   png_set_write_fn(write_ptr, (png_voidp)out, WriteDataToStream, nullptr);
@@ -599,8 +559,7 @@
     context->GetDiagnostics()->Note(msg);
   }
 
-  const bool convertible_to_grayscale =
-      max_gray_deviation <= options.grayscale_tolerance;
+  const bool convertible_to_grayscale = max_gray_deviation <= options.grayscale_tolerance;
 
   const int new_color_type = PickColorType(
       image->width, image->height, grayscale, convertible_to_grayscale,
@@ -715,15 +674,12 @@
       }
       png_write_row(write_ptr, out_row.get());
     }
-  } else if (new_color_type == PNG_COLOR_TYPE_RGB ||
-             new_color_type == PNG_COLOR_TYPE_RGBA) {
+  } else if (new_color_type == PNG_COLOR_TYPE_RGB || new_color_type == PNG_COLOR_TYPE_RGBA) {
     const size_t bpp = new_color_type == PNG_COLOR_TYPE_RGB ? 3 : 4;
     if (needs_to_zero_rgb_channels_of_transparent_pixels) {
       // The source RGBA data can't be used as-is, because we need to zero out
-      // the RGB
-      // values of transparent pixels.
-      auto out_row =
-          std::unique_ptr<png_byte[]>(new png_byte[image->width * bpp]);
+      // the RGB values of transparent pixels.
+      auto out_row = std::unique_ptr<png_byte[]>(new png_byte[image->width * bpp]);
 
       for (int32_t y = 0; y < image->height; y++) {
         png_const_bytep in_row = image->rows[y];
@@ -747,8 +703,7 @@
       }
     } else {
       // The source image can be used as-is, just tell libpng whether or not to
-      // ignore
-      // the alpha channel.
+      // ignore the alpha channel.
       if (new_color_type == PNG_COLOR_TYPE_RGB) {
         // Delete the extraneous alpha values that we appended to our buffer
         // when reading the original values.
diff --git a/tools/aapt2/flatten/Archive.cpp b/tools/aapt2/flatten/Archive.cpp
index 5c96a4d..826f91b 100644
--- a/tools/aapt2/flatten/Archive.cpp
+++ b/tools/aapt2/flatten/Archive.cpp
@@ -21,6 +21,7 @@
 #include <string>
 #include <vector>
 
+#include "android-base/errors.h"
 #include "android-base/macros.h"
 #include "androidfw/StringPiece.h"
 #include "ziparchive/zip_writer.h"
@@ -37,14 +38,14 @@
  public:
   DirectoryWriter() = default;
 
-  bool Open(IDiagnostics* diag, const StringPiece& out_dir) {
+  bool Open(const StringPiece& out_dir) {
     dir_ = out_dir.to_string();
     file::FileType type = file::GetFileType(dir_);
     if (type == file::FileType::kNonexistant) {
-      diag->Error(DiagMessage() << "directory " << dir_ << " does not exist");
+      error_ = "directory does not exist";
       return false;
     } else if (type != file::FileType::kDirectory) {
-      diag->Error(DiagMessage() << dir_ << " is not a directory");
+      error_ = "not a directory";
       return false;
     }
     return true;
@@ -61,27 +62,19 @@
 
     file_ = {fopen(full_path.data(), "wb"), fclose};
     if (!file_) {
+      error_ = android::base::SystemErrorCodeToString(errno);
       return false;
     }
     return true;
   }
 
-  bool WriteEntry(const BigBuffer& buffer) override {
+  bool Write(const void* data, int len) override {
     if (!file_) {
       return false;
     }
 
-    for (const BigBuffer::Block& b : buffer) {
-      if (fwrite(b.buffer.get(), 1, b.size, file_.get()) != b.size) {
-        file_.reset(nullptr);
-        return false;
-      }
-    }
-    return true;
-  }
-
-  bool WriteEntry(const void* data, size_t len) override {
-    if (fwrite(data, 1, len, file_.get()) != len) {
+    if (fwrite(data, 1, len, file_.get()) != static_cast<size_t>(len)) {
+      error_ = android::base::SystemErrorCodeToString(errno);
       file_.reset(nullptr);
       return false;
     }
@@ -96,22 +89,41 @@
     return true;
   }
 
+  bool WriteFile(const StringPiece& path, uint32_t flags, io::InputStream* in) override {
+    if (!StartEntry(path, flags)) {
+      return false;
+    }
+
+    const void* data = nullptr;
+    size_t len = 0;
+    while (in->Next(&data, &len)) {
+      if (!Write(data, static_cast<int>(len))) {
+        return false;
+      }
+    }
+    return !in->HadError();
+  }
+
+  bool HadError() const override { return !error_.empty(); }
+
+  std::string GetError() const override { return error_; }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(DirectoryWriter);
 
   std::string dir_;
   std::unique_ptr<FILE, decltype(fclose)*> file_ = {nullptr, fclose};
+  std::string error_;
 };
 
 class ZipFileWriter : public IArchiveWriter {
  public:
   ZipFileWriter() = default;
 
-  bool Open(IDiagnostics* diag, const StringPiece& path) {
+  bool Open(const StringPiece& path) {
     file_ = {fopen(path.data(), "w+b"), fclose};
     if (!file_) {
-      diag->Error(DiagMessage() << "failed to Open " << path << ": "
-                                << strerror(errno));
+      error_ = android::base::SystemErrorCodeToString(errno);
       return false;
     }
     writer_ = util::make_unique<ZipWriter>(file_.get());
@@ -134,37 +146,83 @@
 
     int32_t result = writer_->StartEntry(path.data(), zip_flags);
     if (result != 0) {
+      error_ = ZipWriter::ErrorCodeString(result);
       return false;
     }
     return true;
   }
 
-  bool WriteEntry(const void* data, size_t len) override {
+  bool Write(const void* data, int len) override {
     int32_t result = writer_->WriteBytes(data, len);
     if (result != 0) {
+      error_ = ZipWriter::ErrorCodeString(result);
       return false;
     }
     return true;
   }
 
-  bool WriteEntry(const BigBuffer& buffer) override {
-    for (const BigBuffer::Block& b : buffer) {
-      int32_t result = writer_->WriteBytes(b.buffer.get(), b.size);
-      if (result != 0) {
-        return false;
-      }
-    }
-    return true;
-  }
-
   bool FinishEntry() override {
     int32_t result = writer_->FinishEntry();
     if (result != 0) {
+      error_ = ZipWriter::ErrorCodeString(result);
       return false;
     }
     return true;
   }
 
+  bool WriteFile(const StringPiece& path, uint32_t flags, io::InputStream* in) override {
+    while (true) {
+      if (!StartEntry(path, flags)) {
+        return false;
+      }
+
+      const void* data = nullptr;
+      size_t len = 0;
+      while (in->Next(&data, &len)) {
+        if (!Write(data, static_cast<int>(len))) {
+          return false;
+        }
+      }
+
+      if (in->HadError()) {
+        return false;
+      }
+
+      if (!FinishEntry()) {
+        return false;
+      }
+
+      // Check to see if the file was compressed enough. This preserves the behavior of AAPT.
+      if ((flags & ArchiveEntry::kCompress) != 0 && in->CanRewind()) {
+        ZipWriter::FileEntry last_entry;
+        int32_t result = writer_->GetLastEntry(&last_entry);
+        CHECK(result == 0);
+        if (last_entry.compressed_size + (last_entry.compressed_size / 10) >
+            last_entry.uncompressed_size) {
+          // The file was not compressed enough, rewind and store it uncompressed.
+          if (!in->Rewind()) {
+            // Well we tried, may as well keep what we had.
+            return true;
+          }
+
+          int32_t result = writer_->DiscardLastEntry();
+          if (result != 0) {
+            error_ = ZipWriter::ErrorCodeString(result);
+            return false;
+          }
+          flags &= ~ArchiveEntry::kCompress;
+
+          continue;
+        }
+      }
+      return true;
+    }
+  }
+
+  bool HadError() const override { return !error_.empty(); }
+
+  std::string GetError() const override { return error_; }
+
   virtual ~ZipFileWriter() {
     if (writer_) {
       writer_->Finish();
@@ -176,24 +234,26 @@
 
   std::unique_ptr<FILE, decltype(fclose)*> file_ = {nullptr, fclose};
   std::unique_ptr<ZipWriter> writer_;
+  std::string error_;
 };
 
 }  // namespace
 
-std::unique_ptr<IArchiveWriter> CreateDirectoryArchiveWriter(
-    IDiagnostics* diag, const StringPiece& path) {
-  std::unique_ptr<DirectoryWriter> writer =
-      util::make_unique<DirectoryWriter>();
-  if (!writer->Open(diag, path)) {
+std::unique_ptr<IArchiveWriter> CreateDirectoryArchiveWriter(IDiagnostics* diag,
+                                                             const StringPiece& path) {
+  std::unique_ptr<DirectoryWriter> writer = util::make_unique<DirectoryWriter>();
+  if (!writer->Open(path)) {
+    diag->Error(DiagMessage(path) << writer->GetError());
     return {};
   }
   return std::move(writer);
 }
 
-std::unique_ptr<IArchiveWriter> CreateZipFileArchiveWriter(
-    IDiagnostics* diag, const StringPiece& path) {
+std::unique_ptr<IArchiveWriter> CreateZipFileArchiveWriter(IDiagnostics* diag,
+                                                           const StringPiece& path) {
   std::unique_ptr<ZipFileWriter> writer = util::make_unique<ZipFileWriter>();
-  if (!writer->Open(diag, path)) {
+  if (!writer->Open(path)) {
+    diag->Error(DiagMessage(path) << writer->GetError());
     return {};
   }
   return std::move(writer);
diff --git a/tools/aapt2/flatten/Archive.h b/tools/aapt2/flatten/Archive.h
index f0681bd..4ee4ce7 100644
--- a/tools/aapt2/flatten/Archive.h
+++ b/tools/aapt2/flatten/Archive.h
@@ -26,6 +26,7 @@
 #include "google/protobuf/io/zero_copy_stream_impl_lite.h"
 
 #include "Diagnostics.h"
+#include "io/Io.h"
 #include "util/BigBuffer.h"
 #include "util/Files.h"
 
@@ -42,19 +43,31 @@
   size_t uncompressed_size;
 };
 
-class IArchiveWriter : public google::protobuf::io::CopyingOutputStream {
+class IArchiveWriter : public ::google::protobuf::io::CopyingOutputStream {
  public:
   virtual ~IArchiveWriter() = default;
 
+  virtual bool WriteFile(const android::StringPiece& path, uint32_t flags, io::InputStream* in) = 0;
+
+  // Starts a new entry and allows caller to write bytes to it sequentially.
+  // Only use StartEntry if code you do not control needs to write to a CopyingOutputStream.
+  // Prefer WriteFile instead of manually calling StartEntry/FinishEntry.
   virtual bool StartEntry(const android::StringPiece& path, uint32_t flags) = 0;
-  virtual bool WriteEntry(const BigBuffer& buffer) = 0;
-  virtual bool WriteEntry(const void* data, size_t len) = 0;
+
+  // Called to finish writing an entry previously started by StartEntry.
+  // Prefer WriteFile instead of manually calling StartEntry/FinishEntry.
   virtual bool FinishEntry() = 0;
 
-  // CopyingOutputStream implementations.
-  bool Write(const void* buffer, int size) override {
-    return WriteEntry(buffer, size);
-  }
+  // CopyingOutputStream implementation that allows sequential writes to this archive. Only
+  // valid between calls to StartEntry and FinishEntry.
+  virtual bool Write(const void* buffer, int size) = 0;
+
+  // Returns true if there was an error writing to the archive.
+  // The resulting error message can be retrieved from GetError().
+  virtual bool HadError() const = 0;
+
+  // Returns the error message if HadError() returns true.
+  virtual std::string GetError() const = 0;
 };
 
 std::unique_ptr<IArchiveWriter> CreateDirectoryArchiveWriter(IDiagnostics* diag,
diff --git a/tools/aapt2/io/BigBufferInputStream.h b/tools/aapt2/io/BigBufferInputStream.h
new file mode 100644
index 0000000..92612c7
--- /dev/null
+++ b/tools/aapt2/io/BigBufferInputStream.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAPT_IO_BIGBUFFERINPUTSTREAM_H
+#define AAPT_IO_BIGBUFFERINPUTSTREAM_H
+
+#include "io/Io.h"
+#include "util/BigBuffer.h"
+
+namespace aapt {
+namespace io {
+
+class BigBufferInputStream : public InputStream {
+ public:
+  inline explicit BigBufferInputStream(const BigBuffer* buffer)
+      : buffer_(buffer), iter_(buffer->begin()) {}
+  virtual ~BigBufferInputStream() = default;
+
+  bool Next(const void** data, size_t* size) override;
+
+  void BackUp(size_t count) override;
+
+  bool CanRewind() const override;
+
+  bool Rewind() override;
+
+  size_t ByteCount() const override;
+
+  bool HadError() const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BigBufferInputStream);
+
+  const BigBuffer* buffer_;
+  BigBuffer::const_iterator iter_;
+  size_t offset_ = 0;
+  size_t bytes_read_ = 0;
+};
+
+}  // namespace io
+}  // namespace aapt
+
+#endif  // AAPT_IO_BIGBUFFERINPUTSTREAM_H
diff --git a/tools/aapt2/io/BigBufferOutputStream.h b/tools/aapt2/io/BigBufferOutputStream.h
new file mode 100644
index 0000000..95113bc
--- /dev/null
+++ b/tools/aapt2/io/BigBufferOutputStream.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAPT_IO_BIGBUFFEROUTPUTSTREAM_H
+#define AAPT_IO_BIGBUFFEROUTPUTSTREAM_H
+
+#include "io/Io.h"
+#include "util/BigBuffer.h"
+
+namespace aapt {
+namespace io {
+
+class BigBufferOutputStream : public OutputStream {
+ public:
+  inline explicit BigBufferOutputStream(BigBuffer* buffer) : buffer_(buffer) {}
+  virtual ~BigBufferOutputStream() = default;
+
+  bool Next(void** data, size_t* size) override;
+
+  void BackUp(size_t count) override;
+
+  size_t ByteCount() const override;
+
+  bool HadError() const override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BigBufferOutputStream);
+
+  BigBuffer* buffer_;
+};
+
+}  // namespace io
+}  // namespace aapt
+
+#endif  // AAPT_IO_BIGBUFFEROUTPUTSTREAM_H
diff --git a/tools/aapt2/io/BigBufferStreams.cpp b/tools/aapt2/io/BigBufferStreams.cpp
new file mode 100644
index 0000000..eb99033
--- /dev/null
+++ b/tools/aapt2/io/BigBufferStreams.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "io/BigBufferInputStream.h"
+#include "io/BigBufferOutputStream.h"
+
+namespace aapt {
+namespace io {
+
+//
+// BigBufferInputStream
+//
+
+bool BigBufferInputStream::Next(const void** data, size_t* size) {
+  if (iter_ == buffer_->end()) {
+    return false;
+  }
+
+  if (offset_ == iter_->size) {
+    ++iter_;
+    if (iter_ == buffer_->end()) {
+      return false;
+    }
+    offset_ = 0;
+  }
+
+  *data = iter_->buffer.get() + offset_;
+  *size = iter_->size - offset_;
+  bytes_read_ += iter_->size - offset_;
+  offset_ = iter_->size;
+  return true;
+}
+
+void BigBufferInputStream::BackUp(size_t count) {
+  if (count > offset_) {
+    bytes_read_ -= offset_;
+    offset_ = 0;
+  } else {
+    offset_ -= count;
+    bytes_read_ -= count;
+  }
+}
+
+bool BigBufferInputStream::CanRewind() const { return true; }
+
+bool BigBufferInputStream::Rewind() {
+  iter_ = buffer_->begin();
+  offset_ = 0;
+  bytes_read_ = 0;
+  return true;
+}
+
+size_t BigBufferInputStream::ByteCount() const { return bytes_read_; }
+
+bool BigBufferInputStream::HadError() const { return false; }
+
+//
+// BigBufferOutputStream
+//
+
+bool BigBufferOutputStream::Next(void** data, size_t* size) {
+  *data = buffer_->NextBlock(size);
+  return true;
+}
+
+void BigBufferOutputStream::BackUp(size_t count) { buffer_->BackUp(count); }
+
+size_t BigBufferOutputStream::ByteCount() const { return buffer_->size(); }
+
+bool BigBufferOutputStream::HadError() const { return false; }
+
+}  // namespace io
+}  // namespace aapt
diff --git a/tools/aapt2/io/Data.h b/tools/aapt2/io/Data.h
index fdc044d..09dc7ea 100644
--- a/tools/aapt2/io/Data.h
+++ b/tools/aapt2/io/Data.h
@@ -22,14 +22,13 @@
 #include "android-base/macros.h"
 #include "utils/FileMap.h"
 
+#include "io/Io.h"
+
 namespace aapt {
 namespace io {
 
-/**
- * Interface for a block of contiguous memory. An instance of this interface
- * owns the data.
- */
-class IData {
+// Interface for a block of contiguous memory. An instance of this interface owns the data.
+class IData : public InputStream {
  public:
   virtual ~IData() = default;
 
@@ -40,7 +39,8 @@
 class DataSegment : public IData {
  public:
   explicit DataSegment(std::unique_ptr<IData> data, size_t offset, size_t len)
-      : data_(std::move(data)), offset_(offset), len_(len) {}
+      : data_(std::move(data)), offset_(offset), len_(len), next_read_(offset) {}
+  virtual ~DataSegment() = default;
 
   const void* data() const override {
     return static_cast<const uint8_t*>(data_->data()) + offset_;
@@ -48,63 +48,163 @@
 
   size_t size() const override { return len_; }
 
+  bool Next(const void** data, size_t* size) override {
+    if (next_read_ == offset_ + len_) {
+      return false;
+    }
+    *data = static_cast<const uint8_t*>(data_->data()) + next_read_;
+    *size = len_ - (next_read_ - offset_);
+    next_read_ = offset_ + len_;
+    return true;
+  }
+
+  void BackUp(size_t count) override {
+    if (count > next_read_ - offset_) {
+      next_read_ = offset_;
+    } else {
+      next_read_ -= count;
+    }
+  }
+
+  bool CanRewind() const override { return true; }
+
+  bool Rewind() override {
+    next_read_ = offset_;
+    return true;
+  }
+
+  size_t ByteCount() const override { return next_read_ - offset_; }
+
+  bool HadError() const override { return false; }
+
  private:
   DISALLOW_COPY_AND_ASSIGN(DataSegment);
 
   std::unique_ptr<IData> data_;
   size_t offset_;
   size_t len_;
+  size_t next_read_;
 };
 
-/**
- * Implementation of IData that exposes a memory mapped file. The mmapped file
- * is owned by this
- * object.
- */
+// Implementation of IData that exposes a memory mapped file.
+// The mmapped file is owned by this object.
 class MmappedData : public IData {
  public:
-  explicit MmappedData(android::FileMap&& map)
-      : map_(std::forward<android::FileMap>(map)) {}
+  explicit MmappedData(android::FileMap&& map) : map_(std::forward<android::FileMap>(map)) {}
+  virtual ~MmappedData() = default;
 
   const void* data() const override { return map_.getDataPtr(); }
 
   size_t size() const override { return map_.getDataLength(); }
 
+  bool Next(const void** data, size_t* size) override {
+    if (next_read_ == map_.getDataLength()) {
+      return false;
+    }
+    *data = reinterpret_cast<const uint8_t*>(map_.getDataPtr()) + next_read_;
+    *size = map_.getDataLength() - next_read_;
+    next_read_ = map_.getDataLength();
+    return true;
+  }
+
+  void BackUp(size_t count) override {
+    if (count > next_read_) {
+      next_read_ = 0;
+    } else {
+      next_read_ -= count;
+    }
+  }
+
+  bool CanRewind() const override { return true; }
+
+  bool Rewind() override {
+    next_read_ = 0;
+    return true;
+  }
+
+  size_t ByteCount() const override { return next_read_; }
+
+  bool HadError() const override { return false; }
+
  private:
+  DISALLOW_COPY_AND_ASSIGN(MmappedData);
+
   android::FileMap map_;
+  size_t next_read_ = 0;
 };
 
-/**
- * Implementation of IData that exposes a block of memory that was malloc'ed
- * (new'ed). The
- * memory is owned by this object.
- */
+// Implementation of IData that exposes a block of memory that was malloc'ed (new'ed).
+// The memory is owned by this object.
 class MallocData : public IData {
  public:
   MallocData(std::unique_ptr<const uint8_t[]> data, size_t size)
       : data_(std::move(data)), size_(size) {}
+  virtual ~MallocData() = default;
 
   const void* data() const override { return data_.get(); }
 
   size_t size() const override { return size_; }
 
+  bool Next(const void** data, size_t* size) override {
+    if (next_read_ == size_) {
+      return false;
+    }
+    *data = data_.get() + next_read_;
+    *size = size_ - next_read_;
+    next_read_ = size_;
+    return true;
+  }
+
+  void BackUp(size_t count) override {
+    if (count > next_read_) {
+      next_read_ = 0;
+    } else {
+      next_read_ -= count;
+    }
+  }
+
+  bool CanRewind() const override { return true; }
+
+  bool Rewind() override {
+    next_read_ = 0;
+    return true;
+  }
+
+  size_t ByteCount() const override { return next_read_; }
+
+  bool HadError() const override { return false; }
+
  private:
+  DISALLOW_COPY_AND_ASSIGN(MallocData);
+
   std::unique_ptr<const uint8_t[]> data_;
   size_t size_;
+  size_t next_read_ = 0;
 };
 
-/**
- * When mmap fails because the file has length 0, we use the EmptyData to
- * simulate data of length 0.
- */
+// When mmap fails because the file has length 0, we use EmptyData to simulate data of length 0.
 class EmptyData : public IData {
  public:
+  virtual ~EmptyData() = default;
+
   const void* data() const override {
     static const uint8_t d = 0;
     return &d;
   }
 
   size_t size() const override { return 0u; }
+
+  bool Next(const void** /*data*/, size_t* /*size*/) override { return false; }
+
+  void BackUp(size_t /*count*/) override {}
+
+  bool CanRewind() const override { return true; }
+
+  bool Rewind() override { return true; }
+
+  size_t ByteCount() const override { return 0u; }
+
+  bool HadError() const override { return false; }
 };
 
 }  // namespace io
diff --git a/tools/aapt2/io/File.h b/tools/aapt2/io/File.h
index 1ef9743..7ef6d88 100644
--- a/tools/aapt2/io/File.h
+++ b/tools/aapt2/io/File.h
@@ -30,40 +30,27 @@
 namespace aapt {
 namespace io {
 
-/**
- * Interface for a file, which could be a real file on the file system, or a
- * file inside
- * a ZIP archive.
- */
+// Interface for a file, which could be a real file on the file system, or a
+// file inside a ZIP archive.
 class IFile {
  public:
   virtual ~IFile() = default;
 
-  /**
-   * Open the file and return it as a block of contiguous memory. How this
-   * occurs is
-   * implementation dependent. For example, if this is a file on the file
-   * system, it may
-   * simply mmap the contents. If this file represents a compressed file in a
-   * ZIP archive,
-   * it may need to inflate it to memory, incurring a copy.
-   *
-   * Returns nullptr on failure.
-   */
+  // Open the file and return it as a block of contiguous memory. How this
+  // occurs is implementation dependent. For example, if this is a file on the file
+  // system, it may simply mmap the contents. If this file represents a compressed file in a
+  // ZIP archive, it may need to inflate it to memory, incurring a copy.
+  // Returns nullptr on failure.
   virtual std::unique_ptr<IData> OpenAsData() = 0;
 
-  /**
-   * Returns the source of this file. This is for presentation to the user and
-   * may not be a
-   * valid file system path (for example, it may contain a '@' sign to separate
-   * the files within
-   * a ZIP archive from the path to the containing ZIP archive.
-   */
+  // Returns the source of this file. This is for presentation to the user and
+  // may not be a valid file system path (for example, it may contain a '@' sign to separate
+  // the files within a ZIP archive from the path to the containing ZIP archive.
   virtual const Source& GetSource() const = 0;
 
   IFile* CreateFileSegment(size_t offset, size_t len);
 
-  /** Returns whether the file was compressed before it was stored in memory. */
+  // Returns whether the file was compressed before it was stored in memory.
   virtual bool WasCompressed() {
     return false;
   }
@@ -77,10 +64,7 @@
   std::list<std::unique_ptr<IFile>> segments_;
 };
 
-/**
- * An IFile that wraps an underlying IFile but limits it to a subsection of that
- * file.
- */
+// An IFile that wraps an underlying IFile but limits it to a subsection of that file.
 class FileSegment : public IFile {
  public:
   explicit FileSegment(IFile* file, size_t offset, size_t len)
@@ -106,11 +90,8 @@
   virtual IFile* Next() = 0;
 };
 
-/**
- * Interface for a collection of files, all of which share a common source. That
- * source may
- * simply be the filesystem, or a ZIP archive.
- */
+// Interface for a collection of files, all of which share a common source. That source may
+// simply be the filesystem, or a ZIP archive.
 class IFileCollection {
  public:
   virtual ~IFileCollection() = default;
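For reference, a minimal sketch of how the IFile interface above is typically consumed
(assumptions: the code runs inside namespace aapt with the io headers available, and `file`
is an io::IFile* obtained from an IFileCollection; error reporting is omitted):

    std::unique_ptr<io::IData> data = file->OpenAsData();
    if (data != nullptr) {
      // Whether the implementation mmapped the file or inflated a ZIP entry, the contents
      // are now one contiguous block: data->data(), data->size() bytes long.
      const uint8_t* bytes = reinterpret_cast<const uint8_t*>(data->data());
      const size_t size = data->size();
      // ... parse bytes[0..size) here ...
    }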
diff --git a/tools/aapt2/io/Io.cpp b/tools/aapt2/io/Io.cpp
index cab4b65..f5c5737 100644
--- a/tools/aapt2/io/Io.cpp
+++ b/tools/aapt2/io/Io.cpp
@@ -16,7 +16,6 @@
 
 #include "io/Io.h"
 
-#include <algorithm>
 #include <cstring>
 
 namespace aapt {
@@ -24,15 +23,15 @@
 
 bool Copy(OutputStream* out, InputStream* in) {
   const void* in_buffer;
-  int in_len;
+  size_t in_len;
   while (in->Next(&in_buffer, &in_len)) {
     void* out_buffer;
-    int out_len;
+    size_t out_len;
     if (!out->Next(&out_buffer, &out_len)) {
       return !out->HadError();
     }
 
-    const int bytes_to_copy = std::min(in_len, out_len);
+    const size_t bytes_to_copy = in_len < out_len ? in_len : out_len;
     memcpy(out_buffer, in_buffer, bytes_to_copy);
     out->BackUp(out_len - bytes_to_copy);
     in->BackUp(in_len - bytes_to_copy);
diff --git a/tools/aapt2/io/Io.h b/tools/aapt2/io/Io.h
index 33cdc7b..2a34d4d 100644
--- a/tools/aapt2/io/Io.h
+++ b/tools/aapt2/io/Io.h
@@ -19,42 +19,76 @@
 
 #include <string>
 
-#include "google/protobuf/io/zero_copy_stream_impl_lite.h"
-
 namespace aapt {
 namespace io {
 
-/**
- * InputStream interface that inherits from protobuf's ZeroCopyInputStream,
- * but adds error handling methods to better report issues.
- *
- * The code style here matches the protobuf style.
- */
-class InputStream : public ::google::protobuf::io::ZeroCopyInputStream {
+// InputStream interface that mimics protobuf's ZeroCopyInputStream,
+// with added error handling methods to better report issues.
+class InputStream {
  public:
+  virtual ~InputStream() = default;
+
+  // Returns a chunk of data for reading. data and size must not be nullptr.
+  // Returns true as long as there is more data to read; returns false if an error occurred
+  // or no data remains. If an error occurred, check HadError().
+  // The stream owns the buffer returned from this method and the buffer is invalidated
+  // anytime another mutable method is called.
+  virtual bool Next(const void** data, size_t* size) = 0;
+
+  // Backs up count bytes, where count must be smaller than or equal to the size of the last
+  // buffer returned from Next().
+  // Useful when the last block returned from Next() wasn't fully read.
+  virtual void BackUp(size_t count) = 0;
+
+  // Returns true if this InputStream can rewind. If so, Rewind() can be called.
+  virtual bool CanRewind() const { return false; }
+
+  // Rewinds the stream to the beginning so it can be read again.
+  // Returns true if the rewind succeeded.
+  // If CanRewind() returns false, this does nothing and returns false.
+  virtual bool Rewind() { return false; }
+
+  // Returns the number of bytes that have been read from the stream.
+  virtual size_t ByteCount() const = 0;
+
+  // Returns an error message if HadError() returned true.
   virtual std::string GetError() const { return {}; }
 
+  // Returns true if an error occurred. Errors are permanent.
   virtual bool HadError() const = 0;
 };
 
-/**
- * OutputStream interface that inherits from protobuf's ZeroCopyOutputStream,
- * but adds error handling methods to better report issues.
- *
- * The code style here matches the protobuf style.
- */
-class OutputStream : public ::google::protobuf::io::ZeroCopyOutputStream {
+// OutputStream interface that mimics protobuf's ZeroCopyOutputStream,
+// with added error handling methods to better report issues.
+class OutputStream {
  public:
+  virtual ~OutputStream() = default;
+
+  // Returns a buffer to which data can be written. The data written to this buffer will
+  // eventually be written to the stream. Call BackUp() if the data written doesn't occupy the
+  // entire buffer.
+  // Returns false if there was an error.
+  // The stream owns the buffer returned from this method and the buffer is invalidated
+  // anytime another mutable method is called.
+  virtual bool Next(void** data, size_t* size) = 0;
+
+  // Backs up count bytes, where count must be smaller than or equal to the size of the last
+  // buffer returned from Next().
+  // Useful when the last block returned from Next() wasn't fully written.
+  virtual void BackUp(size_t count) = 0;
+
+  // Returns the number of bytes that have been written to the stream.
+  virtual size_t ByteCount() const = 0;
+
+  // Returns an error message if HadError() returned true.
   virtual std::string GetError() const { return {}; }
 
+  // Returns true if an error occurred. Errors are permanent.
   virtual bool HadError() const = 0;
 };
 
-/**
- * Copies the data from in to out. Returns true if there was no error.
- * If there was an error, check the individual streams' HadError/GetError
- * methods.
- */
+// Copies the data from in to out. Returns false if there was an error.
+// If there was an error, check the individual streams' HadError/GetError methods.
 bool Copy(OutputStream* out, InputStream* in);
 
 }  // namespace io
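To make the Next()/BackUp() contract above concrete, here is a minimal read loop over an
InputStream (a sketch only; `in` is any io::InputStream*, such as the BigBufferInputStream
introduced in this change, and Consume() is a hypothetical helper that returns how many of
the offered bytes it actually used):

    const void* buffer;
    size_t size;
    while (in->Next(&buffer, &size)) {
      const size_t used = Consume(buffer, size);  // hypothetical consumer
      if (used < size) {
        // Return the unread tail so the next call to Next() sees it again.
        in->BackUp(size - used);
        break;
      }
    }
    if (in->HadError()) {
      // Next() returned false because of an error; details are in in->GetError().
    }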
diff --git a/tools/aapt2/link/Link.cpp b/tools/aapt2/link/Link.cpp
index 1b4d5bb..7f71589 100644
--- a/tools/aapt2/link/Link.cpp
+++ b/tools/aapt2/link/Link.cpp
@@ -38,6 +38,7 @@
 #include "flatten/Archive.h"
 #include "flatten/TableFlattener.h"
 #include "flatten/XmlFlattener.h"
+#include "io/BigBufferInputStream.h"
 #include "io/FileSystem.h"
 #include "io/ZipArchive.h"
 #include "java/JavaClassGenerator.h"
@@ -168,34 +169,57 @@
   int min_sdk_version_ = 0;
 };
 
+static bool CopyInputStreamToArchive(io::InputStream* in, const std::string& out_path,
+                                     uint32_t compression_flags, IArchiveWriter* writer,
+                                     IAaptContext* context) {
+  if (context->IsVerbose()) {
+    context->GetDiagnostics()->Note(DiagMessage() << "writing " << out_path << " to archive");
+  }
+
+  if (!writer->WriteFile(out_path, compression_flags, in)) {
+    context->GetDiagnostics()->Error(DiagMessage() << "failed to write " << out_path
+                                                   << " to archive: " << writer->GetError());
+    return false;
+  }
+  return true;
+}
+
 static bool CopyFileToArchive(io::IFile* file, const std::string& out_path,
                               uint32_t compression_flags,
                               IArchiveWriter* writer, IAaptContext* context) {
   std::unique_ptr<io::IData> data = file->OpenAsData();
   if (!data) {
-    context->GetDiagnostics()->Error(DiagMessage(file->GetSource())
-                                     << "failed to open file");
+    context->GetDiagnostics()->Error(DiagMessage(file->GetSource()) << "failed to open file");
     return false;
   }
+  return CopyInputStreamToArchive(data.get(), out_path, compression_flags, writer, context);
+}
 
-  const uint8_t* buffer = reinterpret_cast<const uint8_t*>(data->data());
-  const size_t buffer_size = data->size();
-
+static bool CopyProtoToArchive(::google::protobuf::MessageLite* proto_msg,
+                               const std::string& out_path, uint32_t compression_flags,
+                               IArchiveWriter* writer, IAaptContext* context) {
   if (context->IsVerbose()) {
-    context->GetDiagnostics()->Note(DiagMessage() << "writing " << out_path
-                                                  << " to archive");
+    context->GetDiagnostics()->Note(DiagMessage() << "writing " << out_path << " to archive");
   }
 
   if (writer->StartEntry(out_path, compression_flags)) {
-    if (writer->WriteEntry(buffer, buffer_size)) {
-      if (writer->FinishEntry()) {
-        return true;
+    // Make sure CopyingOutputStreamAdaptor is deleted before we call writer->FinishEntry().
+    {
+      // Wrap our IArchiveWriter with an adaptor that implements the ZeroCopyOutputStream interface.
+      ::google::protobuf::io::CopyingOutputStreamAdaptor adaptor(writer);
+      if (!proto_msg->SerializeToZeroCopyStream(&adaptor)) {
+        context->GetDiagnostics()->Error(DiagMessage()
+                                         << "failed to write " << out_path << " to archive");
+        return false;
       }
     }
-  }
 
-  context->GetDiagnostics()->Error(DiagMessage() << "failed to write file "
-                                                 << out_path);
+    if (writer->FinishEntry()) {
+      return true;
+    }
+  }
+  context->GetDiagnostics()->Error(DiagMessage() << "failed to write " << out_path
+                                                 << " to archive: " << writer->GetError());
   return false;
 }
 
@@ -221,16 +245,9 @@
     context->GetDiagnostics()->Note(msg);
   }
 
-  if (writer->StartEntry(path, ArchiveEntry::kCompress)) {
-    if (writer->WriteEntry(buffer)) {
-      if (writer->FinishEntry()) {
-        return true;
-      }
-    }
-  }
-  context->GetDiagnostics()->Error(DiagMessage() << "failed to write " << path
-                                                 << " to archive");
-  return false;
+  io::BigBufferInputStream input_stream(&buffer);
+  return CopyInputStreamToArchive(&input_stream, path.to_string(), ArchiveEntry::kCompress, writer,
+                                  context);
 }
 
 static std::unique_ptr<ResourceTable> LoadTableFromPb(const Source& source,
@@ -243,8 +260,7 @@
     return {};
   }
 
-  std::unique_ptr<ResourceTable> table =
-      DeserializeTableFromPb(pb_table, source, diag);
+  std::unique_ptr<ResourceTable> table = DeserializeTableFromPb(pb_table, source, diag);
   if (!table) {
     return {};
   }
@@ -898,49 +914,18 @@
     BigBuffer buffer(1024);
     TableFlattener flattener(options_.table_flattener_options, &buffer);
     if (!flattener.Consume(context_, table)) {
+      context_->GetDiagnostics()->Error(DiagMessage() << "failed to flatten resource table");
       return false;
     }
 
-    if (writer->StartEntry("resources.arsc", ArchiveEntry::kAlign)) {
-      if (writer->WriteEntry(buffer)) {
-        if (writer->FinishEntry()) {
-          return true;
-        }
-      }
-    }
-
-    context_->GetDiagnostics()->Error(
-        DiagMessage() << "failed to write resources.arsc to archive");
-    return false;
+    io::BigBufferInputStream input_stream(&buffer);
+    return CopyInputStreamToArchive(&input_stream, "resources.arsc", ArchiveEntry::kAlign, writer,
+                                    context_);
   }
 
   bool FlattenTableToPb(ResourceTable* table, IArchiveWriter* writer) {
-    // Create the file/zip entry.
-    if (!writer->StartEntry("resources.arsc.flat", 0)) {
-      context_->GetDiagnostics()->Error(DiagMessage() << "failed to open");
-      return false;
-    }
-
-    // Make sure CopyingOutputStreamAdaptor is deleted before we call
-    // writer->FinishEntry().
-    {
-      // Wrap our IArchiveWriter with an adaptor that implements the
-      // ZeroCopyOutputStream interface.
-      CopyingOutputStreamAdaptor adaptor(writer);
-
-      std::unique_ptr<pb::ResourceTable> pb_table = SerializeTableToPb(table);
-      if (!pb_table->SerializeToZeroCopyStream(&adaptor)) {
-        context_->GetDiagnostics()->Error(DiagMessage() << "failed to write");
-        return false;
-      }
-    }
-
-    if (!writer->FinishEntry()) {
-      context_->GetDiagnostics()->Error(DiagMessage()
-                                        << "failed to finish entry");
-      return false;
-    }
-    return true;
+    std::unique_ptr<pb::ResourceTable> pb_table = SerializeTableToPb(table);
+    return CopyProtoToArchive(pb_table.get(), "resources.arsc.flat", 0, writer, context_);
   }
 
   bool WriteJavaFile(ResourceTable* table,
@@ -971,8 +956,7 @@
 
     JavaClassGenerator generator(context_, table, java_options);
     if (!generator.Generate(package_name_to_generate, out_package, &fout)) {
-      context_->GetDiagnostics()->Error(DiagMessage(out_path)
-                                        << generator.getError());
+      context_->GetDiagnostics()->Error(DiagMessage(out_path) << generator.getError());
       return false;
     }
 
@@ -1484,7 +1468,6 @@
 
     if (options_.package_type == PackageType::kStaticLib) {
       if (!FlattenTableToPb(table, writer)) {
-        context_->GetDiagnostics()->Error(DiagMessage() << "failed to write resources.arsc.flat");
         return false;
       }
     } else {
diff --git a/tools/aapt2/readme.md b/tools/aapt2/readme.md
index 1c9a75d..9899f80 100644
--- a/tools/aapt2/readme.md
+++ b/tools/aapt2/readme.md
@@ -1,5 +1,12 @@
 # Android Asset Packaging Tool 2.0 (AAPT2) release notes
 
+## Version 2.11
+### `aapt2 link ...`
+- Adds the ability to specify assets directories with the -A parameter. Assets work just like
+  assets in the original AAPT. Packaging assets with aapt2 is not recommended, however, since
+  the resulting APK is post-processed by other tools anyway. AAPT2 does not process assets, it
+  only copies them, so including them this early in the pipeline just slows down incremental
+  builds.
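+  For example (output, manifest, and input paths here are placeholders):
+
+      aapt2 link -o out.apk --manifest AndroidManifest.xml -I android.jar -A assets/ res.flat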
+
 ## Version 2.10
 ### `aapt2 link ...`
 - Add ability to specify package ID to compile with for regular apps (not shared or static libs).