Merge change 3004 into donut

* changes:
  fix the SDK build for real this time. the missing macros weren't declared in the proper order last time.
diff --git a/cleanspec.mk b/cleanspec.mk
index 50a1f1f..14c8016 100644
--- a/cleanspec.mk
+++ b/cleanspec.mk
@@ -78,6 +78,7 @@
 $(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/framework_intermediates/src/telephony)
 $(call add-clean-step, rm -rf $(OUT_DIR)/target/product/*/obj)
 $(call add-clean-step, rm -f $(PRODUCT_OUT)/system/bin/tcpdump)
+$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/framework_intermediates/src/location)
 
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/core/Makefile b/core/Makefile
index fa0550c..824fc79 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -131,6 +131,7 @@
 			TARGET_BOOTLOADER_BOARD_NAME="$(TARGET_BOOTLOADER_BOARD_NAME)" \
 			BUILD_FINGERPRINT="$(BUILD_FINGERPRINT)" \
 			TARGET_BOARD_PLATFORM="$(TARGET_BOARD_PLATFORM)" \
+			TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \
 	        bash $(BUILDINFO_SH) > $@
 	$(hide) if [ -f $(TARGET_DEVICE_DIR)/system.prop ]; then \
 	          cat $(TARGET_DEVICE_DIR)/system.prop >> $@; \
@@ -248,9 +249,9 @@
 
 # We just build this directly to the install location.
 INSTALLED_RAMDISK_TARGET := $(BUILT_RAMDISK_TARGET)
-$(INSTALLED_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_RAMDISK_FILES)
+$(INSTALLED_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_RAMDISK_FILES) | $(MINIGZIP)
 	$(call pretty,"Target ram disk: $@")
-	$(hide) $(MKBOOTFS) $(TARGET_ROOT_OUT) | gzip > $@
+	$(hide) $(MKBOOTFS) $(TARGET_ROOT_OUT) | $(MINIGZIP) > $@
 
 
 ifneq ($(strip $(TARGET_NO_KERNEL)),true)
@@ -437,8 +438,8 @@
 # the module processing has already been done -- in fact, we used the
 # fact that all that has been done to get the list of modules that we
 # need notice files for.
-$(target_notice_file_html_gz): $(target_notice_file_html)
-	gzip -c $< > $@
+$(target_notice_file_html_gz): $(target_notice_file_html) | $(MINIGZIP)
+	$(MINIGZIP) -9 < $< > $@
 installed_notice_html_gz := $(TARGET_OUT)/etc/NOTICE.html.gz
 $(installed_notice_html_gz): $(target_notice_file_html_gz) | $(ACP)
 	$(copy-file-to-target)
@@ -672,7 +673,7 @@
 	@mkdir -p $(dir $@)
 	java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) > $@
 
-$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) \
+$(INSTALLED_RECOVERYIMAGE_TARGET): $(MKBOOTFS) $(MKBOOTIMG) $(MINIGZIP) \
 		$(INSTALLED_RAMDISK_TARGET) \
 		$(INSTALLED_BOOTIMAGE_TARGET) \
 		$(recovery_binary) \
@@ -697,7 +698,7 @@
 	cp $(RECOVERY_INSTALL_OTA_KEYS) $(TARGET_RECOVERY_ROOT_OUT)/res/keys
 	cat $(INSTALLED_DEFAULT_PROP_TARGET) $(recovery_build_prop) \
 	        > $(TARGET_RECOVERY_ROOT_OUT)/default.prop
-	$(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) | gzip > $(recovery_ramdisk)
+	$(MKBOOTFS) $(TARGET_RECOVERY_ROOT_OUT) | $(MINIGZIP) > $(recovery_ramdisk)
 	$(MKBOOTIMG) $(INTERNAL_RECOVERYIMAGE_ARGS) --output $@
 	@echo ----- Made recovery image -------- $@
 	$(hide) $(call assert-max-file-size,$@,$(BOARD_RECOVERYIMAGE_MAX_SIZE))
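Note on the gzip-to-$(MINIGZIP) swap in the ramdisk, NOTICE, and recovery rules above: as the imgdiff.c comments later in this change explain, imgdiff/applypatch can only regenerate a compressed chunk byte-for-byte when it was produced by zlib's deflate encoder (the "minigzip" tool), not GNU gzip. A minimal sketch, with a hypothetical function name and buffers, of the raw-deflate setup that imgdiff's TryReconstruction() tries (level 6 or 9, windowBits -15, memLevel 8, default strategy):

    #include <string.h>
    #include "zlib.h"

    /* Compress 'in' with the raw-deflate parameters imgdiff tries when
     * checking that a gzip chunk is reproducible.  Returns the number of
     * bytes written to 'out', or 0 on error.  Names and sizes here are
     * illustrative only, not part of this change. */
    static size_t deflate_like_minigzip(const unsigned char* in, size_t in_len,
                                        unsigned char* out, size_t out_len,
                                        int level) {
      z_stream strm;
      memset(&strm, 0, sizeof(strm));
      if (deflateInit2(&strm, level, Z_DEFLATED, -15, 8,
                       Z_DEFAULT_STRATEGY) != Z_OK) {
        return 0;
      }
      strm.next_in = (unsigned char*)in;
      strm.avail_in = in_len;
      strm.next_out = out;
      strm.avail_out = out_len;
      int ret = deflate(&strm, Z_FINISH);  /* single-shot; out must be large enough */
      deflateEnd(&strm);
      return (ret == Z_STREAM_END) ? out_len - strm.avail_out : 0;
    }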
diff --git a/core/apicheck_msg_current.txt b/core/apicheck_msg_current.txt
index c277ecd..d723a19 100644
--- a/core/apicheck_msg_current.txt
+++ b/core/apicheck_msg_current.txt
@@ -6,12 +6,11 @@
    1) You can add "@hide" javadoc comments to the methods, etc. listed in the
       errors above.
 
-   2) You can update current.xml by executing the following commands:
+   2) You can update current.xml by executing the following command:
 
-         p4 edit frameworks/base/api/current.xml
          make update-api
 
-      To check in the revised current.xml, you will need OWNERS approval.
+      To check in the revised current.xml, you will need approval from the Android API council.
 ******************************
 
 
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 4ee2985..c182a77 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -267,7 +267,6 @@
         JAVA_LIBRARIES,$(lib),$(LOCAL_IS_HOST_MODULE))/javalib.jar)
 
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_INSTALL_DIR := $(dir $(LOCAL_INSTALLED_MODULE))
-$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_INTERMEDIATES_DIR := $(intermediates)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CLASS_INTERMEDIATES_DIR := $(intermediates)/classes
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SOURCE_INTERMEDIATES_DIR := $(intermediates)/src
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JAVA_SOURCES := $(all_java_sources)
@@ -357,6 +356,8 @@
 $(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_IS_HOST_MODULE := $(LOCAL_IS_HOST_MODULE)
 $(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_HOST:= $(my_host)
 
+$(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_INTERMEDIATES_DIR:= $(intermediates)
+
 # Tell the module and all of its sub-modules who it is.
 $(LOCAL_INTERMEDIATE_TARGETS) : PRIVATE_MODULE:= $(LOCAL_MODULE)
 
diff --git a/core/binary.mk b/core/binary.mk
index b64bc81..ddcdc6f 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -212,6 +212,19 @@
 endif
 
 ###########################################################
+## ObjC: Compile .m files to .o
+###########################################################
+
+objc_sources := $(filter %.m,$(LOCAL_SRC_FILES))
+objc_objects := $(addprefix $(intermediates)/,$(objc_sources:.m=.o))
+
+ifneq ($(strip $(objc_objects)),)
+$(objc_objects): $(intermediates)/%.o: $(TOPDIR)$(LOCAL_PATH)/%.m $(yacc_cpps) $(PRIVATE_ADDITIONAL_DEPENDENCIES)
+	$(transform-$(PRIVATE_HOST)m-to-o)
+-include $(objc_objects:%.o=%.P)
+endif
+
+###########################################################
 ## AS: Compile .S files to .o.
 ###########################################################
 
diff --git a/core/config.mk b/core/config.mk
index 1ce5937..a5f3720 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -156,6 +156,7 @@
 ICUDATA := $(HOST_OUT_EXECUTABLES)/icudata$(HOST_EXECUTABLE_SUFFIX)
 SIGNAPK_JAR := $(HOST_OUT_JAVA_LIBRARIES)/signapk$(COMMON_JAVA_PACKAGE_SUFFIX)
 MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
+MINIGZIP := $(HOST_OUT_EXECUTABLES)/minigzip$(HOST_EXECUTABLE_SUFFIX)
 MKBOOTIMG := $(HOST_OUT_EXECUTABLES)/mkbootimg$(HOST_EXECUTABLE_SUFFIX)
 MKYAFFS2 := $(HOST_OUT_EXECUTABLES)/mkyaffs2image$(HOST_EXECUTABLE_SUFFIX)
 APICHECK := $(HOST_OUT_EXECUTABLES)/apicheck$(HOST_EXECUTABLE_SUFFIX)
@@ -274,7 +275,7 @@
 # The 'current' version is whatever this source tree is.  Once the apicheck
 # tool can generate the stubs from the xml files, we'll use that to be
 # able to build back-versions.  In the meantime, 'current' is the only
-# one supported.  
+# one supported.
 #
 # sgrax     is the opposite of xargs.  It takes the list of args and puts them
 #           on each line for sort to process.
diff --git a/core/definitions.mk b/core/definitions.mk
index 5631fb9..f3382c3 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -794,6 +794,22 @@
 endef
 
 ###########################################################
+## Commands for running gcc to compile an Objective-C file
+## This should never happen for target builds but this
+## will error at build time.
+###########################################################
+
+define transform-m-to-o-no-deps
+@echo "target ObjC: $(PRIVATE_MODULE) <= $<"
+$(call transform-c-or-s-to-o-no-deps)
+endef
+
+define transform-m-to-o
+$(transform-m-to-o-no-deps)
+$(hide) $(transform-d-to-p)
+endef
+
+###########################################################
 ## Commands for running gcc to compile a host C++ file
 ###########################################################
 
@@ -871,15 +887,45 @@
 endef
 
 ###########################################################
+## Commands for running gcc to compile a host Objective-C file
+###########################################################
+
+define transform-host-m-to-o-no-deps
+@echo "host ObjC: $(PRIVATE_MODULE) <= $<"
+$(call transform-host-c-or-s-to-o-no-deps)
+endef
+
+define transform-host-m-to-o
+$(transform-host-m-to-o-no-deps)
+$(transform-d-to-p)
+endef
+
+###########################################################
 ## Commands for running ar
 ###########################################################
 
+define extract-and-include-whole-static-libs
+$(foreach lib,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES), \
+	@echo "preparing StaticLib: $(PRIVATE_MODULE) [including $(lib)]"; \
+	ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(lib)))_objs;\
+	rm -rf $$ldir; \
+	mkdir -p $$ldir; \
+	filelist=; \
+	for f in `$(TARGET_AR) t $(lib)`; do \
+	    $(TARGET_AR) p $(lib) $$f > $$ldir/$$f; \
+	    filelist="$$filelist $$ldir/$$f"; \
+	done ; \
+	$(TARGET_AR) $(TARGET_GLOBAL_ARFLAGS) $(PRIVATE_ARFLAGS) $@ $$filelist;\
+)
+endef
+
 # Explicitly delete the archive first so that ar doesn't
 # try to add to an existing archive.
 define transform-o-to-static-lib
 @mkdir -p $(dir $@)
-@echo "target StaticLib: $(PRIVATE_MODULE) ($@)"
 @rm -f $@
+$(extract-and-include-whole-static-libs)
+@echo "target StaticLib: $(PRIVATE_MODULE) ($@)"
 $(hide) $(TARGET_AR) $(TARGET_GLOBAL_ARFLAGS) $(PRIVATE_ARFLAGS) $@ $^
 endef
 
diff --git a/core/main.mk b/core/main.mk
index 655a592..2bf8102 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -406,6 +406,10 @@
 
 # Clean up/verify variables defined by the board config file.
 TARGET_BOOTLOADER_BOARD_NAME := $(strip $(TARGET_BOOTLOADER_BOARD_NAME))
+TARGET_CPU_ABI := $(strip $(TARGET_CPU_ABI))
+ifeq ($(TARGET_CPU_ABI),)
+  $(error No TARGET_CPU_ABI defined by board config: $(board_config_mk))
+endif
 
 #
 # Include all of the makefiles in the system
diff --git a/core/pathmap.mk b/core/pathmap.mk
index 13cb80d..de7c1bb 100644
--- a/core/pathmap.mk
+++ b/core/pathmap.mk
@@ -82,6 +82,7 @@
 	    opengl \
 	    sax \
 	    telephony \
+	    tts \
 	    wifi \
 	 )
 
diff --git a/core/static_library.mk b/core/static_library.mk
index 252dfd0..2138e46 100644
--- a/core/static_library.mk
+++ b/core/static_library.mk
@@ -25,5 +25,6 @@
 $(all_objects) : TARGET_GLOBAL_CPPFLAGS := 
 endif
 
+$(LOCAL_BUILT_MODULE): $(built_whole_libraries)
 $(LOCAL_BUILT_MODULE): $(all_objects)
 	$(transform-o-to-static-lib)
diff --git a/target/board/generic/BoardConfig.mk b/target/board/generic/BoardConfig.mk
index a874742..6ec2de3 100644
--- a/target/board/generic/BoardConfig.mk
+++ b/target/board/generic/BoardConfig.mk
@@ -7,5 +7,6 @@
 TARGET_NO_BOOTLOADER := true
 TARGET_NO_KERNEL := true
 TARGET_NO_RADIOIMAGE := true
+TARGET_CPU_ABI := armeabi
 HAVE_HTC_AUDIO_DRIVER := true
 BOARD_USES_GENERIC_AUDIO := true
diff --git a/target/board/sim/BoardConfig.mk b/target/board/sim/BoardConfig.mk
index 92679d9..491b30f 100644
--- a/target/board/sim/BoardConfig.mk
+++ b/target/board/sim/BoardConfig.mk
@@ -17,6 +17,9 @@
 # Don't bother with a kernel
 TARGET_NO_KERNEL := true
 
+# The simulator does not support native code at all
+TARGET_CPU_ABI := none
+
 #the simulator partially emulates the original HTC /dev/eac audio interface
 HAVE_HTC_AUDIO_DRIVER := true
 BOARD_USES_GENERIC_AUDIO := true
diff --git a/tools/applypatch/Android.mk b/tools/applypatch/Android.mk
index 09f9862..c25e0d6 100644
--- a/tools/applypatch/Android.mk
+++ b/tools/applypatch/Android.mk
@@ -12,18 +12,29 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+ifneq ($(TARGET_SIMULATOR),true)
+
 LOCAL_PATH := $(call my-dir)
 include $(CLEAR_VARS)
 
-ifneq ($(TARGET_SIMULATOR),true)
-
-LOCAL_SRC_FILES := applypatch.c bsdiff.c freecache.c
+LOCAL_SRC_FILES := applypatch.c bsdiff.c freecache.c imgpatch.c
 LOCAL_MODULE := applypatch
 LOCAL_FORCE_STATIC_EXECUTABLE := true
 LOCAL_MODULE_TAGS := eng
-LOCAL_C_INCLUDES += external/bzip2
-LOCAL_STATIC_LIBRARIES += libmincrypt libbz libc
+LOCAL_C_INCLUDES += external/bzip2 external/zlib bootable/recovery
+LOCAL_STATIC_LIBRARIES += libmtdutils libmincrypt libbz libz libc
 
 include $(BUILD_EXECUTABLE)
 
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := imgdiff.c
+LOCAL_MODULE := imgdiff
+LOCAL_FORCE_STATIC_EXECUTABLE := true
+LOCAL_MODULE_TAGS := eng
+LOCAL_C_INCLUDES += external/zlib
+LOCAL_STATIC_LIBRARIES += libz
+
+include $(BUILD_HOST_EXECUTABLE)
+
 endif  # !TARGET_SIMULATOR
diff --git a/tools/applypatch/applypatch.c b/tools/applypatch/applypatch.c
index 23b41d7..db4c30b 100644
--- a/tools/applypatch/applypatch.c
+++ b/tools/applypatch/applypatch.c
@@ -25,12 +25,25 @@
 
 #include "mincrypt/sha.h"
 #include "applypatch.h"
+#include "mtdutils/mtdutils.h"
+
+int SaveFileContents(const char* filename, FileContents file);
+int LoadMTDContents(const char* filename, FileContents* file);
+int ParseSha1(const char* str, uint8_t* digest);
+
+static int mtd_partitions_scanned = 0;
 
 // Read a file into memory; store it and its associated metadata in
 // *file.  Return 0 on success.
 int LoadFileContents(const char* filename, FileContents* file) {
   file->data = NULL;
 
+  // A special 'filename' beginning with "MTD:" means to load the
+  // contents of an MTD partition.
+  if (strncmp(filename, "MTD:", 4) == 0) {
+    return LoadMTDContents(filename, file);
+  }
+
   if (stat(filename, &file->st) != 0) {
     fprintf(stderr, "failed to stat \"%s\": %s\n", filename, strerror(errno));
     return -1;
@@ -61,6 +74,182 @@
   return 0;
 }
 
+static size_t* size_array;
+// comparison function for qsort()ing an int array of indexes into
+// size_array[].
+static int compare_size_indices(const void* a, const void* b) {
+  int aa = *(int*)a;
+  int bb = *(int*)b;
+  if (size_array[aa] < size_array[bb]) {
+    return -1;
+  } else if (size_array[aa] > size_array[bb]) {
+    return 1;
+  } else {
+    return 0;
+  }
+}
+
+// Load the contents of an MTD partition into the provided
+// FileContents.  filename should be a string of the form
+// "MTD:<partition_name>:<size_1>:<sha1_1>:<size_2>:<sha1_2>:...".
+// The smallest size_n bytes for which that prefix of the mtd contents
+// has the corresponding sha1 hash will be loaded.  It is acceptable
+// for a size value to be repeated with different sha1s.  Will return
+// 0 on success.
+//
+// This complexity is needed because if an OTA installation is
+// interrupted, the partition might contain either the source or the
+// target data, which might be of different lengths.  We need to know
+// the length in order to read from MTD (there is no "end-of-file"
+// marker), so the caller must specify the possible lengths and the
+// hash of the data, and we'll do the load expecting to find one of
+// those hashes.
+int LoadMTDContents(const char* filename, FileContents* file) {
+  char* copy = strdup(filename);
+  const char* magic = strtok(copy, ":");
+  if (strcmp(magic, "MTD") != 0) {
+    fprintf(stderr, "LoadMTDContents called with bad filename (%s)\n",
+            filename);
+    return -1;
+  }
+  const char* partition = strtok(NULL, ":");
+
+  int i;
+  int colons = 0;
+  for (i = 0; filename[i] != '\0'; ++i) {
+    if (filename[i] == ':') {
+      ++colons;
+    }
+  }
+  if (colons < 3 || colons%2 == 0) {
+    fprintf(stderr, "LoadMTDContents called with bad filename (%s)\n",
+            filename);
+    return -1;
+  }
+
+  int pairs = (colons-1)/2;     // # of (size,sha1) pairs in filename
+  int* index = malloc(pairs * sizeof(int));
+  size_t* size = malloc(pairs * sizeof(size_t));
+  char** sha1sum = malloc(pairs * sizeof(char*));
+
+  for (i = 0; i < pairs; ++i) {
+    const char* size_str = strtok(NULL, ":");
+    size[i] = strtol(size_str, NULL, 10);
+    if (size[i] == 0) {
+      fprintf(stderr, "LoadMTDContents called with bad size (%s)\n", filename);
+      return -1;
+    }
+    sha1sum[i] = strtok(NULL, ":");
+    index[i] = i;
+  }
+
+  // sort the index[] array so it indexes the pairs in order of
+  // increasing size.
+  size_array = size;
+  qsort(index, pairs, sizeof(int), compare_size_indices);
+
+  if (!mtd_partitions_scanned) {
+    mtd_scan_partitions();
+    mtd_partitions_scanned = 1;
+  }
+
+  const MtdPartition* mtd = mtd_find_partition_by_name(partition);
+  if (mtd == NULL) {
+    fprintf(stderr, "mtd partition \"%s\" not found (loading %s)\n",
+            partition, filename);
+    return -1;
+  }
+
+  MtdReadContext* ctx = mtd_read_partition(mtd);
+  if (ctx == NULL) {
+    fprintf(stderr, "failed to initialize read of mtd partition \"%s\"\n",
+            partition);
+    return -1;
+  }
+
+  SHA_CTX sha_ctx;
+  SHA_init(&sha_ctx);
+  uint8_t parsed_sha[SHA_DIGEST_SIZE];
+
+  // allocate enough memory to hold the largest size.
+  file->data = malloc(size[index[pairs-1]]);
+  char* p = (char*)file->data;
+  file->size = 0;                // # bytes read so far
+
+  for (i = 0; i < pairs; ++i) {
+    // Read enough additional bytes to get us up to the next size
+    // (again, we're trying the possibilities in order of increasing
+    // size).
+    size_t next = size[index[i]] - file->size;
+    size_t read = 0;
+    if (next > 0) {
+      read = mtd_read_data(ctx, p, next);
+      if (next != read) {
+        fprintf(stderr, "short read (%d bytes of %d) for partition \"%s\"\n",
+                read, next, partition);
+        free(file->data);
+        file->data = NULL;
+        return -1;
+      }
+      SHA_update(&sha_ctx, p, read);
+      file->size += read;
+    }
+
+    // Duplicate the SHA context and finalize the duplicate so we can
+    // check it against this pair's expected hash.
+    SHA_CTX temp_ctx;
+    memcpy(&temp_ctx, &sha_ctx, sizeof(SHA_CTX));
+    const uint8_t* sha_so_far = SHA_final(&temp_ctx);
+
+    if (ParseSha1(sha1sum[index[i]], parsed_sha) != 0) {
+      fprintf(stderr, "failed to parse sha1 %s in %s\n",
+              sha1sum[index[i]], filename);
+      free(file->data);
+      file->data = NULL;
+      return -1;
+    }
+
+    if (memcmp(sha_so_far, parsed_sha, SHA_DIGEST_SIZE) == 0) {
+      // we have a match.  stop reading the partition; we'll return
+      // the data we've read so far.
+      printf("mtd read matched size %d sha %s\n",
+             size[index[i]], sha1sum[index[i]]);
+      break;
+    }
+
+    p += read;
+  }
+
+  mtd_read_close(ctx);
+
+  if (i == pairs) {
+    // Ran off the end of the list of (size,sha1) pairs without
+    // finding a match.
+    fprintf(stderr, "contents of MTD partition \"%s\" didn't match %s\n",
+            partition, filename);
+    free(file->data);
+    file->data = NULL;
+    return -1;
+  }
+
+  const uint8_t* sha_final = SHA_final(&sha_ctx);
+  for (i = 0; i < SHA_DIGEST_SIZE; ++i) {
+    file->sha1[i] = sha_final[i];
+  }
+
+  // Fake some stat() info.
+  file->st.st_mode = 0644;
+  file->st.st_uid = 0;
+  file->st.st_gid = 0;
+
+  free(copy);
+  free(index);
+  free(size);
+  free(sha1sum);
+
+  return 0;
+}
+
+
 // Save the contents of the given FileContents object under the given
 // filename.  Return 0 on success.
 int SaveFileContents(const char* filename, FileContents file) {
@@ -93,6 +282,76 @@
   return 0;
 }
 
+// Copy the contents of source_file to target_mtd partition, a string
+// of the form "MTD:<partition>[:...]".  Return 0 on success.
+int CopyToMTDPartition(const char* source_file, const char* target_mtd) {
+  char* partition = strchr(target_mtd, ':');
+  if (partition == NULL) {
+    fprintf(stderr, "bad MTD target name \"%s\"\n", target_mtd);
+    return -1;
+  }
+  ++partition;
+  // Trim off anything after a colon, eg "MTD:boot:blah:blah:blah...".
+  // We want just the partition name "boot".
+  partition = strdup(partition);
+  char* end = strchr(partition, ':');
+  if (end != NULL)
+    *end = '\0';
+
+  FILE* f = fopen(source_file, "rb");
+  if (f == NULL) {
+    fprintf(stderr, "failed to open %s for reading: %s\n",
+            source_file, strerror(errno));
+    return -1;
+  }
+
+  if (!mtd_partitions_scanned) {
+    mtd_scan_partitions();
+    mtd_partitions_scanned = 1;
+  }
+
+  const MtdPartition* mtd = mtd_find_partition_by_name(partition);
+  if (mtd == NULL) {
+    fprintf(stderr, "mtd partition \"%s\" not found for writing\n", partition);
+    return -1;
+  }
+
+  MtdWriteContext* ctx = mtd_write_partition(mtd);
+  if (ctx == NULL) {
+    fprintf(stderr, "failed to init mtd partition \"%s\" for writing\n",
+            partition);
+    return -1;
+  }
+
+  const int buffer_size = 4096;
+  char buffer[buffer_size];
+  size_t read;
+  while ((read = fread(buffer, 1, buffer_size, f)) > 0) {
+    size_t written = mtd_write_data(ctx, buffer, read);
+    if (written != read) {
+      fprintf(stderr, "only wrote %d of %d bytes to MTD %s\n",
+              written, read, partition);
+      mtd_write_close(ctx);
+      return -1;
+    }
+  }
+
+  fclose(f);
+  if (mtd_erase_blocks(ctx, -1) < 0) {
+    fprintf(stderr, "error finishing mtd write of %s\n", partition);
+    mtd_write_close(ctx);
+    return -1;
+  }
+
+  if (mtd_write_close(ctx)) {
+    fprintf(stderr, "error closing mtd write of %s\n", partition);
+    return -1;
+  }
+
+  free(partition);
+  return 0;
+}
+
 
 // Take a string 'str' of 40 hex digits and parse it into the 20
 // byte array 'digest'.  'str' may contain only the digest or be of
@@ -178,8 +437,13 @@
   FileContents file;
   file.data = NULL;
 
+  // It's okay to specify no sha1s; the check will pass if the
+  // LoadFileContents is successful.  (Useful for reading MTD
+  // partitions, where the filename encodes the sha1s; no need to
+  // check them twice.)
   if (LoadFileContents(argv[2], &file) != 0 ||
-      FindMatchingPatch(file.sha1, patches, num_patches) == NULL) {
+      (num_patches > 0 &&
+       FindMatchingPatch(file.sha1, patches, num_patches) == NULL)) {
     fprintf(stderr, "file \"%s\" doesn't have any of expected "
             "sha1 sums; checking cache\n", argv[2]);
 
@@ -241,14 +505,25 @@
 //
 // - otherwise, or if any error is encountered, exits with non-zero
 //   status.
+//
+// <src-file> (or <file> in check mode) may refer to an MTD partition
+// to read the source data.  See the comments for the
+// LoadMTDContents() function above for the format of such a filename.
 
 int main(int argc, char** argv) {
   if (argc < 2) {
  usage:
-    fprintf(stderr, "usage: %s <src-file> <tgt-file> <tgt-sha1> <tgt-size> [<src-sha1>:<patch> ...]\n"
-                    "   or  %s -c <file> [<sha1> ...]\n"
-                    "   or  %s -s <bytes>\n"
-                    "   or  %s -l\n",
+    fprintf(stderr,
+            "usage: %s <src-file> <tgt-file> <tgt-sha1> <tgt-size> "
+            "[<src-sha1>:<patch> ...]\n"
+            "   or  %s -c <file> [<sha1> ...]\n"
+            "   or  %s -s <bytes>\n"
+            "   or  %s -l\n"
+            "\n"
+            "Filenames may be of the form\n"
+            "  MTD:<partition>:<len_1>:<sha1_1>:<len_2>:<sha1_2>"
+              ":...:<backup-file>\n"
+            "to specify reading from or writing to an MTD partition.\n\n",
             argv[0], argv[0], argv[0], argv[0]);
     return 1;
   }
@@ -283,16 +558,7 @@
     target_filename = source_filename;
   }
 
-  // assume that target_filename (eg "/system/app/Foo.apk") is located
-  // on the same filesystem as its top-level directory ("/system").
-  // We need something that exists for calling statfs().
-  char* target_fs = strdup(target_filename);
-  char* slash = strchr(target_fs+1, '/');
-  if (slash != NULL) {
-    *slash = '\0';
-  }
-
-  if (ParseSha1(argv[3], target_sha1) != 0) {
+  if (ParseSha1(argv[3], target_sha1) != 0) {
     fprintf(stderr, "failed to parse tgt-sha1 \"%s\"\n", argv[3]);
     return 1;
   }
@@ -360,30 +626,70 @@
     }
   }
 
-  // Is there enough room in the target filesystem to hold the patched file?
-  size_t free_space = FreeSpaceForFile(target_fs);
-  int enough_space = free_space > (target_size * 3 / 2);  // 50% margin of error
-  printf("target %ld bytes; free space %ld bytes; enough %d\n",
-         (long)target_size, (long)free_space, enough_space);
+  // Is there enough room in the target filesystem to hold the patched
+  // file?
 
-  if (!enough_space && source_patch_filename != NULL) {
-    // Using the original source, but not enough free space.  First
-    // copy the source file to cache, then delete it from the original
-    // location.
+  if (strncmp(target_filename, "MTD:", 4) == 0) {
+    // If the target is an MTD partition, we're actually going to
+    // write the output to /tmp and then copy it to the partition.
+    // statfs() always returns 0 blocks free for /tmp, so instead
+    // we'll just assume that /tmp has enough space to hold the file.
+
+    // We still write the original source to cache, in case the MTD
+    // write is interrupted.
     if (MakeFreeSpaceOnCache(source_file.size) < 0) {
       fprintf(stderr, "not enough free space on /cache\n");
       return 1;
     }
-
     if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
       fprintf(stderr, "failed to back up source file\n");
       return 1;
     }
     made_copy = 1;
-    unlink(source_filename);
+  } else {
+    // assume that target_filename (eg "/system/app/Foo.apk") is located
+    // on the same filesystem as its top-level directory ("/system").
+    // We need something that exists for calling statfs().
+    char* target_fs = strdup(target_filename);
+    char* slash = strchr(target_fs+1, '/');
+    if (slash != NULL) {
+      *slash = '\0';
+    }
 
     size_t free_space = FreeSpaceForFile(target_fs);
-    printf("(now %ld bytes free for target)\n", (long)free_space);
+    int enough_space =
+        free_space > (target_size * 3 / 2);  // 50% margin of error
+    printf("target %ld bytes; free space %ld bytes; enough %d\n",
+           (long)target_size, (long)free_space, enough_space);
+
+    if (!enough_space && source_patch_filename != NULL) {
+      // Using the original source, but not enough free space.  First
+      // copy the source file to cache, then delete it from the original
+      // location.
+
+      if (strncmp(source_filename, "MTD:", 4) == 0) {
+        // It's impossible to free space on the target filesystem by
+        // deleting the source if the source is an MTD partition.  If
+        // we're ever in a state where we need to do this, fail.
+        fprintf(stderr, "not enough free space for target but source is MTD\n");
+        return 1;
+      }
+
+      if (MakeFreeSpaceOnCache(source_file.size) < 0) {
+        fprintf(stderr, "not enough free space on /cache\n");
+        return 1;
+      }
+
+      if (SaveFileContents(CACHE_TEMP_SOURCE, source_file) < 0) {
+        fprintf(stderr, "failed to back up source file\n");
+        return 1;
+      }
+      made_copy = 1;
+      unlink(source_filename);
+
+      size_t free_space = FreeSpaceForFile(target_fs);
+      printf("(now %ld bytes free for target)\n", (long)free_space);
+    }
   }
 
   FileContents* source_to_use;
@@ -396,14 +702,19 @@
     patch_filename = copy_patch_filename;
   }
 
-  // We write the decoded output to "<tgt-file>.patch".
-  char* outname = (char*)malloc(strlen(target_filename) + 10);
-  strcpy(outname, target_filename);
-  strcat(outname, ".patch");
+  char* outname = NULL;
+  if (strncmp(target_filename, "MTD:", 4) == 0) {
+    outname = MTD_TARGET_TEMP_FILE;
+  } else {
+    // We write the decoded output to "<tgt-file>.patch".
+    outname = (char*)malloc(strlen(target_filename) + 10);
+    strcpy(outname, target_filename);
+    strcat(outname, ".patch");
+  }
   FILE* output = fopen(outname, "wb");
   if (output == NULL) {
-    fprintf(stderr, "failed to patch file %s: %s\n",
-            target_filename, strerror(errno));
+    fprintf(stderr, "failed to open output file %s: %s\n",
+            outname, strerror(errno));
     return 1;
   }
 
@@ -431,11 +742,19 @@
   } else if (header_bytes_read >= 8 &&
              memcmp(header, "BSDIFF40", 8) == 0) {
     int result = ApplyBSDiffPatch(source_to_use->data, source_to_use->size,
-                                  patch_filename, output, &ctx);
+                                  patch_filename, 0, output, &ctx);
     if (result != 0) {
       fprintf(stderr, "ApplyBSDiffPatch failed\n");
       return result;
     }
+  } else if (header_bytes_read >= 8 &&
+             memcmp(header, "IMGDIFF1", 8) == 0) {
+    int result = ApplyImagePatch(source_to_use->data, source_to_use->size,
+                                 patch_filename, output, &ctx);
+    if (result != 0) {
+      fprintf(stderr, "ApplyImagePatch failed\n");
+      return result;
+    }
   } else {
     fprintf(stderr, "Unknown patch file format");
     return 1;
@@ -451,22 +770,32 @@
     return 1;
   }
 
-  // Give the .patch file the same owner, group, and mode of the
-  // original source file.
-  if (chmod(outname, source_to_use->st.st_mode) != 0) {
-    fprintf(stderr, "chmod of \"%s\" failed: %s\n", outname, strerror(errno));
-    return 1;
-  }
-  if (chown(outname, source_to_use->st.st_uid, source_to_use->st.st_gid) != 0) {
-    fprintf(stderr, "chown of \"%s\" failed: %s\n", outname, strerror(errno));
-    return 1;
-  }
+  if (strcmp(outname, MTD_TARGET_TEMP_FILE) == 0) {
+    // Copy the temp file to the MTD partition.
+    if (CopyToMTDPartition(outname, target_filename) != 0) {
+      fprintf(stderr, "copy of %s to %s failed\n", outname, target_filename);
+      return 1;
+    }
+    unlink(outname);
+  } else {
+    // Give the .patch file the same owner, group, and mode of the
+    // original source file.
+    if (chmod(outname, source_to_use->st.st_mode) != 0) {
+      fprintf(stderr, "chmod of \"%s\" failed: %s\n", outname, strerror(errno));
+      return 1;
+    }
+    if (chown(outname, source_to_use->st.st_uid,
+              source_to_use->st.st_gid) != 0) {
+      fprintf(stderr, "chown of \"%s\" failed: %s\n", outname, strerror(errno));
+      return 1;
+    }
 
-  // Finally, rename the .patch file to replace the target file.
-  if (rename(outname, target_filename) != 0) {
-    fprintf(stderr, "rename of .patch to \"%s\" failed: %s\n",
-            target_filename, strerror(errno));
-    return 1;
+    // Finally, rename the .patch file to replace the target file.
+    if (rename(outname, target_filename) != 0) {
+      fprintf(stderr, "rename of .patch to \"%s\" failed: %s\n",
+              target_filename, strerror(errno));
+      return 1;
+    }
   }
 
   // If this run of applypatch created the copy, and we're here, we
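The "MTD:" pseudo-filenames parsed by LoadMTDContents() above encode a partition name plus one (size, sha1) pair per plausible partition state, e.g. the source image and an interrupted target write. A small sketch of constructing such a name; the partition, sizes, and digests below are hypothetical placeholders, not values from this change:

    #include <stdio.h>

    /* Build an applypatch MTD pseudo-filename covering two possible
     * partition states.  Real OTA scripts supply the actual image sizes
     * and 40-digit SHA-1 sums. */
    int main(void) {
      const char* partition = "boot";
      size_t src_size = 2473936, tgt_size = 2510848;
      const char* src_sha1 = "9ff2...";   /* 40 hex digits in practice */
      const char* tgt_sha1 = "a31c...";

      char name[256];
      snprintf(name, sizeof(name), "MTD:%s:%zu:%s:%zu:%s",
               partition, src_size, src_sha1, tgt_size, tgt_sha1);

      /* LoadMTDContents() sorts the (size, sha1) pairs by size and reads the
       * partition incrementally, stopping at the first prefix whose SHA-1
       * matches one of the supplied digests. */
      printf("%s\n", name);
      return 0;
    }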
diff --git a/tools/applypatch/applypatch.h b/tools/applypatch/applypatch.h
index 76fc80a..e0320fb 100644
--- a/tools/applypatch/applypatch.h
+++ b/tools/applypatch/applypatch.h
@@ -38,12 +38,25 @@
 // and use it as the source instead.
 #define CACHE_TEMP_SOURCE "/cache/saved.file"
 
+// When writing to an MTD partition, we first put the output in this
+// temp file, then copy it to the partition once the patching is
+// finished (and the target sha1 verified).
+#define MTD_TARGET_TEMP_FILE "/tmp/mtd-temp"
+
 // applypatch.c
 size_t FreeSpaceForFile(const char* filename);
 
 // bsdiff.c
 void ShowBSDiffLicense();
 int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
+                     const char* patch_filename, ssize_t offset,
+                     FILE* output, SHA_CTX* ctx);
+int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size,
+                        const char* patch_filename, ssize_t patch_offset,
+                        unsigned char** new_data, ssize_t* new_size);
+
+// imgpatch.c
+int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
                      const char* patch_filename,
                      FILE* output, SHA_CTX* ctx);
 
diff --git a/tools/applypatch/bsdiff.c b/tools/applypatch/bsdiff.c
index a2851f9..9d55f3b 100644
--- a/tools/applypatch/bsdiff.c
+++ b/tools/applypatch/bsdiff.c
@@ -29,6 +29,7 @@
 #include <bzlib.h>
 
 #include "mincrypt/sha.h"
+#include "applypatch.h"
 
 void ShowBSDiffLicense() {
   puts("The bsdiff library used herein is:\n"
@@ -80,10 +81,34 @@
   return y;
 }
 
+
 int ApplyBSDiffPatch(const unsigned char* old_data, ssize_t old_size,
-                     const char* patch_filename,
+                     const char* patch_filename, ssize_t patch_offset,
                      FILE* output, SHA_CTX* ctx) {
 
+  unsigned char* new_data;
+  ssize_t new_size;
+  if (ApplyBSDiffPatchMem(old_data, old_size, patch_filename, patch_offset,
+                          &new_data, &new_size) != 0) {
+    return -1;
+  }
+
+  if (fwrite(new_data, 1, new_size, output) < new_size) {
+    fprintf(stderr, "short write of output: %d (%s)\n", errno, strerror(errno));
+    return 1;
+  }
+  if (ctx) {
+    SHA_update(ctx, new_data, new_size);
+  }
+  free(new_data);
+
+  return 0;
+}
+
+int ApplyBSDiffPatchMem(const unsigned char* old_data, ssize_t old_size,
+                        const char* patch_filename, ssize_t patch_offset,
+                        unsigned char** new_data, ssize_t* new_size) {
+
   FILE* f;
   if ((f = fopen(patch_filename, "rb")) == NULL) {
     fprintf(stderr, "failed to open patch file\n");
@@ -102,6 +127,8 @@
   // from oldfile to x bytes from the diff block; copy y bytes from the
   // extra block; seek forwards in oldfile by z bytes".
 
+  fseek(f, patch_offset, SEEK_SET);
+
   unsigned char header[32];
   if (fread(header, 1, 32, f) < 32) {
     fprintf(stderr, "failed to read patch file header\n");
@@ -109,17 +136,16 @@
   }
 
   if (memcmp(header, "BSDIFF40", 8) != 0) {
-    fprintf(stderr, "corrupt patch file header (magic number)\n");
+    fprintf(stderr, "corrupt bsdiff patch file header (magic number)\n");
     return 1;
   }
 
   ssize_t ctrl_len, data_len;
-  ssize_t new_size;
   ctrl_len = offtin(header+8);
   data_len = offtin(header+16);
-  new_size = offtin(header+24);
+  *new_size = offtin(header+24);
 
-  if (ctrl_len < 0 || data_len < 0 || new_size < 0) {
+  if (ctrl_len < 0 || data_len < 0 || *new_size < 0) {
     fprintf(stderr, "corrupt patch file header (data lengths)\n");
     return 1;
   }
@@ -135,7 +161,7 @@
     fprintf(stderr, "failed to open patch file\n");                      \
     return 1;                                                            \
   }                                                                      \
-  if (fseeko(f, offset, SEEK_SET)) {                                     \
+  if (fseeko(f, offset+patch_offset, SEEK_SET)) {                        \
     fprintf(stderr, "failed to seek in patch file\n");                   \
     return 1;                                                            \
   }                                                                      \
@@ -150,9 +176,10 @@
 
 #undef OPEN_AT
 
-  unsigned char* new_data = malloc(new_size);
-  if (new_data == NULL) {
-    fprintf(stderr, "failed to allocate memory for output file\n");
+  *new_data = malloc(*new_size);
+  if (*new_data == NULL) {
+    fprintf(stderr, "failed to allocate %d bytes of memory for output file\n",
+            (int)*new_size);
     return 1;
   }
 
@@ -161,7 +188,7 @@
   off_t len_read;
   int i;
   unsigned char buf[8];
-  while (newpos < new_size) {
+  while (newpos < *new_size) {
     // Read control data
     for (i = 0; i < 3; ++i) {
       len_read = BZ2_bzRead(&bzerr, cpfbz2, buf, 8);
@@ -173,13 +200,13 @@
     }
 
     // Sanity check
-    if (newpos + ctrl[0] > new_size) {
+    if (newpos + ctrl[0] > *new_size) {
       fprintf(stderr, "corrupt patch (new file overrun)\n");
       return 1;
     }
 
     // Read diff string
-    len_read = BZ2_bzRead(&bzerr, dpfbz2, new_data + newpos, ctrl[0]);
+    len_read = BZ2_bzRead(&bzerr, dpfbz2, *new_data + newpos, ctrl[0]);
     if (len_read < ctrl[0] || !(bzerr == BZ_OK || bzerr == BZ_STREAM_END)) {
       fprintf(stderr, "corrupt patch (read diff)\n");
       return 1;
@@ -188,7 +215,7 @@
     // Add old data to diff string
     for (i = 0; i < ctrl[0]; ++i) {
       if ((oldpos+i >= 0) && (oldpos+i < old_size)) {
-        new_data[newpos+i] += old_data[oldpos+i];
+        (*new_data)[newpos+i] += old_data[oldpos+i];
       }
     }
 
@@ -197,13 +224,13 @@
     oldpos += ctrl[0];
 
     // Sanity check
-    if (newpos + ctrl[1] > new_size) {
+    if (newpos + ctrl[1] > *new_size) {
       fprintf(stderr, "corrupt patch (new file overrun)\n");
       return 1;
     }
 
     // Read extra string
-    len_read = BZ2_bzRead(&bzerr, epfbz2, new_data + newpos, ctrl[1]);
+    len_read = BZ2_bzRead(&bzerr, epfbz2, *new_data + newpos, ctrl[1]);
     if (len_read < ctrl[1] || !(bzerr == BZ_OK || bzerr == BZ_STREAM_END)) {
       fprintf(stderr, "corrupt patch (read extra)\n");
       return 1;
@@ -221,12 +248,5 @@
   fclose(dpf);
   fclose(epf);
 
-  if (fwrite(new_data, 1, new_size, output) < new_size) {
-    fprintf(stderr, "short write of output: %d (%s)\n", errno, strerror(errno));
-    return 1;
-  }
-  SHA_update(ctx, new_data, new_size);
-  free(new_data);
-
   return 0;
 }
diff --git a/tools/applypatch/imgdiff.c b/tools/applypatch/imgdiff.c
new file mode 100644
index 0000000..f0b5fea
--- /dev/null
+++ b/tools/applypatch/imgdiff.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This program constructs binary patches for images -- such as boot.img
+ * and recovery.img -- that consist primarily of large chunks of gzipped
+ * data interspersed with uncompressed data.  Doing a naive bsdiff of
+ * these files is not useful because small changes in the data lead to
+ * large changes in the compressed bitstream; bsdiff patches of gzipped
+ * data are typically as large as the data itself.
+ *
+ * To patch these usefully, we break the source and target images up into
+ * chunks of two types: "normal" and "gzip".  Normal chunks are simply
+ * patched using a plain bsdiff.  Gzip chunks are first expanded, then a
+ * bsdiff is applied to the uncompressed data, then the patched data is
+ * gzipped using the same encoder parameters.  Patched chunks are
+ * concatenated together to create the output file; the output image
+ * should be *exactly* the same series of bytes as the target image used
+ * originally to generate the patch.
+ *
+ * To work well with this tool, the gzipped sections of the target
+ * image must have been generated using the same deflate encoder that
+ * is available in applypatch, namely, the one in the zlib library.
+ * In practice this means that images should be compressed using the
+ * "minigzip" tool included in the zlib distribution, not the GNU gzip
+ * program.
+ *
+ * An "imgdiff" patch consists of a header describing the chunk structure
+ * of the file and any encoding parameters needed for the gzipped
+ * chunks, followed by N bsdiff patches, one per chunk.
+ *
+ * For a diff to be generated, the source and target images must have the
+ * same "chunk" structure: that is, the same number of gzipped and normal
+ * chunks in the same order.  Android boot and recovery images currently
+ * consist of five chunks:  a small normal header, a gzipped kernel, a
+ * small normal section, a gzipped ramdisk, and finally a small normal
+ * footer.
+ *
+ * Caveats:  we locate gzipped sections within the source and target
+ * images by searching for the byte sequence 1f8b0800:  1f8b is the gzip
+ * magic number; 08 specifies the "deflate" encoding [the only encoding
+ * supported by the gzip standard]; and 00 is the flags byte.  We do not
+ * currently support any extra header fields (which would be indicated by
+ * a nonzero flags byte).  We also don't handle the case when that byte
+ * sequence appears spuriously in the file.  (Note that it would have to
+ * occur spuriously within a normal chunk to be a problem.)
+ *
+ *
+ * The imgdiff patch header looks like this:
+ *
+ *    "IMGDIFF1"                  (8)   [magic number and version]
+ *    chunk count                 (4)
+ *    for each chunk:
+ *        chunk type              (4)   [CHUNK_NORMAL or CHUNK_GZIP]
+ *        source start            (8)
+ *        source len              (8)
+ *        bsdiff patch offset     (8)   [from start of patch file]
+ *        if chunk type == CHUNK_GZIP:
+ *           source expanded len  (8)   [size of uncompressed source]
+ *           target expected len  (8)   [size of uncompressed target]
+ *           gzip level           (4)
+ *                method          (4)
+ *                windowBits      (4)
+ *                memLevel        (4)
+ *                strategy        (4)
+ *           gzip header len      (4)
+ *           gzip header          (gzip header len)
+ *           gzip footer          (8)
+ *
+ * All integers are little-endian.  "source start" and "source len"
+ * specify the section of the input image that comprises this chunk,
+ * including the gzip header and footer for gzip chunks.  "source
+ * expanded len" is the size of the uncompressed source data.  "target
+ * expected len" is the size of the uncompressed data after applying
+ * the bsdiff patch.  The next five parameters specify the zlib
+ * parameters to be used when compressing the patched data, and the
+ * next three specify the header and footer to be wrapped around the
+ * compressed data to create the output chunk (so that header contents
+ * like the timestamp are recreated exactly).
+ *
+ * After the header there are 'chunk count' bsdiff patches; the offset
+ * of each from the beginning of the file is specified in the header.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "zlib.h"
+#include "imgdiff.h"
+
+typedef struct {
+  int type;             // CHUNK_NORMAL or CHUNK_GZIP
+  size_t start;         // offset of chunk in original image file
+
+  size_t len;
+  unsigned char* data;  // data to be patched (ie, uncompressed, for
+                        // gzip chunks)
+
+  // everything else is for CHUNK_GZIP chunks only:
+
+  size_t gzip_header_len;
+  unsigned char* gzip_header;
+  unsigned char* gzip_footer;
+
+  // original (compressed) gzip data, including header and footer
+  size_t gzip_len;
+  unsigned char* gzip_data;
+
+  // deflate encoder parameters
+  int level, method, windowBits, memLevel, strategy;
+} ImageChunk;
+
+/*
+ * Read the given file and break it up into chunks, putting the number
+ * of chunks and their info in *num_chunks and **chunks,
+ * respectively.  Returns a malloc'd block of memory containing the
+ * contents of the file; various pointers in the output chunk array
+ * will point into this block of memory.  The caller should free the
+ * return value when done with all the chunks.  Returns NULL on
+ * failure.
+ */
+unsigned char* ReadImage(const char* filename,
+                         int* num_chunks, ImageChunk** chunks) {
+  struct stat st;
+  if (stat(filename, &st) != 0) {
+    fprintf(stderr, "failed to stat \"%s\": %s\n", filename, strerror(errno));
+    return NULL;
+  }
+
+  unsigned char* img = malloc(st.st_size + 4);
+  FILE* f = fopen(filename, "rb");
+  if (fread(img, 1, st.st_size, f) != st.st_size) {
+    fprintf(stderr, "failed to read \"%s\" %s\n", filename, strerror(errno));
+    fclose(f);
+    return NULL;
+  }
+  fclose(f);
+
+  // append 4 zero bytes to the data so we can always search for the
+  // four-byte string 1f8b0800 starting at any point in the actual
+  // file data, without special-casing the end of the data.
+  memset(img+st.st_size, 0, 4);
+
+  size_t pos = 0;
+
+  *num_chunks = 0;
+  *chunks = NULL;
+
+  while (pos < st.st_size) {
+    unsigned char* p = img+pos;
+
+    // Reallocate the list for every chunk; we expect the number of
+    // chunks to be small (5 for typical boot and recovery images).
+    ++*num_chunks;
+    *chunks = realloc(*chunks, *num_chunks * sizeof(ImageChunk));
+    ImageChunk* curr = *chunks + (*num_chunks-1);
+    curr->start = pos;
+
+    if (st.st_size - pos >= 4 &&
+        p[0] == 0x1f && p[1] == 0x8b &&
+        p[2] == 0x08 &&    // deflate compression
+        p[3] == 0x00) {    // no header flags
+      // 'pos' is the offset of the start of a gzip chunk.
+
+      curr->type = CHUNK_GZIP;
+      curr->gzip_header_len = GZIP_HEADER_LEN;
+      curr->gzip_header = p;
+
+      // We must decompress this chunk in order to discover where it
+      // ends, and so we can put the uncompressed data and its length
+      // into curr->data and curr->len;
+
+      size_t allocated = 32768;
+      curr->len = 0;
+      curr->data = malloc(allocated);
+      curr->gzip_data = p;
+
+      z_stream strm;
+      strm.zalloc = Z_NULL;
+      strm.zfree = Z_NULL;
+      strm.opaque = Z_NULL;
+      strm.avail_in = st.st_size - (pos + curr->gzip_header_len);
+      strm.next_in = p + GZIP_HEADER_LEN;
+
+      // -15 means we are decoding a 'raw' deflate stream; zlib will
+      // not expect zlib headers.
+      int ret = inflateInit2(&strm, -15);
+
+      do {
+        strm.avail_out = allocated - curr->len;
+        strm.next_out = curr->data + curr->len;
+        ret = inflate(&strm, Z_NO_FLUSH);
+        curr->len = allocated - strm.avail_out;
+        if (strm.avail_out == 0) {
+          allocated *= 2;
+          curr->data = realloc(curr->data, allocated);
+        }
+      } while (ret != Z_STREAM_END);
+
+      curr->gzip_len = st.st_size - strm.avail_in - pos + GZIP_FOOTER_LEN;
+      pos = st.st_size - strm.avail_in;
+      inflateEnd(&strm);
+
+      // consume the gzip footer.
+      curr->gzip_footer = img+pos;
+      pos += GZIP_FOOTER_LEN;
+      p = img+pos;
+
+      // The footer (that we just skipped over) contains the size of
+      // the uncompressed data.  Double-check to make sure that it
+      // matches the size of the data we got when we actually did
+      // the decompression.
+      size_t footer_size = p[-4] + (p[-3] << 8) + (p[-2] << 16) + (p[-1] << 24);
+      if (footer_size != curr->len) {
+        fprintf(stderr, "Error: footer size %d != decompressed size %d\n",
+                footer_size, curr->len);
+        free(img);
+        return NULL;
+      }
+    } else {
+      // 'pos' is not the offset of the start of a gzip chunk, so scan
+      // forward until we find a gzip header.
+      curr->type = CHUNK_NORMAL;
+      curr->data = p;
+
+      for (curr->len = 0; curr->len < (st.st_size - pos); ++curr->len) {
+        if (p[curr->len] == 0x1f &&
+            p[curr->len+1] == 0x8b &&
+            p[curr->len+2] == 0x08 &&
+            p[curr->len+3] == 0x00) {
+          break;
+        }
+      }
+      pos += curr->len;
+    }
+  }
+
+  return img;
+}
+
+#define BUFFER_SIZE 32768
+
+/*
+ * Takes the uncompressed data stored in the chunk, compresses it
+ * using the zlib parameters stored in the chunk, and checks that it
+ * matches exactly the compressed data we started with (also stored in
+ * the chunk).  Return 0 on success.
+ */
+int TryReconstruction(ImageChunk* chunk, unsigned char* out) {
+  size_t p = chunk->gzip_header_len;
+
+  z_stream strm;
+  strm.zalloc = Z_NULL;
+  strm.zfree = Z_NULL;
+  strm.opaque = Z_NULL;
+  strm.avail_in = chunk->len;
+  strm.next_in = chunk->data;
+  int ret;
+  ret = deflateInit2(&strm, chunk->level, chunk->method, chunk->windowBits,
+                     chunk->memLevel, chunk->strategy);
+  do {
+    strm.avail_out = BUFFER_SIZE;
+    strm.next_out = out;
+    ret = deflate(&strm, Z_FINISH);
+    size_t have = BUFFER_SIZE - strm.avail_out;
+
+    if (memcmp(out, chunk->gzip_data+p, have) != 0) {
+      // mismatch; data isn't the same.
+      deflateEnd(&strm);
+      return -1;
+    }
+    p += have;
+  } while (ret != Z_STREAM_END);
+  deflateEnd(&strm);
+  if (p + GZIP_FOOTER_LEN != chunk->gzip_len) {
+    // mismatch; ran out of data before we should have.
+    return -1;
+  }
+  return 0;
+}
+
+/*
+ * Verify that we can reproduce exactly the same compressed data that
+ * we started with.  Sets the level, method, windowBits, memLevel, and
+ * strategy fields in the chunk to the encoding parameters needed to
+ * produce the right output.  Returns 0 on success.
+ */
+int ReconstructGzipChunk(ImageChunk* chunk) {
+  if (chunk->type != CHUNK_GZIP) {
+    fprintf(stderr, "attempt to reconstruct non-gzip chunk\n");
+    return -1;
+  }
+
+  size_t p = 0;
+  unsigned char* out = malloc(BUFFER_SIZE);
+
+  // We only check two combinations of encoder parameters:  level 6
+  // (the default) and level 9 (the maximum).
+  for (chunk->level = 6; chunk->level <= 9; chunk->level += 3) {
+    chunk->windowBits = -15;  // 32kb window; negative to indicate a raw stream.
+    chunk->memLevel = 8;      // the default value.
+    chunk->method = Z_DEFLATED;
+    chunk->strategy = Z_DEFAULT_STRATEGY;
+
+    if (TryReconstruction(chunk, out) == 0) {
+      free(out);
+      return 0;
+    }
+  }
+
+  free(out);
+  return -1;
+}
+
+/** Write a 4-byte value to f in little-endian order. */
+void Write4(int value, FILE* f) {
+  fputc(value & 0xff, f);
+  fputc((value >> 8) & 0xff, f);
+  fputc((value >> 16) & 0xff, f);
+  fputc((value >> 24) & 0xff, f);
+}
+
+/** Write an 8-byte value to f in little-endian order. */
+void Write8(long long value, FILE* f) {
+  fputc(value & 0xff, f);
+  fputc((value >> 8) & 0xff, f);
+  fputc((value >> 16) & 0xff, f);
+  fputc((value >> 24) & 0xff, f);
+  fputc((value >> 32) & 0xff, f);
+  fputc((value >> 40) & 0xff, f);
+  fputc((value >> 48) & 0xff, f);
+  fputc((value >> 56) & 0xff, f);
+}
+
+
+/*
+ * Given source and target chunks, compute a bsdiff patch between them
+ * by running bsdiff in a subprocess.  Return the patch data, placing
+ * its length in *size.  Return NULL on failure.  We expect the bsdiff
+ * program to be in the path.
+ */
+unsigned char* MakePatch(ImageChunk* src, ImageChunk* tgt, size_t* size) {
+  char stemp[] = "/tmp/imgdiff-src-XXXXXX";
+  char ttemp[] = "/tmp/imgdiff-tgt-XXXXXX";
+  char ptemp[] = "/tmp/imgdiff-patch-XXXXXX";
+  mkstemp(stemp);
+  mkstemp(ttemp);
+  mkstemp(ptemp);
+
+  FILE* f = fopen(stemp, "wb");
+  if (f == NULL) {
+    fprintf(stderr, "failed to open src chunk %s: %s\n",
+            stemp, strerror(errno));
+    return NULL;
+  }
+  if (fwrite(src->data, 1, src->len, f) != src->len) {
+    fprintf(stderr, "failed to write src chunk to %s: %s\n",
+            stemp, strerror(errno));
+    return NULL;
+  }
+  fclose(f);
+
+  f = fopen(ttemp, "wb");
+  if (f == NULL) {
+    fprintf(stderr, "failed to open tgt chunk %s: %s\n",
+            ttemp, strerror(errno));
+    return NULL;
+  }
+  if (fwrite(tgt->data, 1, tgt->len, f) != tgt->len) {
+    fprintf(stderr, "failed to write tgt chunk to %s: %s\n",
+            ttemp, strerror(errno));
+    return NULL;
+  }
+  fclose(f);
+
+  char cmd[200];
+  sprintf(cmd, "bsdiff %s %s %s", stemp, ttemp, ptemp);
+  if (system(cmd) != 0) {
+    fprintf(stderr, "failed to run bsdiff: %s\n", strerror(errno));
+    return NULL;
+  }
+
+  struct stat st;
+  if (stat(ptemp, &st) != 0) {
+    fprintf(stderr, "failed to stat patch file %s: %s\n",
+            ptemp, strerror(errno));
+    return NULL;
+  }
+
+  unsigned char* data = malloc(st.st_size);
+  *size = st.st_size;
+
+  f = fopen(ptemp, "rb");
+  if (f == NULL) {
+    fprintf(stderr, "failed to open patch %s: %s\n", ptemp, strerror(errno));
+    return NULL;
+  }
+  if (fread(data, 1, st.st_size, f) != st.st_size) {
+    fprintf(stderr, "failed to read patch %s: %s\n", ptemp, strerror(errno));
+    return NULL;
+  }
+  fclose(f);
+
+  unlink(stemp);
+  unlink(ttemp);
+  unlink(ptemp);
+
+  return data;
+}
+
+/*
+ * Cause a gzip chunk to be treated as a normal chunk (ie, as a blob
+ * of uninterpreted data).  The resulting patch will likely be about
+ * as big as the target file, but it lets us handle the case of images
+ * where some gzip chunks are reconstructible but others aren't (by
+ * treating the ones that aren't as normal chunks).
+ */
+void ChangeGzipChunkToNormal(ImageChunk* ch) {
+  ch->type = CHUNK_NORMAL;
+  free(ch->data);
+  ch->data = ch->gzip_data;
+  ch->len = ch->gzip_len;
+}
+
+int main(int argc, char** argv) {
+  if (argc != 4) {
+    fprintf(stderr, "usage: %s <src-img> <tgt-img> <patch-file>\n", argv[0]);
+    return 2;
+  }
+
+  int num_src_chunks;
+  ImageChunk* src_chunks;
+  if (ReadImage(argv[1], &num_src_chunks, &src_chunks) == NULL) {
+    fprintf(stderr, "failed to break apart source image\n");
+    return 1;
+  }
+
+  int num_tgt_chunks;
+  ImageChunk* tgt_chunks;
+  if (ReadImage(argv[2], &num_tgt_chunks, &tgt_chunks) == NULL) {
+    fprintf(stderr, "failed to break apart target image\n");
+    return 1;
+  }
+
+  // Verify that the source and target images have the same chunk
+  // structure (ie, the same sequence of gzip and normal chunks).
+
+  if (num_src_chunks != num_tgt_chunks) {
+    fprintf(stderr, "source and target don't have same number of chunks!\n");
+    return 1;
+  }
+  int i;
+  for (i = 0; i < num_src_chunks; ++i) {
+    if (src_chunks[i].type != tgt_chunks[i].type) {
+      fprintf(stderr, "source and target don't have same chunk "
+              "structure! (chunk %d)\n", i);
+      return 1;
+    }
+  }
+
+  // Confirm that given the uncompressed chunk data in the target, we
+  // can recompress it and get exactly the same bits as are in the
+  // input target image.  If this fails, treat the chunk as a normal
+  // non-gzipped chunk.
+
+  for (i = 0; i < num_tgt_chunks; ++i) {
+    if (tgt_chunks[i].type == CHUNK_GZIP) {
+      if (ReconstructGzipChunk(tgt_chunks+i) < 0) {
+        printf("failed to reconstruct target gzip chunk %d; "
+               "treating as normal chunk\n", i);
+        ChangeGzipChunkToNormal(tgt_chunks+i);
+        ChangeGzipChunkToNormal(src_chunks+i);
+      } else {
+        printf("reconstructed target gzip chunk %d\n", i);
+      }
+    }
+  }
+
+  // Compute bsdiff patches for each chunk's data (the uncompressed
+  // data, in the case of gzip chunks).
+
+  unsigned char** patch_data = malloc(num_src_chunks * sizeof(unsigned char*));
+  size_t* patch_size = malloc(num_src_chunks * sizeof(size_t));
+  for (i = 0; i < num_src_chunks; ++i) {
+    patch_data[i] = MakePatch(src_chunks+i, tgt_chunks+i, patch_size+i);
+    printf("patch %d is %d bytes (of %d)\n", i, patch_size[i],
+           tgt_chunks[i].type == CHUNK_NORMAL ? tgt_chunks[i].len : tgt_chunks[i].gzip_len);
+
+  }
+
+  // Figure out how big the imgdiff file header is going to be, so
+  // that we can correctly compute the offset of each bsdiff patch
+  // within the file.
+
+  size_t total_header_size = 12;
+  for (i = 0; i < num_src_chunks; ++i) {
+    total_header_size += 4 + 8*3;
+    if (src_chunks[i].type == CHUNK_GZIP) {
+      total_header_size += 8*2 + 4*6 + tgt_chunks[i].gzip_header_len + 8;
+    }
+  }
+
+  size_t offset = total_header_size;
+
+  FILE* f = fopen(argv[3], "wb");
+
+  // Write out the headers.
+
+  fwrite("IMGDIFF1", 1, 8, f);
+  Write4(num_src_chunks, f);
+  for (i = 0; i < num_tgt_chunks; ++i) {
+    Write4(tgt_chunks[i].type, f);
+    Write8(src_chunks[i].start, f);
+    Write8(src_chunks[i].type == CHUNK_NORMAL ? src_chunks[i].len :
+           (src_chunks[i].gzip_len + src_chunks[i].gzip_header_len + 8), f);
+    Write8(offset, f);
+
+    if (tgt_chunks[i].type == CHUNK_GZIP) {
+      Write8(src_chunks[i].len, f);
+      Write8(tgt_chunks[i].len, f);
+      Write4(tgt_chunks[i].level, f);
+      Write4(tgt_chunks[i].method, f);
+      Write4(tgt_chunks[i].windowBits, f);
+      Write4(tgt_chunks[i].memLevel, f);
+      Write4(tgt_chunks[i].strategy, f);
+      Write4(tgt_chunks[i].gzip_header_len, f);
+      fwrite(tgt_chunks[i].gzip_header, 1, tgt_chunks[i].gzip_header_len, f);
+      fwrite(tgt_chunks[i].gzip_footer, 1, GZIP_FOOTER_LEN, f);
+    }
+
+    offset += patch_size[i];
+  }
+
+  // Append each chunk's bsdiff patch, in order.
+
+  for (i = 0; i < num_tgt_chunks; ++i) {
+    fwrite(patch_data[i], 1, patch_size[i], f);
+  }
+
+  fclose(f);
+
+  return 0;
+}
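The total_header_size loop near the end of main() above works out to 12 bytes of global header, plus 28 bytes per chunk, plus 58 extra bytes per gzip chunk when the gzip header is the usual 10 bytes. A small worked example, assuming the typical five-chunk boot image (three normal, two gzip) described in the file comment; the chunk counts are an assumption for illustration:

    #include <stdio.h>

    int main(void) {
      int normal_chunks = 3, gzip_chunks = 2, gzip_header_len = 10;
      int per_chunk  = 4 + 8*3;                           /* type + start/len/patch offset */
      int gzip_extra = 8*2 + 4*6 + gzip_header_len + 8;   /* lens, zlib params, hdr, footer */
      int total = 12 + (normal_chunks + gzip_chunks) * per_chunk
                     + gzip_chunks * gzip_extra;
      printf("first bsdiff patch starts at offset %d\n", total);  /* prints 268 */
      return 0;
    }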
diff --git a/tools/applypatch/imgdiff.h b/tools/applypatch/imgdiff.h
new file mode 100644
index 0000000..7ec45c5
--- /dev/null
+++ b/tools/applypatch/imgdiff.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Image patch chunk types
+#define CHUNK_NORMAL 0
+#define CHUNK_GZIP   1
+
+// The gzip header size is actually variable, but we currently don't
+// support gzipped data with any of the optional fields, so for now it
+// will always be ten bytes.  See RFC 1952 for the definition of the
+// gzip format.
+#define GZIP_HEADER_LEN   10
+
+// The gzip footer size really is fixed.
+#define GZIP_FOOTER_LEN   8
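
Concretely, the ten bytes that GZIP_HEADER_LEN counts are the mandatory RFC 1952 fields: ID1 (0x1f), ID2 (0x8b), CM (8 for deflate), FLG, a 4-byte MTIME, XFL, and OS. Any FLG bit that introduces an optional field (FEXTRA, FNAME, FCOMMENT, FHCRC) makes the header longer than ten bytes, which is exactly the case this code does not support. A small illustrative Python check of that assumption:

    import struct

    def is_plain_gzip_header(data):
        # True only if 'data' begins with a gzip header that really is just
        # the 10 mandatory bytes (no FEXTRA/FNAME/FCOMMENT/FHCRC fields set).
        if len(data) < 10:
            return False
        id1, id2, cm, flg = struct.unpack("<4B", data[:4])
        return id1 == 0x1f and id2 == 0x8b and cm == 8 and (flg & 0x1e) == 0
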
diff --git a/tools/applypatch/imgpatch.c b/tools/applypatch/imgpatch.c
new file mode 100644
index 0000000..2efe874
--- /dev/null
+++ b/tools/applypatch/imgpatch.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// See imgdiff.c in this directory for a description of the patch file
+// format.
+
+#include <stdio.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+
+#include "zlib.h"
+#include "mincrypt/sha.h"
+#include "applypatch.h"
+#include "imgdiff.h"
+
+int Read4(unsigned char* p) {
+  return (int)(((unsigned int)p[3] << 24) |
+               ((unsigned int)p[2] << 16) |
+               ((unsigned int)p[1] << 8) |
+               (unsigned int)p[0]);
+}
+
+long long Read8(unsigned char* p) {
+  return (long long)(((unsigned long long)p[7] << 56) |
+                     ((unsigned long long)p[6] << 48) |
+                     ((unsigned long long)p[5] << 40) |
+                     ((unsigned long long)p[4] << 32) |
+                     ((unsigned long long)p[3] << 24) |
+                     ((unsigned long long)p[2] << 16) |
+                     ((unsigned long long)p[1] << 8) |
+                     (unsigned long long)p[0]);
+}
+
+/*
+ * Apply the patch given in 'patch_filename' to the source data given
+ * by (old_data, old_size).  Write the patched output to the 'output'
+ * file, and update the SHA context with the output data as well.
+ * Return 0 on success.
+ */
+int ApplyImagePatch(const unsigned char* old_data, ssize_t old_size,
+                    const char* patch_filename,
+                    FILE* output, SHA_CTX* ctx) {
+  FILE* f;
+  if ((f = fopen(patch_filename, "rb")) == NULL) {
+    fprintf(stderr, "failed to open patch file\n");
+    return -1;
+  }
+
+  unsigned char header[12];
+  if (fread(header, 1, 12, f) != 12) {
+    fprintf(stderr, "failed to read patch file header\n");
+    return -1;
+  }
+
+  if (memcmp(header, "IMGDIFF1", 8) != 0) {
+    fprintf(stderr, "corrupt patch file header (magic number)\n");
+    return -1;
+  }
+
+  int num_chunks = Read4(header+8);
+
+  int i;
+  for (i = 0; i < num_chunks; ++i) {
+    // each chunk's header record starts with 28 bytes (4 + 8*3).
+    unsigned char chunk[28];
+    if (fread(chunk, 1, 28, f) != 28) {
+      fprintf(stderr, "failed to read chunk %d record\n", i);
+      return -1;
+    }
+
+    int type = Read4(chunk);
+    size_t src_start = Read8(chunk+4);
+    size_t src_len = Read8(chunk+12);
+    size_t patch_offset = Read8(chunk+20);
+
+    if (type == CHUNK_NORMAL) {
+      fprintf(stderr, "CHUNK %d:  normal   patch offset %d\n", i, patch_offset);
+
+      ApplyBSDiffPatch(old_data + src_start, src_len,
+                       patch_filename, patch_offset,
+                       output, ctx);
+    } else if (type == CHUNK_GZIP) {
+      fprintf(stderr, "CHUNK %d:  gzip     patch offset %d\n", i, patch_offset);
+
+      // gzip chunks have an additional 40 + gzip_header_len + 8 bytes
+      // in their chunk header.
+      unsigned char* gzip = malloc(40);
+      if (fread(gzip, 1, 40, f) != 40) {
+        fprintf(stderr, "failed to read chunk %d initial gzip data\n", i);
+        return -1;
+      }
+      size_t gzip_header_len = Read4(gzip+36);
+      gzip = realloc(gzip, 40 + gzip_header_len + 8);
+      if (fread(gzip+40, 1, gzip_header_len+8, f) != gzip_header_len+8) {
+        fprintf(stderr, "failed to read chunk %d remaining gzip data\n", i);
+        return -1;
+      }
+
+      size_t expanded_len = Read8(gzip);
+      size_t target_len = Read8(gzip+8);
+      int gz_level = Read4(gzip+16);
+      int gz_method = Read4(gzip+20);
+      int gz_windowBits = Read4(gzip+24);
+      int gz_memLevel = Read4(gzip+28);
+      int gz_strategy = Read4(gzip+32);
+
+      // Decompress the source data; the chunk header tells us exactly
+      // how big we expect it to be when decompressed.
+
+      unsigned char* expanded_source = malloc(expanded_len);
+      if (expanded_source == NULL) {
+        fprintf(stderr, "failed to allocate %d bytes for expanded_source\n",
+                expanded_len);
+        return -1;
+      }
+
+      z_stream strm;
+      strm.zalloc = Z_NULL;
+      strm.zfree = Z_NULL;
+      strm.opaque = Z_NULL;
+      strm.avail_in = src_len - (gzip_header_len + 8);
+      strm.next_in = (unsigned char*)(old_data + src_start + gzip_header_len);
+      strm.avail_out = expanded_len;
+      strm.next_out = expanded_source;
+
+      int ret;
+      ret = inflateInit2(&strm, -15);
+      if (ret != Z_OK) {
+        fprintf(stderr, "failed to init source inflation: %d\n", ret);
+        return -1;
+      }
+
+      // Because we've provided enough room to accommodate the output
+      // data, we expect one call to inflate() to suffice.
+      ret = inflate(&strm, Z_SYNC_FLUSH);
+      if (ret != Z_STREAM_END) {
+        fprintf(stderr, "source inflation returned %d\n", ret);
+        return -1;
+      }
+      // We should have filled the output buffer exactly.
+      if (strm.avail_out != 0) {
+        fprintf(stderr, "source inflation short by %d bytes\n", strm.avail_out);
+        return -1;
+      }
+      inflateEnd(&strm);
+
+      // Next, apply the bsdiff patch (in memory) to the uncompressed
+      // data.
+      unsigned char* uncompressed_target_data;
+      ssize_t uncompressed_target_size;
+      if (ApplyBSDiffPatchMem(expanded_source, expanded_len,
+                              patch_filename, patch_offset,
+                              &uncompressed_target_data,
+                              &uncompressed_target_size) != 0) {
+        return -1;
+      }
+
+      // Now compress the target data and append it to the output.
+
+      // start with the gzip header.
+      fwrite(gzip+40, 1, gzip_header_len, output);
+      SHA_update(ctx, gzip+40, gzip_header_len);
+
+      // we're done with the expanded_source data buffer, so we'll
+      // reuse that memory to receive the output of deflate.
+      unsigned char* temp_data = expanded_source;
+      ssize_t temp_size = expanded_len;
+      if (temp_size < 32768) {
+        // ... unless the buffer is too small, in which case we'll
+        // allocate a fresh one.
+        free(temp_data);
+        temp_data = malloc(32768);
+        temp_size = 32768;
+      }
+
+      // now the deflate stream
+      strm.zalloc = Z_NULL;
+      strm.zfree = Z_NULL;
+      strm.opaque = Z_NULL;
+      strm.avail_in = uncompressed_target_size;
+      strm.next_in = uncompressed_target_data;
+      ret = deflateInit2(&strm, gz_level, gz_method, gz_windowBits,
+                         gz_memLevel, gz_strategy);
+      do {
+        strm.avail_out = temp_size;
+        strm.next_out = temp_data;
+        ret = deflate(&strm, Z_FINISH);
+        size_t have = temp_size - strm.avail_out;
+
+        if (fwrite(temp_data, 1, have, output) != have) {
+          fprintf(stderr, "failed to write %d compressed bytes to output\n",
+                  have);
+          return -1;
+        }
+        SHA_update(ctx, temp_data, have);
+      } while (ret != Z_STREAM_END);
+      deflateEnd(&strm);
+
+      // lastly, the gzip footer.
+      fwrite(gzip+40+gzip_header_len, 1, 8, output);
+      SHA_update(ctx, gzip+40+gzip_header_len, 8);
+
+      free(temp_data);
+      free(uncompressed_target_data);
+      free(gzip);
+    } else {
+      fprintf(stderr, "patch chunk %d is unknown type %d\n", i, type);
+      return -1;
+    }
+  }
+
+  return 0;
+}
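
Both patch flavors are plain files, so they are easy to tell apart when inspecting what the build produces: imgdiff patches begin with the "IMGDIFF1" magic checked above, while standalone bsdiff patches begin with the ASCII magic "BSDIFF40". A small Python helper along those lines, useful only for poking at generated patch files; it is not how applypatch itself dispatches:

    def patch_kind(patch_filename):
        # Classify a patch file by its leading 8-byte magic.
        with open(patch_filename, "rb") as f:
            magic = f.read(8)
        if magic == b"IMGDIFF1":
            return "imgdiff"
        if magic == b"BSDIFF40":
            return "bsdiff"
        return "unknown"
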
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 1fcac76..5c738a2 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -20,6 +20,7 @@
 echo "ro.product.name=$PRODUCT_NAME"
 echo "ro.product.device=$TARGET_DEVICE"
 echo "ro.product.board=$TARGET_BOOTLOADER_BOARD_NAME"
+echo "ro.product.cpu.abi=$TARGET_CPU_ABI"
 echo "ro.product.manufacturer=$PRODUCT_MANUFACTURER"
 echo "ro.product.locale.language=$PRODUCT_DEFAULT_LANGUAGE"
 echo "ro.product.locale.region=$PRODUCT_DEFAULT_REGION"
diff --git a/tools/droiddoc/templates-pdk/customization.cs b/tools/droiddoc/templates-pdk/customization.cs
index 01b6e96..563af1e 100644
--- a/tools/droiddoc/templates-pdk/customization.cs
+++ b/tools/droiddoc/templates-pdk/customization.cs
@@ -5,7 +5,7 @@
 def:custom_masthead() ?>
   <div id="header">
       <div id="headerLeft">
-          <a href="<?cs var:toroot ?>index.html" tabindex="-1"><img
+          <a href="<?cs var:toroot ?>guide/index.html" tabindex="-1"><img
               src="<?cs var:toroot ?>assets/images/open_source.png" alt="Open Source Project: Platform Development Kit" /></a>
           <ul class="<?cs 
                   if:reference ?> <?cs
@@ -15,7 +15,7 @@
                   elif:community ?> <?cs
                   elif:publish ?> <?cs
                   elif:about ?> <?cs /if ?>">
-              <li id="guide-link"><a href="<?cs var:toroot ?>index.html"
+              <li id="guide-link"><a href="<?cs var:toroot ?>guide/index.html"
                                   onClick="return loadLast('guide)'"><span>Porting Guide</span></a></li>
               <li id="opensource-link"><a href="http://source.android.com/"
 				 onClick="return loadLast('open')"><span>Open Source</span></a></li>
diff --git a/tools/droiddoc/templates/assets/android-developer-docs.css b/tools/droiddoc/templates/assets/android-developer-docs.css
index 004d5cf..f4aa5ef 100644
--- a/tools/droiddoc/templates/assets/android-developer-docs.css
+++ b/tools/droiddoc/templates/assets/android-developer-docs.css
@@ -804,6 +804,63 @@
 
 /* End sidebox sidebar element styles */
 
+/* BEGIN image and caption styles (originally for UI Guidelines docs) */
+
+table.image-caption {
+  padding:0;
+  margin:.5em 0;
+  border:0;
+}
+
+td.image-caption-i {
+  font-size:92%;
+  padding:0;
+  margin:0;
+  border:0;
+}
+
+td.image-caption-i img {
+  padding:0 1em;
+  margin:0;
+}
+
+.image-list {
+  width:24px;
+  text-align:center;
+}
+
+.image-list .caption {
+  margin:0 2px;
+}
+
+td.image-caption-c {
+  font-size:92%;
+  padding:1em 2px 2px 2px;
+  margin:0;
+  border:0;
+  width:350px;
+}
+
+.grad-rule-top {
+  background-image:url(images/grad-rule-qv.png);
+  background-repeat:no-repeat;
+  padding-top:1em;
+  margin-top:0;
+}
+
+.image-caption-nested {
+  margin-top:0;
+  padding:0 0 0 1em;
+}
+
+.image-caption-nested td {
+  padding:0 4px 2px 0;
+  margin:0;
+  border:0;
+}
+
+/* END image and caption styles */
+
 /* table of contents */
 
 ol.toc {
diff --git a/tools/droiddoc/templates/assets/images/icon_guidelines_logo.png b/tools/droiddoc/templates/assets/images/icon_guidelines_logo.png
new file mode 100644
index 0000000..9362c8f
--- /dev/null
+++ b/tools/droiddoc/templates/assets/images/icon_guidelines_logo.png
Binary files differ
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index a512ff8..686c6ef 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import errno
 import getopt
 import getpass
 import os
@@ -80,12 +81,13 @@
 
   p1 = Run(["mkbootfs", os.path.join(sourcedir, "RAMDISK")],
            stdout=subprocess.PIPE)
-  p2 = Run(["gzip", "-n"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
+  p2 = Run(["minigzip"],
+           stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
 
   p2.wait()
   p1.wait()
   assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (targetname,)
-  assert p2.returncode == 0, "gzip of %s ramdisk failed" % (targetname,)
+  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (targetname,)
 
   cmdline = open(os.path.join(sourcedir, "cmdline")).read().rstrip("\n")
   p = Run(["mkbootimg",
@@ -132,13 +134,14 @@
   those which require them.  Return a {key: password} dict.  password
   will be None if the key has no password."""
 
-  key_passwords = {}
+  no_passwords = []
+  need_passwords = []
   devnull = open("/dev/null", "w+b")
   for k in sorted(keylist):
     # An empty-string key is used to mean don't re-sign this package.
     # Obviously we don't need a password for this non-key.
     if not k:
-      key_passwords[k] = None
+      no_passwords.append(k)
       continue
 
     p = subprocess.Popen(["openssl", "pkcs8", "-in", k+".pk8",
@@ -148,12 +151,13 @@
                          stderr=subprocess.STDOUT)
     p.communicate()
     if p.returncode == 0:
-      print "%s.pk8 does not require a password" % (k,)
-      key_passwords[k] = None
+      no_passwords.append(k)
     else:
-      key_passwords[k] = getpass.getpass("Enter password for %s.pk8> " % (k,))
+      need_passwords.append(k)
   devnull.close()
-  print
+
+  key_passwords = PasswordManager().GetPasswords(need_passwords)
+  key_passwords.update(dict.fromkeys(no_passwords, None))
   return key_passwords
 
 
@@ -278,3 +282,102 @@
       shutil.rmtree(i)
     else:
       os.remove(i)
+
+
+class PasswordManager(object):
+  def __init__(self):
+    self.editor = os.getenv("EDITOR", None)
+    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
+
+  def GetPasswords(self, items):
+    """Get passwords corresponding to each string in 'items',
+    returning a dict.  (The dict may have keys in addition to the
+    values in 'items'.)
+
+    Uses the passwords in $ANDROID_PW_FILE if available, letting the
+    user edit that file to add more needed passwords.  If no editor is
+    available, or $ANDROID_PW_FILE isn't defined, prompts the user
+    interactively in the ordinary way.
+    """
+
+    current = self.ReadFile()
+
+    first = True
+    while True:
+      missing = []
+      for i in items:
+        if i not in current or not current[i]:
+          missing.append(i)
+      # Are all the passwords already in the file?
+      if not missing: return current
+
+      for i in missing:
+        current[i] = ""
+
+      if not first:
+        print "key file %s still missing some passwords." % (self.pwfile,)
+        answer = raw_input("try to edit again? [y]> ").strip()
+        if answer and answer[0] not in 'yY':
+          raise RuntimeError("key passwords unavailable")
+      first = False
+
+      current = self.UpdateAndReadFile(current)
+
+  def PromptResult(self, current):
+    """Prompt the user to enter a value (password) for each key in
+    'current' whose value is false.  Returns a new dict with all the
+    values.
+    """
+    result = {}
+    for k, v in sorted(current.iteritems()):
+      if v:
+        result[k] = v
+      else:
+        while True:
+          result[k] = getpass.getpass("Enter password for %s key> "
+                                      % (k,)).strip()
+          if result[k]: break
+    return result
+
+  def UpdateAndReadFile(self, current):
+    if not self.editor or not self.pwfile:
+      return self.PromptResult(current)
+
+    f = open(self.pwfile, "w")
+    os.chmod(self.pwfile, 0600)
+    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
+    f.write("# (Additional spaces are harmless.)\n\n")
+
+    first_line = None
+    entries = [(not v, k, v) for (k, v) in current.iteritems()]
+    entries.sort()
+    for i, (_, k, v) in enumerate(entries):
+      f.write("[[[  %s  ]]] %s\n" % (v, k))
+      if not v and first_line is None:
+        # position cursor on first line with no password.
+        first_line = i + 4
+    f.close()
+
+    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
+    _, _ = p.communicate()
+
+    return self.ReadFile()
+
+  def ReadFile(self):
+    result = {}
+    if self.pwfile is None: return result
+    try:
+      f = open(self.pwfile, "r")
+      for line in f:
+        line = line.strip()
+        if not line or line[0] == '#': continue
+        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
+        if not m:
+          print "failed to parse password file: ", line
+        else:
+          result[m.group(2)] = m.group(1)
+      f.close()
+    except IOError, e:
+      if e.errno != errno.ENOENT:
+        print "error reading password file: ", str(e)
+    return result
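
To make the $ANDROID_PW_FILE round trip concrete, the file UpdateAndReadFile writes and ReadFile parses back looks like the sample below; the key names and password are made-up placeholders, and the regular expression is the same one used in ReadFile. An empty value still counts as missing, so GetPasswords will keep re-opening the editor until it is filled in.

    import re

    sample = (
        "# Enter key passwords between the [[[ ]]] brackets.\n"
        "# (Additional spaces are harmless.)\n"
        "\n"
        "[[[  secret123  ]]] build/target/product/security/platform\n"
        "[[[             ]]] build/target/product/security/media\n")

    pattern = re.compile(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$")
    passwords = {}
    for line in sample.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        m = pattern.match(line)
        if m:
            passwords[m.group(2)] = m.group(1)
    # -> {'build/target/product/security/platform': 'secret123',
    #     'build/target/product/security/media': ''}
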
diff --git a/tools/releasetools/ota_from_target_files b/tools/releasetools/ota_from_target_files
index 364f751..7e36da2 100755
--- a/tools/releasetools/ota_from_target_files
+++ b/tools/releasetools/ota_from_target_files
@@ -322,9 +322,12 @@
                              for b in bootloaders]))
 
 
-def IncludeBinary(name, input_zip, output_zip):
+def IncludeBinary(name, input_zip, output_zip, input_path=None):
   try:
-    data = input_zip.read(os.path.join("OTA/bin", name))
+    if input_path is not None:
+      data = open(input_path).read()
+    else:
+      data = input_zip.read(os.path.join("OTA/bin", name))
     output_zip.writestr(name, data)
   except IOError:
     raise ExternalError('unable to include device binary "%s"' % (name,))
@@ -402,8 +405,11 @@
   return out
 
 
-def Difference(tf, sf):
-  """Return the patch (as a string of data) needed to turn sf into tf."""
+def Difference(tf, sf, diff_program):
+  """Return the patch (as a string of data) needed to turn sf into tf.
+  diff_program is the name of an external program (or list, if
+  additional arguments are desired) to run to generate the diff.
+  """
 
   ttemp = tf.WriteToTemp()
   stemp = sf.WriteToTemp()
@@ -412,13 +418,21 @@
 
   try:
     ptemp = tempfile.NamedTemporaryFile()
-    p = common.Run(["bsdiff", stemp.name, ttemp.name, ptemp.name])
+    if isinstance(diff_program, list):
+      cmd = copy.copy(diff_program)
+    else:
+      cmd = [diff_program]
+    cmd.append(stemp.name)
+    cmd.append(ttemp.name)
+    cmd.append(ptemp.name)
+    p = common.Run(cmd)
     _, err = p.communicate()
-    if err:
-      raise ExternalError("failure running bsdiff:\n%s\n" % (err,))
+    if err or p.returncode != 0:
+      print "WARNING: failure running %s:\n%s\n" % (diff_program, err)
+      return None
     diff = ptemp.read()
-    ptemp.close()
   finally:
+    ptemp.close()
     stemp.close()
     ttemp.close()
 
@@ -461,9 +475,13 @@
       verbatim_targets.append((fn, tf.size))
     elif tf.sha1 != sf.sha1:
       # File is different; consider sending as a patch
-      d = Difference(tf, sf)
-      print fn, tf.size, len(d), (float(len(d)) / tf.size)
-      if len(d) > tf.size * OPTIONS.patch_threshold:
+      diff_method = "bsdiff"
+      if tf.name.endswith(".gz"):
+        diff_method = "imgdiff"
+      d = Difference(tf, sf, diff_method)
+      if d is not None:
+        print fn, tf.size, len(d), (float(len(d)) / tf.size)
+      if d is None or len(d) > tf.size * OPTIONS.patch_threshold:
         # patch is almost as big as the file; don't bother patching
         tf.AddToZip(output_zip)
         verbatim_targets.append((fn, tf.size))
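
Put another way, now that Difference can return None, a file is sent as a patch only when a diff was actually produced and it is meaningfully smaller than shipping the file verbatim. A tiny sketch of that decision; the 0.95 default here is a hypothetical stand-in for OPTIONS.patch_threshold:

    def should_send_patch(target_size, patch_data, patch_threshold=0.95):
        # Ship a patch only if the diff succeeded and beats the threshold;
        # otherwise the target file goes into the package verbatim.
        if patch_data is None:
            return False
        return len(patch_data) <= target_size * patch_threshold
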
@@ -487,17 +505,21 @@
                  '"ro.build.fingerprint=%s") == "true"') %
                 (source_fp, target_fp))
 
-  source_boot = common.BuildBootableImage(
-      os.path.join(OPTIONS.source_tmp, "BOOT"))
-  target_boot = common.BuildBootableImage(
-      os.path.join(OPTIONS.target_tmp, "BOOT"))
-  updating_boot = (source_boot != target_boot)
+  source_boot = File("/tmp/boot.img",
+                     common.BuildBootableImage(
+      os.path.join(OPTIONS.source_tmp, "BOOT")))
+  target_boot = File("/tmp/boot.img",
+                     common.BuildBootableImage(
+      os.path.join(OPTIONS.target_tmp, "BOOT")))
+  updating_boot = (source_boot.data != target_boot.data)
 
-  source_recovery = common.BuildBootableImage(
-      os.path.join(OPTIONS.source_tmp, "RECOVERY"))
-  target_recovery = common.BuildBootableImage(
-      os.path.join(OPTIONS.target_tmp, "RECOVERY"))
-  updating_recovery = (source_recovery != target_recovery)
+  source_recovery = File("system/recovery.img",
+                         common.BuildBootableImage(
+      os.path.join(OPTIONS.source_tmp, "RECOVERY")))
+  target_recovery = File("system/recovery.img",
+                         common.BuildBootableImage(
+      os.path.join(OPTIONS.target_tmp, "RECOVERY")))
+  updating_recovery = (source_recovery.data != target_recovery.data)
 
   source_radio = source_zip.read("RADIO/image")
   target_radio = target_zip.read("RADIO/image")
@@ -515,17 +537,41 @@
 
   pb_verify = progress_bar_total * 0.3 * \
               (total_patched_size /
-               float(total_patched_size+total_verbatim_size))
+               float(total_patched_size+total_verbatim_size+1))
 
   for i, (fn, tf, sf, size) in enumerate(patch_list):
     if i % 5 == 0:
       next_sizes = sum([i[3] for i in patch_list[i:i+5]])
       script.append("show_progress %f 1" %
-                    (next_sizes * pb_verify / total_patched_size,))
+                    (next_sizes * pb_verify / (total_patched_size+1),))
     script.append("run_program PACKAGE:applypatch -c /%s %s %s" %
                   (fn, tf.sha1, sf.sha1))
 
-  if patch_list:
+  if updating_recovery:
+    d = Difference(target_recovery, source_recovery, "imgdiff")
+    print "recovery  target: %d  source: %d  diff: %d" % (
+        target_recovery.size, source_recovery.size, len(d))
+
+    output_zip.writestr("patch/recovery.img.p", d)
+
+    script.append(("run_program PACKAGE:applypatch -c "
+                   "MTD:recovery:%d:%s:%d:%s") %
+                  (source_recovery.size, source_recovery.sha1,
+                   target_recovery.size, target_recovery.sha1))
+
+  if updating_boot:
+    d = Difference(target_boot, source_boot, "imgdiff")
+    print "boot      target: %d  source: %d  diff: %d" % (
+        target_boot.size, source_boot.size, len(d))
+
+    output_zip.writestr("patch/boot.img.p", d)
+
+    script.append(("run_program PACKAGE:applypatch -c "
+                   "MTD:boot:%d:%s:%d:%s") %
+                  (source_boot.size, source_boot.sha1,
+                   target_boot.size, target_boot.sha1))
+
+  if patch_list or updating_recovery or updating_boot:
     script.append("run_program PACKAGE:applypatch -s %d" %
                   (largest_source_size,))
     script.append("copy_dir PACKAGE:patch CACHE:../tmp/patchtmp")
@@ -539,14 +585,31 @@
   DeleteFiles(script, [SubstituteRoot(i[0]) for i in verbatim_targets])
 
   if updating_boot:
-    script.append("format BOOT:")
-    output_zip.writestr("boot.img", target_boot)
+    # Produce the boot image by applying a patch to the current
+    # contents of the boot partition, and write it back to the
+    # partition.
+    script.append(("run_program PACKAGE:applypatch "
+                   "MTD:boot:%d:%s:%d:%s - "
+                   "%s %d %s:/tmp/patchtmp/boot.img.p")
+                  % (source_boot.size, source_boot.sha1,
+                     target_boot.size, target_boot.sha1,
+                     target_boot.sha1,
+                     target_boot.size,
+                     source_boot.sha1))
     print "boot image changed; including."
   else:
     print "boot image unchanged; skipping."
 
   if updating_recovery:
-    output_zip.writestr("system/recovery.img", target_recovery)
+    # Produce /system/recovery.img by applying a patch to the current
+    # contents of the recovery partition.
+    script.append(("run_program PACKAGE:applypatch MTD:recovery:%d:%s:%d:%s "
+                   "/system/recovery.img %s %d %s:/tmp/patchtmp/recovery.img.p")
+                  % (source_recovery.size, source_recovery.sha1,
+                     target_recovery.size, target_recovery.sha1,
+                     target_recovery.sha1,
+                     target_recovery.size,
+                     source_recovery.sha1))
     print "recovery image changed; including."
   else:
     print "recovery image unchanged; skipping."
@@ -561,12 +624,12 @@
 
   pb_apply = progress_bar_total * 0.7 * \
              (total_patched_size /
-              float(total_patched_size+total_verbatim_size))
+              float(total_patched_size+total_verbatim_size+1))
   for i, (fn, tf, sf, size) in enumerate(patch_list):
     if i % 5 == 0:
       next_sizes = sum([i[3] for i in patch_list[i:i+5]])
       script.append("show_progress %f 1" %
-                    (next_sizes * pb_apply / total_patched_size,))
+                    (next_sizes * pb_apply / (total_patched_size+1),))
     script.append(("run_program PACKAGE:applypatch "
                    "/%s - %s %d %s:/tmp/patchtmp/%s.p") %
                   (fn, tf.sha1, tf.size, sf.sha1, fn))
@@ -594,7 +657,7 @@
   if verbatim_targets:
     pb_verbatim = progress_bar_total * \
                   (total_verbatim_size /
-                   float(total_patched_size+total_verbatim_size))
+                   float(total_patched_size+total_verbatim_size+1))
     script.append("show_progress %f 5" % (pb_verbatim,))
     script.append("copy_dir PACKAGE:system SYSTEM:")
 
@@ -615,10 +678,6 @@
   # permissions.
   script.extend(temp_script)
 
-  if updating_boot:
-    script.append("show_progress 0.1 5")
-    script.append("write_raw_image PACKAGE:boot.img BOOT:")
-
   if OPTIONS.extra_script is not None:
     script.append(OPTIONS.extra_script)
 
diff --git a/tools/releasetools/sign_target_files_apks b/tools/releasetools/sign_target_files_apks
index b3bfaee..9f393c8 100755
--- a/tools/releasetools/sign_target_files_apks
+++ b/tools/releasetools/sign_target_files_apks
@@ -101,6 +101,85 @@
   return certmap
 
 
+def CheckAllApksSigned(input_tf_zip, apk_key_map):
+  """Check that all the APKs we want to sign have keys specified, and
+  error out if they don't."""
+  unknown_apks = []
+  for info in input_tf_zip.infolist():
+    if info.filename.endswith(".apk"):
+      name = os.path.basename(info.filename)
+      if name not in apk_key_map:
+        unknown_apks.append(name)
+  if unknown_apks:
+    print "ERROR: no key specified for:\n\n ",
+    print "\n  ".join(unknown_apks)
+    print "\nUse '-e <apkname>=' to specify a key (which may be an"
+    print "empty string to not sign this apk)."
+    sys.exit(1)
+
+
+def SharedUserForApk(data):
+  tmp = tempfile.NamedTemporaryFile()
+  tmp.write(data)
+  tmp.flush()
+
+  p = common.Run(["aapt", "dump", "xmltree", tmp.name, "AndroidManifest.xml"],
+                 stdout=subprocess.PIPE)
+  data, _ = p.communicate()
+  if p.returncode != 0:
+    raise ExternalError("failed to run aapt dump")
+  lines = data.split("\n")
+  for i in lines:
+    m = re.match(r'^\s*A: android:sharedUserId\([0-9a-fx]*\)="([^"]*)" .*$', i)
+    if m:
+      return m.group(1)
+  return None
+
+
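
For reference, the attribute line that SharedUserForApk's regular expression is matching in the "aapt dump xmltree" output looks roughly like the sample below; the resource id is a made-up placeholder, and android.uid.system is simply a familiar shared user id used for illustration:

    import re

    line = ('      A: android:sharedUserId(0x01010000)="android.uid.system"'
            ' (Raw: "android.uid.system")')
    m = re.match(r'^\s*A: android:sharedUserId\([0-9a-fx]*\)="([^"]*)" .*$', line)
    assert m and m.group(1) == "android.uid.system"
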
+def CheckSharedUserIdsConsistent(input_tf_zip, apk_key_map):
+  """Check that all packages that request the same shared user id are
+  going to be signed with the same key."""
+
+  shared_user_apks = {}
+  maxlen = len("(unknown key)")
+
+  for info in input_tf_zip.infolist():
+    if info.filename.endswith(".apk"):
+      data = input_tf_zip.read(info.filename)
+
+      name = os.path.basename(info.filename)
+      shared_user = SharedUserForApk(data)
+      key = apk_key_map[name]
+      maxlen = max(maxlen, len(key))
+
+      if shared_user is not None:
+        shared_user_apks.setdefault(
+            shared_user, {}).setdefault(key, []).append(name)
+
+  errors = []
+  for k, v in shared_user_apks.iteritems():
+    # each shared user should have exactly one key used for all the
+    # apks that want that user.
+    if len(v) > 1:
+      errors.append((k, v))
+
+  if not errors: return
+
+  print "ERROR:  shared user inconsistency.  All apks wanting to use"
+  print "        a given shared user must be signed with the same key."
+  print
+  errors.sort()
+  for user, keys in errors:
+    print 'shared user id "%s":' % (user,)
+    for key, apps in keys.iteritems():
+      print '  %-*s   %s' % (maxlen, key or "(unknown key)", apps[0])
+      for a in apps[1:]:
+        print (' ' * (maxlen+5)) + a
+    print
+
+  sys.exit(1)
+
+
 def SignApk(data, keyname, pw):
   unsigned = tempfile.NamedTemporaryFile()
   unsigned.write(data)
@@ -117,31 +196,11 @@
   return data
 
 
-def SignApks(input_tf_zip, output_tf_zip):
-  apk_key_map = GetApkCerts(input_tf_zip)
-
+def SignApks(input_tf_zip, output_tf_zip, apk_key_map, key_passwords):
   maxsize = max([len(os.path.basename(i.filename))
                  for i in input_tf_zip.infolist()
                  if i.filename.endswith('.apk')])
 
-  # Check that all the APKs we want to sign have keys specified, and
-  # error out if they don't.  Do this before prompting for key
-  # passwords in case we're going to fail anyway.
-  unknown_apks = []
-  for info in input_tf_zip.infolist():
-    if info.filename.endswith(".apk"):
-      name = os.path.basename(info.filename)
-      if name not in apk_key_map:
-        unknown_apks.append(name)
-  if unknown_apks:
-    print "ERROR: no key specified for:\n\n ",
-    print "\n  ".join(unknown_apks)
-    print "\nUse '-e <apkname>=' to specify a key (which may be an"
-    print "empty string to not sign this apk)."
-    sys.exit(1)
-
-  key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
-
   for info in input_tf_zip.infolist():
     data = input_tf_zip.read(info.filename)
     out_info = copy.copy(info)
@@ -289,7 +348,12 @@
   input_zip = zipfile.ZipFile(args[0], "r")
   output_zip = zipfile.ZipFile(args[1], "w")
 
-  SignApks(input_zip, output_zip)
+  apk_key_map = GetApkCerts(input_zip)
+  CheckAllApksSigned(input_zip, apk_key_map)
+  CheckSharedUserIdsConsistent(input_zip, apk_key_map)
+
+  key_passwords = common.GetKeyPasswords(set(apk_key_map.values()))
+  SignApks(input_zip, output_zip, apk_key_map, key_passwords)
 
   if OPTIONS.replace_ota_keys:
     ReplaceOtaKeys(input_zip, output_zip)