Merge "Don't build tests in normal builds"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 8de0e84..b9f6e13 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -383,6 +383,21 @@
 
 $(call add-clean-step, rm -rf $(HOST_OUT_INTERMEDIATES)/include)
 
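+# Scrub stale generated sources: proto/renderscript java output moves to the
+# common intermediates (see core/java_common.mk and core/java.mk below), and
+# post_clean.mk, which owned the *_gen_java_config.mk state files, is removed.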
+$(call add-clean-step, rm -rf $(HOST_OUT_COMMON_INTERMEDIATES)/APPS/*_intermediates/src)
+$(call add-clean-step, rm -rf $(HOST_OUT_COMMON_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/src)
+$(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS/*_intermediates/src)
+$(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/src)
+$(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/previous_gen_java_config.mk)
+$(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/current_gen_java_config.mk)
+
+$(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/JAVA_LIBRARIES/*/package-res.apk)
+$(call add-clean-step, rm -rf $(TARGET_OUT_INTERMEDIATES)/APPS/*/package-res.apk)
+$(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/APPS/*_intermediates/src)
+$(call add-clean-step, rm -rf $(TARGET_OUT_COMMON_INTERMEDIATES)/JAVA_LIBRARIES/*_intermediates/src)
+
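+# Clean the new per-module testcases output directories (defined in
+# core/envsetup.mk below).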
+$(call add-clean-step, rm -rf $(HOST_OUT_TESTCASES))
+$(call add-clean-step, rm -rf $(TARGET_OUT_TESTCASES))
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/core/Makefile b/core/Makefile
index c409458..c24bbe2 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -74,12 +74,12 @@
 # default.prop
 INSTALLED_DEFAULT_PROP_TARGET := $(TARGET_ROOT_OUT)/default.prop
 ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_DEFAULT_PROP_TARGET)
-ADDITIONAL_DEFAULT_PROPERTIES := \
+FINAL_DEFAULT_PROPERTIES := \
     $(call collapse-pairs, $(ADDITIONAL_DEFAULT_PROPERTIES))
-ADDITIONAL_DEFAULT_PROPERTIES += \
+FINAL_DEFAULT_PROPERTIES += \
     $(call collapse-pairs, $(PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
-ADDITIONAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
-    $(ADDITIONAL_DEFAULT_PROPERTIES),=)
+FINAL_DEFAULT_PROPERTIES := $(call uniq-pairs-by-first-component, \
+    $(FINAL_DEFAULT_PROPERTIES),=)
 
 intermediate_system_build_prop := $(call intermediates-dir-for,ETC,system_build_prop)/build.prop
 
@@ -89,7 +89,7 @@
 	$(hide) echo "#" > $@; \
 	        echo "# ADDITIONAL_DEFAULT_PROPERTIES" >> $@; \
 	        echo "#" >> $@;
-	$(hide) $(foreach line,$(ADDITIONAL_DEFAULT_PROPERTIES), \
+	$(hide) $(foreach line,$(FINAL_DEFAULT_PROPERTIES), \
 		echo "$(line)" >> $@;)
 	$(hide) echo "#" >> $@; \
 	        echo "# BOOTIMAGE_BUILD_PROPERTIES" >> $@; \
@@ -103,10 +103,10 @@
 # build.prop
 INSTALLED_BUILD_PROP_TARGET := $(TARGET_OUT)/build.prop
 ALL_DEFAULT_INSTALLED_MODULES += $(INSTALLED_BUILD_PROP_TARGET)
-ADDITIONAL_BUILD_PROPERTIES := \
+FINAL_BUILD_PROPERTIES := \
     $(call collapse-pairs, $(ADDITIONAL_BUILD_PROPERTIES))
-ADDITIONAL_BUILD_PROPERTIES := $(call uniq-pairs-by-first-component, \
-    $(ADDITIONAL_BUILD_PROPERTIES),=)
+FINAL_BUILD_PROPERTIES := $(call uniq-pairs-by-first-component, \
+    $(FINAL_BUILD_PROPERTIES),=)
 
 # A list of arbitrary tags describing the build configuration.
 # Force ":=" so we can use +=
@@ -256,12 +256,12 @@
 			echo "#" >> $@; \
 			cat $(file) >> $@; \
 		fi;)
-	$(if $(ADDITIONAL_BUILD_PROPERTIES), \
+	$(if $(FINAL_BUILD_PROPERTIES), \
 		$(hide) echo >> $@; \
 		        echo "#" >> $@; \
 		        echo "# ADDITIONAL_BUILD_PROPERTIES" >> $@; \
 		        echo "#" >> $@; )
-	$(hide) $(foreach line,$(ADDITIONAL_BUILD_PROPERTIES), \
+	$(hide) $(foreach line,$(FINAL_BUILD_PROPERTIES), \
 		echo "$(line)" >> $@;)
 	$(hide) cat $(INSTALLED_ANDROID_INFO_TXT_TARGET) | grep 'require version-' | sed -e 's/require version-/ro.build.expect./g' >> $@
 	$(hide) build/tools/post_process_props.py $@ $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_PROPERTY_BLACKLIST)
@@ -835,7 +835,7 @@
 # $(1): the path of the output dictionary file
 # $(2): additional "key=value" pairs to append to the dictionary file.
 define generate-userimage-prop-dictionary
-$(hide) echo "ext_mkuserimg=$(MKEXTUSERIMG)" >> $(1)
+$(hide) echo "ext_mkuserimg=$(notdir $(MKEXTUSERIMG))" >> $(1)
 $(if $(INTERNAL_USERIMAGES_EXT_VARIANT),$(hide) echo "fs_type=$(INTERNAL_USERIMAGES_EXT_VARIANT)" >> $(1))
 $(if $(BOARD_SYSTEMIMAGE_PARTITION_SIZE),$(hide) echo "system_size=$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)" >> $(1))
 $(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_fs_type=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -849,6 +849,8 @@
 $(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH),$(hide) echo "system_base_fs_file=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_BASE_FS_PATH)" >> $(1))
 $(if $(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "userdata_fs_type=$(BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
 $(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(hide) echo "userdata_size=$(BOARD_USERDATAIMAGE_PARTITION_SIZE)" >> $(1))
+$(if $(BOARD_FLASH_LOGICAL_BLOCK_SIZE), $(hide) echo "flash_logical_block_size=$(BOARD_FLASH_LOGICAL_BLOCK_SIZE)" >> $(1))
+$(if $(BOARD_FLASH_ERASE_BLOCK_SIZE), $(hide) echo "flash_erase_block_size=$(BOARD_FLASH_ERASE_BLOCK_SIZE)" >> $(1))
 $(if $(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "cache_fs_type=$(BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
 $(if $(BOARD_CACHEIMAGE_PARTITION_SIZE),$(hide) echo "cache_size=$(BOARD_CACHEIMAGE_PARTITION_SIZE)" >> $(1))
 $(if $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "vendor_fs_type=$(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
@@ -1722,8 +1724,11 @@
   $(HOST_OUT_JAVA_LIBRARIES)/dumpkey.jar \
   $(HOST_OUT_JAVA_LIBRARIES)/signapk.jar \
   $(HOST_OUT_JAVA_LIBRARIES)/BootSignature.jar \
-  $(MAKE_EXT4FS) \
-  $(MKEXTUSERIMG) \
+  $(HOST_OUT_EXECUTABLES)/make_ext4fs \
+  $(HOST_OUT_EXECUTABLES)/mkuserimg.sh \
+  $(HOST_OUT_EXECUTABLES)/mke2fs \
+  $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs.sh \
+  $(HOST_OUT_EXECUTABLES)/e2fsdroid \
   $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh \
   $(HOST_OUT_EXECUTABLES)/mksquashfs \
   $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh \
@@ -1753,6 +1758,7 @@
   $(HOST_LIBRARY_PATH)/libext2_blkid-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_com_err-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_e2p-host$(HOST_SHLIB_SUFFIX) \
+  $(HOST_LIBRARY_PATH)/libext2_misc$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_profile-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_quota-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libext2_uuid-host$(HOST_SHLIB_SUFFIX) \
@@ -1765,7 +1771,9 @@
   $(HOST_LIBRARY_PATH)/libprotobuf-cpp-lite$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libssl-host$(HOST_SHLIB_SUFFIX) \
   $(HOST_LIBRARY_PATH)/libz-host$(HOST_SHLIB_SUFFIX) \
-  $(HOST_LIBRARY_PATH)/libbase$(HOST_SHLIB_SUFFIX)
+  $(HOST_LIBRARY_PATH)/libsparse-host$(HOST_SHLIB_SUFFIX) \
+  $(HOST_LIBRARY_PATH)/libbase$(HOST_SHLIB_SUFFIX) \
+  $(HOST_LIBRARY_PATH)/libpcre2$(HOST_SHLIB_SUFFIX)
 
 .PHONY: otatools
 otatools: $(OTATOOLS)
@@ -2189,7 +2197,7 @@
 	$(hide) rm -rf $@ $(PRIVATE_LIST_FILE)
 	$(hide) mkdir -p $(dir $@) $(TARGET_OUT_UNSTRIPPED) $(dir $(PRIVATE_LIST_FILE))
 	$(hide) find $(TARGET_OUT_UNSTRIPPED) | sort >$(PRIVATE_LIST_FILE)
-	$(hide) $(SOONG_ZIP) -d -o $@ -C . -l $(PRIVATE_LIST_FILE)
+	$(hide) $(SOONG_ZIP) -d -o $@ -C $(OUT_DIR)/.. -l $(PRIVATE_LIST_FILE)
 # -----------------------------------------------------------------
 # A zip of the coverage directory.
 #
@@ -2470,6 +2478,7 @@
 -include $(sort $(wildcard product/*/*/build/tasks/*.mk))
 # Also add test-specific tasks
 include $(sort $(wildcard platform_testing/build/tasks/*.mk))
+include $(sort $(wildcard test/vts/tools/build/tasks/*.mk))
 endif
 
 include $(BUILD_SYSTEM)/product-graph.mk
diff --git a/core/aux_config.mk b/core/aux_config.mk
index decff34..c40b8cc 100644
--- a/core/aux_config.mk
+++ b/core/aux_config.mk
@@ -151,7 +151,11 @@
 variant_sfx :=_aux_variant_config.mk
 os_sfx :=_aux_os_config.mk
 
-all_configs := $(shell find device vendor -maxdepth 4 -name '*$(variant_sfx)' -o -name '*$(os_sfx)' | sort)
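+# Only run find(1) over roots that actually exist; a tree may lack device/
+# or vendor/, and find errors out on missing paths.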
+config_roots := $(wildcard device vendor)
+all_configs :=
+ifdef config_roots
+all_configs := $(shell find $(config_roots) -maxdepth 4 -name '*$(variant_sfx)' -o -name '*$(os_sfx)' | sort)
+endif
 all_os_configs := $(filter %$(os_sfx),$(all_configs))
 all_variant_configs := $(filter %$(variant_sfx),$(all_configs))
 
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 68ac08b..1f55eae 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -424,9 +424,13 @@
 $(error $(LOCAL_PATH):$(LOCAL_MODULE) LOCAL_COMPATIBILITY_SUITE can be only one name)
 endif
 
+# Copy this module into its own subdirectory in the common testcases output directory.
+my_testcases_subdir := $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)
+
 # The module itself.
 my_compat_dist := \
-  $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(my_installed_module_stem)
+  $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(my_installed_module_stem) \
+  $(LOCAL_BUILT_MODULE):$(my_testcases_subdir)/$(my_installed_module_stem)
 
 # Make sure we only add the files once for multilib modules.
 ifndef $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
@@ -436,17 +440,22 @@
 my_compat_dist += $(foreach f, $(LOCAL_COMPATIBILITY_SUPPORT_FILES),\
   $(eval p := $(subst :,$(space),$(f)))\
   $(eval s := $(word 1,$(p)))\
-  $(eval d := $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(or $(word 2,$(p)),$(notdir $(word 1,$(p)))))\
-  $(s):$(d))
+  $(eval n := $(or $(word 2,$(p)),$(notdir $(word 1, $(p))))) \
+  $(eval d := $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(n)) \
+  $(s):$(d) $(s):$(my_testcases_subdir)/$(n))
 
 ifneq (,$(wildcard $(LOCAL_PATH)/AndroidTest.xml))
 my_compat_dist += \
   $(LOCAL_PATH)/AndroidTest.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).config
+my_compat_dist += \
+  $(LOCAL_PATH)/AndroidTest.xml:$(my_testcases_subdir)/$(LOCAL_MODULE).config
 endif
 
 ifneq (,$(wildcard $(LOCAL_PATH)/DynamicConfig.xml))
 my_compat_dist += \
   $(LOCAL_PATH)/DynamicConfig.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).dynamic
+my_compat_dist += \
+  $(LOCAL_PATH)/DynamicConfig.xml:$(my_testcases_subdir)/$(LOCAL_MODULE).dynamic
 endif
 endif # $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
 
@@ -528,6 +537,9 @@
 endif
 ALL_MODULES.$(my_register_name).REQUIRED := \
     $(strip $(ALL_MODULES.$(my_register_name).REQUIRED) $(my_required_modules))
+ALL_MODULES.$(my_register_name).EXPLICITLY_REQUIRED := \
+    $(strip $(ALL_MODULES.$(my_register_name).EXPLICITLY_REQUIRED)\
+        $(my_required_modules))
 ALL_MODULES.$(my_register_name).EVENT_LOG_TAGS := \
     $(ALL_MODULES.$(my_register_name).EVENT_LOG_TAGS) $(event_log_tags)
 ALL_MODULES.$(my_register_name).MAKEFILE := \
diff --git a/core/binary.mk b/core/binary.mk
index b37ef80..304a72e 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -57,6 +57,7 @@
 my_additional_dependencies := $(LOCAL_ADDITIONAL_DEPENDENCIES)
 my_export_c_include_dirs := $(LOCAL_EXPORT_C_INCLUDE_DIRS)
 my_export_c_include_deps := $(LOCAL_EXPORT_C_INCLUDE_DEPS)
+my_arflags :=
 
 ifneq (,$(strip $(foreach dir,$(subst $(comma),$(space),$(COVERAGE_PATHS)),$(filter $(dir)%,$(LOCAL_PATH)))))
   my_native_coverage := true
@@ -393,8 +394,21 @@
     my_clang := true
 endif
 
-my_c_std_version := $(DEFAULT_C_STD_VERSION)
-my_cpp_std_version := $(DEFAULT_CPP_STD_VERSION)
+ifeq ($(LOCAL_C_STD),)
+    my_c_std_version := $(DEFAULT_C_STD_VERSION)
+else ifeq ($(LOCAL_C_STD),experimental)
+    my_c_std_version := $(EXPERIMENTAL_C_STD_VERSION)
+else
+    my_c_std_version := $(LOCAL_C_STD)
+endif
+
+ifeq ($(LOCAL_CPP_STD),)
+    my_cpp_std_version := $(DEFAULT_CPP_STD_VERSION)
+else ifeq ($(LOCAL_CPP_STD),experimental)
+    my_cpp_std_version := $(EXPERIMENTAL_CPP_STD_VERSION)
+else
+    my_cpp_std_version := $(LOCAL_CPP_STD)
+endif
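+
+# Example (illustrative values): a module can now request a standard with
+#   LOCAL_C_STD := c99
+#   LOCAL_CPP_STD := c++14
+# or set either to "experimental" to pick up the EXPERIMENTAL_*_STD_VERSION.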
 
 ifneq ($(my_clang),true)
     # GCC uses an invalid C++14 ABI (emits calls to
@@ -788,7 +802,7 @@
 
 renderscript_includes := \
     $(TOPDIR)external/clang/lib/Headers \
-    $(TOPDIR)frameworks/rs/scriptc \
+    $(TOPDIR)frameworks/rs/script_api/include \
     $(LOCAL_RENDERSCRIPT_INCLUDES)
 
 ifneq ($(LOCAL_RENDERSCRIPT_INCLUDES_OVERRIDE),)
@@ -1736,6 +1750,7 @@
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_LDLIBS := $(my_ldlibs)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TIDY_CHECKS := $(my_tidy_checks)
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_TIDY_FLAGS := $(my_tidy_flags)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_ARFLAGS := $(my_arflags)
 
 # this is really the way to get the files onto the command line instead
 # of using $^, because then LOCAL_ADDITIONAL_DEPENDENCIES doesn't work
diff --git a/core/clang/config.mk b/core/clang/config.mk
index b4fe708..be43a26 100644
--- a/core/clang/config.mk
+++ b/core/clang/config.mk
@@ -5,16 +5,6 @@
 CLANG_TBLGEN := $(BUILD_OUT_EXECUTABLES)/clang-tblgen$(BUILD_EXECUTABLE_SUFFIX)
 LLVM_TBLGEN := $(BUILD_OUT_EXECUTABLES)/llvm-tblgen$(BUILD_EXECUTABLE_SUFFIX)
 
-# RenderScript-specific tools
-# These are tied to the version of LLVM directly in external/, so they might
-# trail the host prebuilts being used for the rest of the build process.
-RS_LLVM_PREBUILTS_VERSION := clang-3289846
-RS_LLVM_PREBUILTS_BASE := prebuilts/clang/host
-RS_LLVM_PREBUILTS_PATH := $(RS_LLVM_PREBUILTS_BASE)/$(BUILD_OS)-x86/$(RS_LLVM_PREBUILTS_VERSION)/bin
-RS_CLANG := $(RS_LLVM_PREBUILTS_PATH)/clang$(BUILD_EXECUTABLE_SUFFIX)
-RS_LLVM_AS := $(RS_LLVM_PREBUILTS_PATH)/llvm-as$(BUILD_EXECUTABLE_SUFFIX)
-RS_LLVM_LINK := $(RS_LLVM_PREBUILTS_PATH)/llvm-link$(BUILD_EXECUTABLE_SUFFIX)
-
 define convert-to-clang-flags
 $(strip $(filter-out $(CLANG_CONFIG_UNKNOWN_CFLAGS),$(1)))
 endef
diff --git a/core/clang/versions.mk b/core/clang/versions.mk
index d9c8aab..abed69b 100644
--- a/core/clang/versions.mk
+++ b/core/clang/versions.mk
@@ -1,3 +1,4 @@
 ## Clang/LLVM release versions.
 
-LLVM_PREBUILTS_VERSION ?= clang-3289846
+LLVM_PREBUILTS_VERSION ?= clang-3688880
+LLVM_PREBUILTS_BASE ?= prebuilts/clang/host
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index c0343a0..bae38c5 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -37,6 +37,8 @@
 LOCAL_COPY_TO_INTERMEDIATE_LIBRARIES:=
 LOCAL_CPP_EXTENSION:=
 LOCAL_CPPFLAGS:=
+LOCAL_CPP_STD:=
+LOCAL_C_STD:=
 LOCAL_CTS_TEST_PACKAGE:=
 LOCAL_CTS_TEST_RUNNER:=
 LOCAL_CXX:=
@@ -78,6 +80,7 @@
 LOCAL_GENERATED_SOURCES:=
 # Group static libraries with "-Wl,--start-group" and "-Wl,--end-group" when linking.
 LOCAL_GROUP_STATIC_LIBRARIES:=
+LOCAL_GTEST:=true
 LOCAL_HAL_STATIC_LIBRARIES:=
 LOCAL_INIT_RC:=
 LOCAL_INSTALLED_MODULE:=
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index 3a59ee3..57a7993 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -3,6 +3,7 @@
 ##############################################
 
 my_sanitize := $(strip $(LOCAL_SANITIZE))
+my_sanitize_diag := $(strip $(LOCAL_SANITIZE_DIAG))
 
 # SANITIZE_HOST is only in effect if the module is already using clang (host
 # modules that haven't set `LOCAL_CLANG := false` and device modules that
@@ -61,6 +62,24 @@
   my_sanitize :=
 endif
 
+# If CFI is disabled globally, remove it from my_sanitize.
+ifeq ($(strip $(ENABLE_CFI)),)
+  my_sanitize := $(filter-out cfi,$(my_sanitize))
+  my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
+endif
+
+# Disable CFI for arm32 (b/35157333).
+ifneq ($(filter arm,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+  my_sanitize := $(filter-out cfi,$(my_sanitize))
+  my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
+endif
+
+# CFI needs gold linker, and mips toolchain does not have one.
+ifneq ($(filter mips mips64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+  my_sanitize := $(filter-out cfi,$(my_sanitize))
+  my_sanitize_diag := $(filter-out cfi,$(my_sanitize_diag))
+endif
+
 my_nosanitize = $(strip $(LOCAL_NOSANITIZE))
 ifneq ($(my_nosanitize),)
   my_sanitize := $(filter-out $(my_nosanitize),$(my_sanitize))
@@ -136,8 +155,18 @@
 endif
 
 ifneq ($(filter cfi,$(my_sanitize)),)
+  # __cfi_check needs to be built as Thumb (see the code in linker_cfi.cpp).
+  # LLVM is not set up to do this on a function basis, so force Thumb on the
+  # entire module.
+  LOCAL_ARM_MODE := thumb
   my_cflags += -flto -fsanitize-cfi-cross-dso -fvisibility=default
   my_ldflags += -flto -fsanitize-cfi-cross-dso -fsanitize=cfi -Wl,-plugin-opt,O1 -Wl,-export-dynamic-symbol=__cfi_check
+  my_arflags += --plugin $(LLVM_PREBUILTS_PATH)/../lib64/LLVMgold.so
+  # Workaround for b/33678192. CFI jumptables need Thumb2 codegen.  Revert when
+  # Clang is updated past r290384.
+  ifneq ($(filter arm,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+    my_ldflags += -march=armv7-a
+  endif
 endif
 
 # If local or global modules need ASAN, add linker flags.
@@ -195,8 +224,8 @@
   my_cflags += -fsanitize-recover=$(recover_arg)
 endif
 
-ifneq ($(strip $(LOCAL_SANITIZE_DIAG)),)
-  notrap_arg := $(subst $(space),$(comma),$(LOCAL_SANITIZE_DIAG)),
+ifneq ($(my_sanitize_diag),)
+  notrap_arg := $(subst $(space),$(comma),$(my_sanitize_diag)),
   my_cflags += -fno-sanitize-trap=$(notrap_arg)
   # Diagnostic requires a runtime library, unless ASan or TSan are also enabled.
   ifeq ($(filter address thread,$(my_sanitize)),)
diff --git a/core/definitions.mk b/core/definitions.mk
index e07d911..da5aff1 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -1535,6 +1535,7 @@
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-target-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1556,20 +1557,22 @@
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \
-        $@ $$filelist
+        $(2) $$filelist
 
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define extract-and-include-whole-static-libs-first
 $(if $(strip $(1)),
-$(hide) cp $(1) $@)
+$(hide) cp $(1) $(2))
 endef
 
+# $(1): the full path of the destination static library.
 define extract-and-include-target-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-target-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-target-whole-static-lib, $(lib), $(1)))
 endef
 
 # Explicitly delete the archive first so that ar doesn't
@@ -1577,14 +1580,17 @@
 define transform-o-to-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-target-whole-static-libs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-target-whole-static-libs,$@.tmp)
 $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_AR) \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)TARGET_GLOBAL_ARFLAGS) \
-    $@,$(PRIVATE_ALL_OBJECTS))
+    $(PRIVATE_ARFLAGS) \
+    $@.tmp,$(PRIVATE_ALL_OBJECTS))
+$(hide) mv -f $@.tmp $@
 endef
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-aux-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1605,14 +1611,14 @@
         $(PRIVATE_AR) p $$lib_to_include $$f > $$ldir/$$ext$$f; \
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
-    $(PRIVATE_AR) $(AUX_GLOBAL_ARFLAGS) $@ $$filelist
+    $(PRIVATE_AR) $(AUX_GLOBAL_ARFLAGS) $(2) $$filelist
 
 endef
 
 define extract-and-include-aux-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-aux-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-aux-whole-static-lib, $(lib), $(1)))
 endef
 
 # Explicitly delete the archive first so that ar doesn't
@@ -1620,10 +1626,11 @@
 define transform-o-to-aux-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-aux-whole-static-libs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-aux-whole-static-libs,$@.tmp)
 $(call split-long-arguments,$(PRIVATE_AR) \
-    $(AUX_GLOBAL_ARFLAGS) $@,$(PRIVATE_ALL_OBJECTS))
+    $(AUX_GLOBAL_ARFLAGS) $@.tmp,$(PRIVATE_ALL_OBJECTS))
+$(hide) mv -f $@.tmp $@
 endef
 
 define transform-o-to-aux-executable-inner
@@ -1670,6 +1677,7 @@
 ###########################################################
 
 # $(1): the full path of the source static library.
+# $(2): the full path of the destination static library.
 define _extract-and-include-single-host-whole-static-lib
 $(hide) ldir=$(PRIVATE_INTERMEDIATES_DIR)/WHOLE/$(basename $(notdir $(1)))_objs;\
     rm -rf $$ldir; \
@@ -1691,30 +1699,30 @@
         filelist="$$filelist $$ldir/$$ext$$f"; \
     done ; \
     $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) \
-        $@ $$filelist
+        $(2) $$filelist
 
 endef
 
 define extract-and-include-host-whole-static-libs
-$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)))
+$(call extract-and-include-whole-static-libs-first, $(firstword $(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)),$(1))
 $(foreach lib,$(wordlist 2,999,$(PRIVATE_ALL_WHOLE_STATIC_LIBRARIES)), \
-    $(call _extract-and-include-single-host-whole-static-lib, $(lib)))
+    $(call _extract-and-include-single-host-whole-static-lib, $(lib),$(1)))
 endef
 
 ifeq ($(HOST_OS),darwin)
 # On Darwin the host ar fails if there is nothing to add to .a at all.
 # We work around by adding a dummy.o and then deleting it.
 define create-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(hide) touch $(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(hide) touch $(dir $(1))dummy.o)
 endef
 
 define get-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(dir $(1))dummy.o)
 endef
 
 define delete-dummy.o-if-no-objs
-$(if $(PRIVATE_ALL_OBJECTS),,$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) d $@ $(dir $@)dummy.o \
-  && rm -f $(dir $@)dummy.o)
+$(if $(PRIVATE_ALL_OBJECTS),,$(hide) $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) d $(1) $(dir $(1))dummy.o \
+  && rm -f $(dir $(1))dummy.o)
 endef
 endif  # HOST_OS is darwin
 
@@ -1723,13 +1731,14 @@
 define transform-host-o-to-static-lib
 @echo "$($(PRIVATE_PREFIX)DISPLAY) StaticLib: $(PRIVATE_MODULE) ($@)"
 @mkdir -p $(dir $@)
-@rm -f $@
-$(extract-and-include-host-whole-static-libs)
-$(create-dummy.o-if-no-objs)
+@rm -f $@ $@.tmp
+$(call extract-and-include-host-whole-static-libs,$@.tmp)
+$(call create-dummy.o-if-no-objs,$@.tmp)
 $(call split-long-arguments,$($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)AR) \
-    $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) $@,\
-    $(PRIVATE_ALL_OBJECTS) $(get-dummy.o-if-no-objs))
-$(delete-dummy.o-if-no-objs)
+    $($(PRIVATE_2ND_ARCH_VAR_PREFIX)$(PRIVATE_PREFIX)GLOBAL_ARFLAGS) $@.tmp,\
+    $(PRIVATE_ALL_OBJECTS) $(call get-dummy.o-if-no-objs,$@.tmp))
+$(call delete-dummy.o-if-no-objs,$@.tmp)
+$(hide) mv -f $@.tmp $@
 endef
 
 
@@ -2201,6 +2210,10 @@
 $(hide) if [ -d "$(PRIVATE_SOURCE_INTERMEDIATES_DIR)" ]; then \
           find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name '*.java' -and -not -name '.*' >> $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list; \
 fi
+$(if $(PRIVATE_HAS_PROTO_SOURCES), \
+    $(hide) find $(PRIVATE_PROTO_SOURCE_INTERMEDIATES_DIR) -name '*.java' -and -not -name '.*' >> $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list )
+$(if $(PRIVATE_HAS_RS_SOURCES), \
+    $(hide) find $(PRIVATE_RS_SOURCE_INTERMEDIATES_DIR) -name '*.java' -and -not -name '.*' >> $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list )
 $(hide) tr ' ' '\n' < $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list \
     | $(NORMALIZE_PATH) | sort -u > $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq
 $(hide) if [ -s $(PRIVATE_CLASS_INTERMEDIATES_DIR)/java-source-list-uniq ] ; then \
@@ -2263,6 +2276,10 @@
 $(hide) if [ -d "$(PRIVATE_SOURCE_INTERMEDIATES_DIR)" ]; then \
           find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list; \
 fi
+$(if $(PRIVATE_HAS_PROTO_SOURCES), \
+    $(hide) find $(PRIVATE_PROTO_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list )
+$(if $(PRIVATE_HAS_RS_SOURCES), \
+    $(hide) find $(PRIVATE_RS_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list )
 $(hide) tr ' ' '\n' < $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list \
     | $(NORMALIZE_PATH) | sort -u > $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list-uniq
 $(if $(PRIVATE_JACK_PROGUARD_FLAGS), \
@@ -2329,6 +2346,10 @@
 $(hide) if [ -d "$(PRIVATE_SOURCE_INTERMEDIATES_DIR)" ]; then \
           find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $@.java-source-list; \
 fi
+$(if $(PRIVATE_HAS_PROTO_SOURCES), \
+    $(hide) find $(PRIVATE_PROTO_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $@.java-source-list )
+$(if $(PRIVATE_HAS_RS_SOURCES), \
+    $(hide) find $(PRIVATE_RS_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $@.java-source-list )
 $(hide) tr ' ' '\n' < $@.java-source-list \
     | sort -u > $@.java-source-list-uniq
 $(hide) if [ -s $@.java-source-list-uniq ] ; then \
@@ -2438,6 +2459,10 @@
 $(hide) if [ -d "$(PRIVATE_SOURCE_INTERMEDIATES_DIR)" ]; then \
           find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list; \
 fi
+$(if $(PRIVATE_HAS_PROTO_SOURCES), \
+    $(hide) find $(PRIVATE_PROTO_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list )
+$(if $(PRIVATE_HAS_RS_SOURCES), \
+    $(hide) find $(PRIVATE_RS_SOURCE_INTERMEDIATES_DIR) -name '*.java' >> $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list )
 $(hide) tr ' ' '\n' < $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list \
     | $(NORMALIZE_PATH) | sort -u > $(PRIVATE_JACK_INTERMEDIATES_DIR)/java-source-list-uniq
 $(if $(PRIVATE_JACK_PROGUARD_FLAGS), \
@@ -2596,21 +2621,6 @@
 fi
 endef
 
-# Returns the minSdkVersion of the specified APK as a decimal number. If the
-# version is a codename, returns the current platform SDK version (always a
-# decimal number) instead. If the APK does not specify a minSdkVersion, returns
-# 0 to match how the Android platform interprets this situation at runtime.
-#
-# This currently substitutes any version which contains characters other than
-# digits with the current platform's API Level number. This is because I
-# couldn't figure out an easy way to perform the substitution only for the
-# version codes listed in PLATFORM_VERSION_ALL_CODENAMES.
-define get-package-min-sdk-version-int
-$$(($(AAPT) dump badging $(1) 2>&1 | grep '^sdkVersion' || echo "sdkVersion:'0'") \
-    | cut -d"'" -f2 | \
-    sed -e s/^.*[^0-9].*$$/$(PLATFORM_SDK_VERSION)/)
-endef
-
 # Sign a package using the specified key/cert.
 #
 define sign-package
diff --git a/core/dex_preopt.mk b/core/dex_preopt.mk
index ff0dfd5..b107ded 100644
--- a/core/dex_preopt.mk
+++ b/core/dex_preopt.mk
@@ -26,7 +26,6 @@
 # The default values for pre-opting: always preopt PIC.
 # Conditional to building on linux, as dex2oat currently does not work on darwin.
 ifeq ($(HOST_OS),linux)
-  WITH_DEXPREOPT_PIC ?= true
   WITH_DEXPREOPT ?= true
 # For an eng build only pre-opt the boot image. This gives reasonable performance and still
 # allows a simple workflow: building in frameworks/base and syncing.
@@ -40,10 +39,6 @@
 endif
 
 GLOBAL_DEXPREOPT_FLAGS :=
-ifeq ($(WITH_DEXPREOPT_PIC),true)
-# Compile boot.oat as position-independent code if WITH_DEXPREOPT_PIC=true
-GLOBAL_DEXPREOPT_FLAGS += --compile-pic
-endif
 
 # $(1): the .jar or .apk to remove classes.dex
 define dexpreopt-remove-classes.dex
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index b551739..41e5e87 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -119,10 +119,11 @@
 	--instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
 	--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
 	--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
-	--include-patch-information --runtime-arg -Xnorelocate \
+	--include-patch-information --runtime-arg -Xnorelocate --compile-pic \
 	--no-generate-debug-info --generate-build-id \
 	--abort-on-hard-verifier-error \
 	--no-inline-from=core-oj.jar \
 	$(PRIVATE_DEX_PREOPT_FLAGS) \
+	$(PRIVATE_ART_FILE_PREOPT_FLAGS) \
 	$(GLOBAL_DEXPREOPT_FLAGS)
 endef
diff --git a/core/dex_preopt_libart_boot.mk b/core/dex_preopt_libart_boot.mk
index 5d383a9..54211a4 100644
--- a/core/dex_preopt_libart_boot.mk
+++ b/core/dex_preopt_libart_boot.mk
@@ -71,7 +71,8 @@
 		--instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
 		--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
 		--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
-		--android-root=$(PRODUCT_OUT)/system --include-patch-information --runtime-arg -Xnorelocate \
+		--android-root=$(PRODUCT_OUT)/system \
+		--include-patch-information --runtime-arg -Xnorelocate --compile-pic \
 		--no-generate-debug-info --generate-build-id \
 		--multi-image --no-inline-from=core-oj.jar \
 		$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(COMPILED_CLASSES_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS)
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 52a67fe..9a15706 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -50,10 +50,14 @@
 
 built_odex :=
 built_vdex :=
+built_art :=
 installed_odex :=
 installed_vdex :=
+installed_art :=
 built_installed_odex :=
 built_installed_vdex :=
+built_installed_art :=
+
 ifdef LOCAL_DEX_PREOPT
 dexpreopt_boot_jar_module := $(filter $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE))
 ifdef dexpreopt_boot_jar_module
@@ -103,8 +107,10 @@
 
 built_odex := $(strip $(built_odex))
 built_vdex := $(strip $(built_vdex))
+built_art := $(strip $(built_art))
 installed_odex := $(strip $(installed_odex))
 installed_vdex := $(strip $(installed_vdex))
+installed_art := $(strip $(installed_art))
 
 ifdef built_odex
 ifndef LOCAL_DEX_PREOPT_FLAGS
@@ -113,16 +119,18 @@
 LOCAL_DEX_PREOPT_FLAGS := $(PRODUCT_DEX_PREOPT_DEFAULT_FLAGS)
 endif
 endif
-
 $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
 $(built_vdex): $(built_odex)
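+# The app image (.art) comes out of the same dex2oat invocation that produces
+# the .odex (see setup_one_odex.mk), so order it after built_odex.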
+$(built_art): $(built_odex)
 endif
 
 # Add the installed_odex to the list of installed files for this module.
 ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
 ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
+ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
 ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
 ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
+ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
 
 # Record dex-preopt config.
 DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
@@ -138,6 +146,6 @@
 
 
 # Make sure to install the .odex and .vdex when you run "make <module_name>"
-$(my_all_targets): $(installed_odex) $(installed_vdex)
+$(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
 
 endif # LOCAL_DEX_PREOPT
diff --git a/core/envsetup.mk b/core/envsetup.mk
index d8dcfd9..b0f35b1 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -188,7 +188,15 @@
 $(error TARGET_COPY_OUT_VENDOR must be set to 'vendor' to use a vendor image)
 endif
 ###########################################
-
+# Ensure that TARGET_RECOVERY_UPDATER_LIBS and AB_OTA_UPDATER are not both set.
+TARGET_RECOVERY_UPDATER_LIBS ?=
+AB_OTA_UPDATER ?=
+.KATI_READONLY := TARGET_RECOVERY_UPDATER_LIBS AB_OTA_UPDATER
+ifeq ($(AB_OTA_UPDATER),true)
+  ifneq ($(strip $(TARGET_RECOVERY_UPDATER_LIBS)),)
+    $(error Do not use TARGET_RECOVERY_UPDATER_LIBS when using AB_OTA_UPDATER)
+  endif
+endif
 
 # ---------------------------------------------------------------
 # Set up configuration for target machine.
@@ -268,6 +276,7 @@
 HOST_CROSS_OUT_SHARED_LIBRARIES := $(HOST_CROSS_OUT)/lib
 HOST_CROSS_OUT_NATIVE_TESTS := $(HOST_CROSS_OUT)/nativetest
 HOST_CROSS_OUT_COVERAGE := $(HOST_CROSS_OUT)/coverage
+HOST_OUT_TESTCASES := $(HOST_OUT)/testcases
 
 HOST_OUT_INTERMEDIATES := $(HOST_OUT)/obj
 HOST_OUT_INTERMEDIATE_LIBRARIES := $(HOST_OUT_INTERMEDIATES)/lib
@@ -296,6 +305,7 @@
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_EXECUTABLES := $(HOST_OUT_EXECUTABLES)
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_JAVA_LIBRARIES := $(HOST_OUT_JAVA_LIBRARIES)
 $(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_NATIVE_TESTS := $(HOST_OUT)/nativetest
+$(HOST_2ND_ARCH_VAR_PREFIX)HOST_OUT_TESTCASES := $(HOST_OUT_TESTCASES)
 
 # The default host library path.
 # It always points to the path where we build libraries in the default bitness.
@@ -343,6 +353,7 @@
 TARGET_OUT_ETC := $(TARGET_OUT)/etc
 TARGET_OUT_NOTICE_FILES := $(TARGET_OUT_INTERMEDIATES)/NOTICE_FILES
 TARGET_OUT_FAKE := $(PRODUCT_OUT)/fake_packages
+TARGET_OUT_TESTCASES := $(PRODUCT_OUT)/testcases
 
 TARGET_OUT_SYSTEM_OTHER := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM_OTHER)
 
@@ -365,6 +376,7 @@
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_EXECUTABLES := $(TARGET_OUT_EXECUTABLES)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_APPS := $(TARGET_OUT_APPS)
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_APPS_PRIVILEGED := $(TARGET_OUT_APPS_PRIVILEGED)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_TESTCASES := $(TARGET_OUT_TESTCASES)
 
 TARGET_OUT_DATA := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_DATA)
 TARGET_OUT_DATA_EXECUTABLES := $(TARGET_OUT_EXECUTABLES)
@@ -422,6 +434,7 @@
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_SHARED_LIBRARIES := $(target_out_vendor_shared_libraries_base)/lib
 endif
 $(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS := $(TARGET_OUT_VENDOR_APPS)
+$(TARGET_2ND_ARCH_VAR_PREFIX)TARGET_OUT_VENDOR_APPS_PRIVILEGED := $(TARGET_OUT_VENDOR_APPS_PRIVILEGED)
 
 TARGET_OUT_OEM := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_OEM)
 TARGET_OUT_OEM_EXECUTABLES := $(TARGET_OUT_OEM)/bin
diff --git a/core/host_test_internal.mk b/core/host_test_internal.mk
index 473815b..ffb22c7 100644
--- a/core/host_test_internal.mk
+++ b/core/host_test_internal.mk
@@ -2,15 +2,17 @@
 ## Shared definitions for all host test compilations.
 #####################################################
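+# Modules may opt out of the implicit gtest dependency with
+# LOCAL_GTEST := false (it defaults to true in clear_vars.mk).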
 
-LOCAL_CFLAGS_windows += -DGTEST_OS_WINDOWS
-LOCAL_CFLAGS_linux += -DGTEST_OS_LINUX
-LOCAL_LDLIBS_linux += -lpthread
-LOCAL_CFLAGS_darwin += -DGTEST_OS_MAC
-LOCAL_LDLIBS_darwin += -lpthread
+ifeq ($(LOCAL_GTEST),true)
+  LOCAL_CFLAGS_windows += -DGTEST_OS_WINDOWS
+  LOCAL_CFLAGS_linux += -DGTEST_OS_LINUX
+  LOCAL_LDLIBS_linux += -lpthread
+  LOCAL_CFLAGS_darwin += -DGTEST_OS_MAC
+  LOCAL_LDLIBS_darwin += -lpthread
 
-LOCAL_CFLAGS += -DGTEST_HAS_STD_STRING -O0 -g
+  LOCAL_CFLAGS += -DGTEST_HAS_STD_STRING -O0 -g
 
-LOCAL_STATIC_LIBRARIES += libgtest_main_host libgtest_host
+  LOCAL_STATIC_LIBRARIES += libgtest_main_host libgtest_host
+endif
 
 ifdef LOCAL_MODULE_PATH
 $(error $(LOCAL_PATH): Do not set LOCAL_MODULE_PATH when building test $(LOCAL_MODULE))
diff --git a/core/java.mk b/core/java.mk
index 9199f7c..baf097b 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -148,7 +148,7 @@
 ifneq ($(renderscript_sources),)
 renderscript_sources_fullpath := $(addprefix $(LOCAL_PATH)/, $(renderscript_sources))
 RenderScript_file_stamp := $(LOCAL_INTERMEDIATE_SOURCE_DIR)/RenderScript.stamp
-renderscript_intermediate.COMMON := $(LOCAL_INTERMEDIATE_SOURCE_DIR)/renderscript
+renderscript_intermediate.COMMON := $(intermediates.COMMON)/renderscript
 
 # Defaulting to an empty string uses the latest available platform SDK.
 renderscript_target_api :=
@@ -192,7 +192,7 @@
 else
 LOCAL_RENDERSCRIPT_INCLUDES := \
     $(TOPDIR)external/clang/lib/Headers \
-    $(TOPDIR)frameworks/rs/scriptc \
+    $(TOPDIR)frameworks/rs/script_api/include \
     $(LOCAL_RENDERSCRIPT_INCLUDES)
 endif
 
@@ -353,6 +353,9 @@
 
 include $(BUILD_SYSTEM)/java_common.mk
 
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HAS_RS_SOURCES := $(if $(renderscript_sources),true)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RS_SOURCE_INTERMEDIATES_DIR := $(intermediates.COMMON)/renderscript
+
 #######################################
 # defines built_odex along with rule to install odex
 include $(BUILD_SYSTEM)/dex_preopt_odex_install.mk
diff --git a/core/java_common.mk b/core/java_common.mk
index 1119a37..dbdea26 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -39,9 +39,7 @@
 ifneq ($(proto_sources),)
 proto_sources_fullpath := $(addprefix $(LOCAL_PATH)/, $(proto_sources))
 
-# By putting the generated java files into $(LOCAL_INTERMEDIATE_SOURCE_DIR), they will be
-# automatically found by the java compiling function transform-java-to-classes.jar.
-proto_java_intemediate_dir := $(LOCAL_INTERMEDIATE_SOURCE_DIR)/proto
+proto_java_intemediate_dir := $(intermediates.COMMON)/proto
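+# Proto-generated java now lives under the common intermediates instead of
+# $(LOCAL_INTERMEDIATE_SOURCE_DIR), so definitions.mk adds it to the
+# java-source-list explicitly via PRIVATE_HAS_PROTO_SOURCES.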
 proto_java_sources_file_stamp := $(proto_java_intemediate_dir)/Proto.stamp
 proto_java_sources_dir := $(proto_java_intemediate_dir)/src
 
@@ -162,6 +160,9 @@
 
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_CLASS_INTERMEDIATES_DIR := $(intermediates.COMMON)/classes
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_SOURCE_INTERMEDIATES_DIR := $(intermediates.COMMON)/src
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HAS_PROTO_SOURCES := $(if $(proto_sources),true)
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_PROTO_SOURCE_INTERMEDIATES_DIR := $(intermediates.COMMON)/proto
+$(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_HAS_RS_SOURCES :=
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_JAVA_SOURCES := $(all_java_sources)
 
 $(LOCAL_INTERMEDIATE_TARGETS): PRIVATE_RMTYPEDEFS := $(LOCAL_RMTYPEDEFS)
diff --git a/core/local_vndk.mk b/core/local_vndk.mk
index f81249b..f7970f0 100644
--- a/core/local_vndk.mk
+++ b/core/local_vndk.mk
@@ -1,3 +1,18 @@
+
+# Set LOCAL_USE_VNDK for modules going into the vendor partition, except for
+# host modules. If LOCAL_SDK_VERSION is set, that is a more restrictive set,
+# so they don't need LOCAL_USE_VNDK.
+ifndef LOCAL_IS_HOST_MODULE
+ifndef LOCAL_SDK_VERSION
+  ifneq (,$(filter true,$(LOCAL_PROPRIETARY_MODULE) $(LOCAL_ODM_MODULE) $(LOCAL_OEM_MODULE)))
+    LOCAL_USE_VNDK:=true
+  else
+    ifneq (,$(filter $(TARGET_OUT_VENDOR)%,$(LOCAL_MODULE_PATH) $(LOCAL_MODULE_PATH_32) $(LOCAL_MODULE_PATH_64)))
+      LOCAL_USE_VNDK:=true
+    endif
+  endif
+endif
+endif
+
 # Verify LOCAL_USE_VNDK usage, and set LOCAL_SDK_VERSION if necessary
 
 ifdef LOCAL_IS_HOST_MODULE
diff --git a/core/main.mk b/core/main.mk
index 0d15c3f..dff0163 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -9,6 +9,26 @@
 SHELL := /bin/bash
 endif
 
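+# Optional new entry point: when USE_SOONG_UI=true, forward every make goal
+# to soong_ui.bash via makeparallel instead of evaluating the rest of this
+# makefile (everything down to the matching "endif # USE_SOONG_UI").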
+ifndef KATI
+USE_SOONG_UI ?= false
+endif
+ifeq ($(USE_SOONG_UI),true)
+
+host_prebuilts := linux-x86
+ifeq ($(shell uname),Darwin)
+host_prebuilts := darwin-x86
+endif
+
+.PHONY: run_soong_ui
+run_soong_ui:
+	+@prebuilts/build-tools/$(host_prebuilts)/bin/makeparallel --ninja build/soong/soong_ui.bash --make-mode $(MAKECMDGOALS)
+
+.PHONY: $(MAKECMDGOALS)
+$(sort $(MAKECMDGOALS)) : run_soong_ui
+	@#empty
+
+else # USE_SOONG_UI
+
 # Absolute path of the present working directory.
 # This overrides the shell variable $PWD, which does not necessarily point to
 # the top of the source tree, for example when "make -C" is used in m/mm/mmm.
@@ -230,6 +250,31 @@
 EMMA_INSTRUMENT := true
 endif
 
+#
+# -----------------------------------------------------------------
+# Validate ADDITIONAL_DEFAULT_PROPERTIES.
+ifneq ($(ADDITIONAL_DEFAULT_PROPERTIES),)
+$(error ADDITIONAL_DEFAULT_PROPERTIES must not be set before here: $(ADDITIONAL_DEFAULT_PROPERTIES))
+endif
+
+#
+# -----------------------------------------------------------------
+# Validate ADDITIONAL_BUILD_PROPERTIES.
+ifneq ($(ADDITIONAL_BUILD_PROPERTIES),)
+$(error ADDITIONAL_BUILD_PROPERTIES must not be set before here: $(ADDITIONAL_BUILD_PROPERTIES))
+endif
+
+#
+# -----------------------------------------------------------------
+# Add the product-defined properties to the build properties.
+ifdef PRODUCT_SHIPPING_API_LEVEL
+ADDITIONAL_BUILD_PROPERTIES += \
+  ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
+endif
+ADDITIONAL_BUILD_PROPERTIES := \
+  $(ADDITIONAL_BUILD_PROPERTIES) \
+  $(PRODUCT_PROPERTY_OVERRIDES)
+
 # Bring in standard build system definitions.
 include $(BUILD_SYSTEM)/definitions.mk
 
@@ -447,8 +492,12 @@
 FULL_BUILD := true
 
 # Before we go and include all of the module makefiles, mark the PRODUCT_*
-# values readonly so that they won't be modified.
+# and ADDITIONAL*PROPERTIES values readonly so that they won't be modified.
 $(call readonly-product-vars)
+ADDITIONAL_DEFAULT_PROPERTIES := $(strip $(ADDITIONAL_DEFAULT_PROPERTIES))
+.KATI_READONLY := ADDITIONAL_DEFAULT_PROPERTIES
+ADDITIONAL_BUILD_PROPERTIES := $(strip $(ADDITIONAL_BUILD_PROPERTIES))
+.KATI_READONLY := ADDITIONAL_BUILD_PROPERTIES
 
 ifneq ($(ONE_SHOT_MAKEFILE),)
 # We've probably been invoked by the "mm" shell function
@@ -510,9 +559,6 @@
 
 endif # ONE_SHOT_MAKEFILE
 
-# Now with all Android.mks loaded we can do post cleaning steps.
-include $(BUILD_SYSTEM)/post_clean.mk
-
 # -------------------------------------------------------------------
 # All module makefiles have been included at this point.
 # -------------------------------------------------------------------
@@ -1087,3 +1133,4 @@
 all_link_types:
 
 endif # KATI
+endif # USE_SOONG_UI
diff --git a/core/ninja.mk b/core/ninja.mk
index 8a5a904..af2ede0 100644
--- a/core/ninja.mk
+++ b/core/ninja.mk
@@ -97,7 +97,7 @@
 endif
 $(KATI_BUILD_NINJA): $(CKATI) $(MAKEPARALLEL) $(DUMMY_OUT_MKS) run_soong FORCE
 	@echo Running kati to generate build$(KATI_NINJA_SUFFIX).ninja...
-	+$(hide) $(KATI_MAKEPARALLEL) $(CKATI) --ninja --ninja_dir=$(OUT_DIR) --ninja_suffix=$(KATI_NINJA_SUFFIX) --regen --ignore_dirty=$(OUT_DIR)/% --no_ignore_dirty=$(SOONG_OUT_DIR)/%.mk --ignore_optional_include=$(OUT_DIR)/%.P --detect_android_echo $(KATI_FIND_EMULATOR) -f build/core/main.mk $(KATI_GOALS) --gen_all_targets BUILDING_WITH_NINJA=true SOONG_ANDROID_MK=$(SOONG_ANDROID_MK) SOONG_MAKEVARS_MK=$(SOONG_MAKEVARS_MK)
+	+$(hide) $(KATI_MAKEPARALLEL) $(CKATI) --ninja --ninja_dir=$(OUT_DIR) --ninja_suffix=$(KATI_NINJA_SUFFIX) --regen --ignore_optional_include=$(OUT_DIR)/%.P --detect_android_echo $(KATI_FIND_EMULATOR) -f build/core/main.mk $(KATI_GOALS) --gen_all_targets BUILDING_WITH_NINJA=true SOONG_ANDROID_MK=$(SOONG_ANDROID_MK) SOONG_MAKEVARS_MK=$(SOONG_MAKEVARS_MK)
 
 .PHONY: FORCE
 FORCE:
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 5dd021c..694716a 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -585,12 +585,15 @@
 cts_testcase_file := $(foreach s,$(my_split_suffixes),$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_$(s).apk)
 $(cts_testcase_file) : $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_%.apk : $(built_module_path)/package_%.apk | $(ACP)
 	$(copy-file-to-new-target)
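+# Also copy each split apk into the per-module testcases directory.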
+common_testcase_file := $(foreach s,$(my_split_suffixes),$($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)/$(LOCAL_MODULE)_$(s).apk)
+$(common_testcase_file) : $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)/$(LOCAL_MODULE)_%.apk : $(built_module_path)/package_%.apk
+	$(copy-file-to-new-target)
 
 COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES := \
   $(COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES) \
-  $(cts_testcase_file)
+  $(cts_testcase_file) $(common_testcase_file)
 
-$(my_all_targets) : $(cts_testcase_file)
+$(my_all_targets) : $(cts_testcase_file) $(common_testcase_file)
 endif # LOCAL_COMPATIBILITY_SUITE
 endif # LOCAL_PACKAGE_SPLITS
 
diff --git a/core/pathmap.mk b/core/pathmap.mk
index ce1754e..c328e58 100644
--- a/core/pathmap.mk
+++ b/core/pathmap.mk
@@ -114,7 +114,6 @@
         v7/cardview \
         v7/mediarouter \
         v7/palette \
-        v8/renderscript \
         v13 \
         v17/leanback \
         design \
@@ -141,6 +140,7 @@
 FRAMEWORKS_SUPPORT_JAVA_SRC_DIRS := \
 	$(addprefix frameworks/support/,$(FRAMEWORKS_SUPPORT_SUBDIRS)) \
 	$(addprefix frameworks/,$(FRAMEWORKS_MULTIDEX_SUBDIRS)) \
+        frameworks/rs/support \
         frameworks/support/graphics/drawable/animated \
         frameworks/support/graphics/drawable/static \
 	frameworks/support/v7/appcompat/src \
@@ -156,6 +156,7 @@
     android-support-animatedvectordrawable \
     android-support-v7-appcompat \
     android-support-v7-recyclerview \
+    android-support-v8-renderscript \
     android-support-multidex \
     android-support-multidex-instrumentation
 
diff --git a/core/post_clean.mk b/core/post_clean.mk
deleted file mode 100644
index 553f728..0000000
--- a/core/post_clean.mk
+++ /dev/null
@@ -1,60 +0,0 @@
-# Copyright (C) 2012 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Clean steps that need global knowledge of individual modules.
-# This file must be included after all Android.mks have been loaded.
-
-#######################################################
-# Check if we need to delete obsolete generated java files.
-# When a proto/etc file gets deleted (or renamed), the generated java file is obsolete.
-previous_gen_java_config := $(TARGET_OUT_COMMON_INTERMEDIATES)/previous_gen_java_config.mk
-current_gen_java_config := $(TARGET_OUT_COMMON_INTERMEDIATES)/current_gen_java_config.mk
-
-$(shell rm -rf $(current_gen_java_config) \
-  && mkdir -p $(dir $(current_gen_java_config))\
-  && touch $(current_gen_java_config))
--include $(previous_gen_java_config)
-
-intermediates_to_clean :=
-modules_with_gen_java_files :=
-$(foreach p, $(ALL_MODULES), \
-  $(eval gs := $(strip $(ALL_MODULES.$(p).PROTO_FILES)\
-                       $(ALL_MODULES.$(p).RS_FILES)))\
-  $(if $(gs),\
-    $(eval modules_with_gen_java_files += $(p))\
-    $(shell echo 'GEN_SRC_FILES.$(p) := $(gs)' >> $(current_gen_java_config)))\
-  $(if $(filter-out $(gs),$(GEN_SRC_FILES.$(p))),\
-    $(eval intermediates_to_clean += $(ALL_MODULES.$(p).INTERMEDIATE_SOURCE_DIR))))
-intermediates_to_clean := $(strip $(intermediates_to_clean))
-ifdef intermediates_to_clean
-$(info *** Obsolete generated java files detected, clean intermediate files...)
-$(info *** rm -rf $(intermediates_to_clean))
-$(shell rm -rf $(intermediates_to_clean))
-intermediates_to_clean :=
-endif
-
-# For modules not loaded by the current build (e.g. you are running mm/mmm),
-# we copy the info from the previous build.
-$(foreach p, $(filter-out $(ALL_MODULES),$(MODULES_WITH_GEN_JAVA_FILES)),\
-  $(shell echo 'GEN_SRC_FILES.$(p) := $(GEN_SRC_FILES.$(p))' >> $(current_gen_java_config)))
-MODULES_WITH_GEN_JAVA_FILES := $(sort $(MODULES_WITH_GEN_JAVA_FILES) $(modules_with_gen_java_files))
-$(shell echo 'MODULES_WITH_GEN_JAVA_FILES := $(MODULES_WITH_GEN_JAVA_FILES)' >> $(current_gen_java_config))
-
-# Now current becomes previous.
-$(shell cmp $(current_gen_java_config) $(previous_gen_java_config) > /dev/null 2>&1 || mv -f $(current_gen_java_config) $(previous_gen_java_config))
-
-MODULES_WITH_GEN_JAVA_FILES :=
-modules_with_gen_java_files :=
-previous_gen_java_config :=
-current_gen_java_config :=
diff --git a/core/prebuilt.mk b/core/prebuilt.mk
index 5831e24..839e14f 100644
--- a/core/prebuilt.mk
+++ b/core/prebuilt.mk
@@ -15,7 +15,7 @@
 
   ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
     # Only support prebuilt shared/static libraries and native tests for translated arch
-    ifeq ($(filter SHARED_LIBRARIES STATIC_LIBRARIES,$(LOCAL_MODULE_CLASS)),)
+    ifeq ($(filter SHARED_LIBRARIES STATIC_LIBRARIES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
       LOCAL_MULTILIB := first
     endif
   endif
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index d9f9ea8..10512ff 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -299,7 +299,7 @@
 endif
 $(built_module): PRIVATE_EMBEDDED_JNI_LIBS := $(embedded_prebuilt_jni_libs)
 
-$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(SIGNAPK_JAR) $(AAPT)
+$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(SIGNAPK_JAR)
 	$(transform-prebuilt-to-target)
 	$(uncompress-shared-libs)
 ifdef LOCAL_DEX_PREOPT
@@ -346,7 +346,7 @@
 $(built_apk_splits) : $(LOCAL_CERTIFICATE).pk8 $(LOCAL_CERTIFICATE).x509.pem
 $(built_apk_splits) : PRIVATE_PRIVATE_KEY := $(LOCAL_CERTIFICATE).pk8
 $(built_apk_splits) : PRIVATE_CERTIFICATE := $(LOCAL_CERTIFICATE).x509.pem
-$(built_apk_splits) : $(built_module_path)/%.apk : $(my_src_dir)/%.apk | $(AAPT)
+$(built_apk_splits) : $(built_module_path)/%.apk : $(my_src_dir)/%.apk
 	$(copy-file-to-new-target)
 	$(sign-package)
 
diff --git a/core/product.mk b/core/product.mk
index e2a5339..7d7c68b 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -127,6 +127,7 @@
     VENDOR_PRODUCT_RESTRICT_VENDOR_FILES \
     VENDOR_EXCEPTION_MODULES \
     VENDOR_EXCEPTION_PATHS \
+    PRODUCT_ART_USE_READ_BARRIER \
 
 
 
@@ -288,7 +289,8 @@
 _product_stash_var_list += \
 	DEFAULT_SYSTEM_DEV_CERTIFICATE \
 	WITH_DEXPREOPT \
-	WITH_DEXPREOPT_BOOT_IMG_ONLY
+	WITH_DEXPREOPT_BOOT_IMG_ONLY \
+	WITH_DEXPREOPT_APP_IMAGE
 
 #
 # Mark the variables in _product_stash_var_list as readonly
diff --git a/core/product_config.mk b/core/product_config.mk
index ea20bcb..8943429 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -354,18 +354,16 @@
 # whitespace characters on either side of the '='.
 PRODUCT_PROPERTY_OVERRIDES := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_PROPERTY_OVERRIDES))
+.KATI_READONLY := PRODUCT_PROPERTY_OVERRIDES
 
 PRODUCT_SHIPPING_API_LEVEL := $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SHIPPING_API_LEVEL))
-ifdef PRODUCT_SHIPPING_API_LEVEL
-ADDITIONAL_BUILD_PROPERTIES += \
-    ro.product.first_api_level=$(PRODUCT_SHIPPING_API_LEVEL)
-endif
 
 # A list of property assignments, like "key = value", with zero or more
 # whitespace characters on either side of the '='.
 # used for adding properties to default.prop
 PRODUCT_DEFAULT_PROPERTY_OVERRIDES := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_DEFAULT_PROPERTY_OVERRIDES))
+.KATI_READONLY := PRODUCT_DEFAULT_PROPERTY_OVERRIDES
 
 # Should we use the default resources or add any product specific overlays
 PRODUCT_PACKAGE_OVERLAYS := \
@@ -377,11 +375,6 @@
 PRODUCT_VENDOR_KERNEL_HEADERS := \
     $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_KERNEL_HEADERS)
 
-# Add the product-defined properties to the build properties.
-ADDITIONAL_BUILD_PROPERTIES := \
-    $(ADDITIONAL_BUILD_PROPERTIES) \
-    $(PRODUCT_PROPERTY_OVERRIDES)
-
 # The OTA key(s) specified by the product config, if any.  The names
 # of these keys are stored in the target-files zip so that post-build
 # signing tools can substitute them for the test key embedded by
@@ -423,3 +416,7 @@
     $(eval cf := $(subst $(_PSMC_SP_PLACE_HOLDER),$(space),$(cf)))\
     $(eval SANITIZER.$(TARGET_PRODUCT).$(m).CONFIG := $(cf))))
 _psmc_modules :=
+
+# Make this art variable visible to soong_config.mk.
+PRODUCT_ART_USE_READ_BARRIER := \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ART_USE_READ_BARRIER))
diff --git a/core/setup_one_odex.mk b/core/setup_one_odex.mk
index 37aeb60..0afc5b7 100644
--- a/core/setup_one_odex.mk
+++ b/core/setup_one_odex.mk
@@ -36,6 +36,17 @@
 
 my_built_vdex := $(patsubst %.odex,%.vdex,$(my_built_odex))
 my_installed_vdex := $(patsubst %.odex,%.vdex,$(my_installed_odex))
+my_installed_art := $(patsubst %.odex,%.art,$(my_installed_odex))
+
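+# When WITH_DEXPREOPT_APP_IMAGE is set, have dex2oat also emit an lz4 app
+# image (.art) next to the .odex, and install it alongside.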
+ifeq (true,$(WITH_DEXPREOPT_APP_IMAGE))
+my_built_art := $(patsubst %.odex,%.art,$(my_built_odex))
+$(my_built_odex): PRIVATE_ART_FILE_PREOPT_FLAGS := --app-image-file=$(my_built_art) \
+    --image-format=lz4
+$(eval $(call copy-one-file,$(my_built_art),$(my_installed_art)))
+built_art += $(my_built_art)
+installed_art += $(my_installed_art)
+built_installed_art += $(my_built_art):$(my_installed_art)
+endif
 
 $(eval $(call copy-one-file,$(my_built_odex),$(my_installed_odex)))
 $(eval $(call copy-one-file,$(my_built_vdex),$(my_installed_vdex)))
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 94cd4a0..ad2f204 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -62,7 +62,12 @@
 	echo '    "CrossHost": "$(HOST_CROSS_OS)",'; \
 	echo '    "CrossHostArch": "$(HOST_CROSS_ARCH)",'; \
 	echo '    "CrossHostSecondaryArch": "$(HOST_CROSS_2ND_ARCH)",'; \
-	echo '    "Safestack": $(if $(filter true,$(USE_SAFESTACK)),true,false)'; \
+	echo '    "Safestack": $(if $(filter true,$(USE_SAFESTACK)),true,false),'; \
+	echo '    "EnableCFI": $(if $(filter true,$(ENABLE_CFI)),true,false),'; \
+	echo ''; \
+	echo '    "ArtUseReadBarrier": $(if $(filter false,$(PRODUCT_ART_USE_READ_BARRIER)),false,true),'; \
+	echo ''; \
+	echo '    "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)"'; \
 	echo '}') > $(SOONG_VARIABLES_TMP); \
 	if ! cmp -s $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); then \
 	  mv $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); \
diff --git a/core/target_test_internal.mk b/core/target_test_internal.mk
index 2e65218..59a3a9e 100644
--- a/core/target_test_internal.mk
+++ b/core/target_test_internal.mk
@@ -2,11 +2,12 @@
 ## Shared definitions for all target test compilations.
 #######################################################
 
-LOCAL_CFLAGS += -DGTEST_OS_LINUX_ANDROID -DGTEST_HAS_STD_STRING
+ifeq ($(LOCAL_GTEST),true)
+  LOCAL_CFLAGS += -DGTEST_OS_LINUX_ANDROID -DGTEST_HAS_STD_STRING
 
-ifndef LOCAL_SDK_VERSION
+  ifndef LOCAL_SDK_VERSION
     LOCAL_STATIC_LIBRARIES += libgtest_main libgtest
-else
+  else
     ifneq (,$(filter c++_%,$(LOCAL_NDK_STL_VARIANT)))
         my_ndk_gtest_suffix := _c++
     else ifneq ($(filter stlport_,$(LOCAL_NDK_STL_VARIANT)),)
@@ -19,6 +20,7 @@
     LOCAL_STATIC_LIBRARIES += \
         libgtest_main_ndk$(my_ndk_gtest_suffix) \
         libgtest_ndk$(my_ndk_gtest_suffix)
+  endif
 endif
 
 ifdef LOCAL_MODULE_PATH
diff --git a/core/tasks/check_boot_jars/package_whitelist.txt b/core/tasks/check_boot_jars/package_whitelist.txt
index ae69099..1889117 100644
--- a/core/tasks/check_boot_jars/package_whitelist.txt
+++ b/core/tasks/check_boot_jars/package_whitelist.txt
@@ -29,6 +29,11 @@
 java\.sql
 java\.text
 java\.text\.spi
+java\.time
+java\.time\.chrono
+java\.time\.format
+java\.time\.temporal
+java\.time\.zone
 java\.util
 java\.util\.concurrent
 java\.util\.concurrent\.atomic
diff --git a/core/tasks/tools/build_custom_image.mk b/core/tasks/tools/build_custom_image.mk
index 34bbfce..f0db476 100644
--- a/core/tasks/tools/build_custom_image.mk
+++ b/core/tasks/tools/build_custom_image.mk
@@ -90,7 +90,7 @@
 	$(hide) echo "mount_point=$(PRIVATE_MOUNT_POINT)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
 	$(hide) echo "fs_type=$(PRIVATE_FILE_SYSTEM_TYPE)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
 	$(hide) echo "partition_size=$(PRIVATE_PARTITION_SIZE)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
-	$(hide) echo "ext_mkuserimg=$(MKEXTUSERIMG)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
+	$(hide) echo "ext_mkuserimg=$(notdir $(MKEXTUSERIMG))" >> $(PRIVATE_INTERMEDIATES)/image_info.txt
 	$(if $(PRIVATE_SELINUX),$(hide) echo "selinux_fc=$(SELINUX_FC)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt)
 	$(if $(PRIVATE_SUPPORT_VERITY),\
 	  $(hide) echo "verity=$(PRIVATE_SUPPORT_VERITY)" >> $(PRIVATE_INTERMEDIATES)/image_info.txt;\
diff --git a/core/tasks/tools/package-modules.mk b/core/tasks/tools/package-modules.mk
index 24a7608..ef49d90 100644
--- a/core/tasks/tools/package-modules.mk
+++ b/core/tasks/tools/package-modules.mk
@@ -14,10 +14,20 @@
 my_copy_pairs :=
 my_pickup_files :=
 
+# Iterate over the modules and include the direct dependencies they declare
+# in LOCAL_REQUIRED_MODULES.
+my_modules_and_deps := $(my_modules)
+$(foreach m,$(my_modules),\
+  $(eval _explicitly_required := \
+    $(strip $(ALL_MODULES.$(m).EXPLICITLY_REQUIRED)\
+    $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).EXPLICITLY_REQUIRED)))\
+  $(eval my_modules_and_deps += $(_explicitly_required))\
+)
+
 # Iterate over modules' built files and installed files;
 # Calculate the dest files in the output zip file.
 
-$(foreach m,$(my_modules),\
+$(foreach m,$(my_modules_and_deps),\
   $(eval _pickup_files := $(strip $(ALL_MODULES.$(m).PICKUP_FILES)\
     $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).PICKUP_FILES)))\
   $(eval _built_files := $(strip $(ALL_MODULES.$(m).BUILT_INSTALLED)\
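The expansion above is deliberately shallow: only the direct LOCAL_REQUIRED_MODULES of the listed modules are appended, not their transitive requirements. A small Python sketch of the same one-level expansion, with hypothetical module names:

    # module -> its EXPLICITLY_REQUIRED list (hypothetical data)
    required = {'my_test': ['libfoo', 'libbar'], 'libfoo': ['libbaz']}

    def modules_and_deps(modules):
        out = list(modules)
        for mod in modules:
            out += required.get(mod, [])  # one level, like the foreach above
        return out

    print(modules_and_deps(['my_test']))  # ['my_test', 'libfoo', 'libbar']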
diff --git a/core/tasks/vts.mk b/core/tasks/vts.mk
deleted file mode 100644
index 507f22e..0000000
--- a/core/tasks/vts.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright (C) 2016 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-test_suite_name := vts
-test_suite_tradefed := vts-tradefed
-test_suite_readme := test/vts/README.md
-
-include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
-
-.PHONY: vts
-vts: $(compatibility_zip)
-$(call dist-for-goals, vts, $(compatibility_zip))
diff --git a/target/board/generic/sepolicy/goldfish_setup.te b/target/board/generic/sepolicy/goldfish_setup.te
index bc25967..a863aa6 100644
--- a/target/board/generic/sepolicy/goldfish_setup.te
+++ b/target/board/generic/sepolicy/goldfish_setup.te
@@ -14,6 +14,7 @@
 allow goldfish_setup self:udp_socket create_socket_perms;
 allowxperm goldfish_setup self:udp_socket ioctl priv_sock_ioctls;
 
+wakelock_use(goldfish_setup)
 net_domain(goldfish_setup)
 
 # Set net.eth0.dns*, debug.sf.nobootanimation
diff --git a/target/board/generic/sepolicy/logd.te b/target/board/generic/sepolicy/logd.te
deleted file mode 100644
index b3e60d7..0000000
--- a/target/board/generic/sepolicy/logd.te
+++ /dev/null
@@ -1,11 +0,0 @@
-# goldfish logcat service:  runs logcat -Q in logd domain
-
-# See global logd.te, these only set for eng & userdebug, allow for all builds
-
-domain_auto_trans(init, logcat_exec, logd)
-
-# Read from logd.
-read_logd(logd)
-
-# Write to /dev/ttyS2 and /dev/ttyGF2.
-allow logd serial_device:chr_file { write open };
diff --git a/target/board/generic/sepolicy/logpersist.te b/target/board/generic/sepolicy/logpersist.te
new file mode 100644
index 0000000..0c52986
--- /dev/null
+++ b/target/board/generic/sepolicy/logpersist.te
@@ -0,0 +1,12 @@
+# goldfish logcat service:  runs logcat -Q in logpersist domain
+
+# See global logcat.te/logpersist.te, only set for eng & userdebug,
+# allow for all builds in a non-conflicting manner.
+
+domain_auto_trans(init, logcat_exec, logpersist)
+
+# Read from logd.
+unix_socket_connect(logpersist, logdr, logd)
+
+# Write to /dev/ttyS2 and /dev/ttyGF2.
+allow logpersist serial_device:chr_file { write open };
diff --git a/target/product/core.mk b/target/product/core.mk
index 0a4e0fd..10b2c9e 100644
--- a/target/product/core.mk
+++ b/target/product/core.mk
@@ -44,6 +44,7 @@
     Launcher2 \
     ManagedProvisioning \
     MtpDocumentsProvider \
+    NetworkRecommendation \
     PicoTts \
     PacProcessor \
     libpac \
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index 48916b2..c94abc3 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -20,11 +20,14 @@
 PRODUCT_PACKAGES += \
     adb \
     adbd \
+    android.hidl.memory@1.0-service \
+    android.hidl.memory@1.0-impl \
     atrace \
     bootanimation \
     bootstat \
     cmd \
-    debuggerd \
+    crash_dump \
+    debuggerd \
     dumpstate \
     dumpsys \
     fastboot \
@@ -66,6 +69,7 @@
     lmkd \
     logcat \
     logwrapper \
+    lshal \
     mkshrc \
     reboot \
     recovery \
@@ -73,6 +77,7 @@
     servicemanager \
     sh \
     surfaceflinger \
+    tombstoned \
     toolbox \
     toybox \
     tzdatacheck \
@@ -80,13 +85,26 @@
 # SELinux packages
 PRODUCT_PACKAGES += \
     file_contexts.bin \
+    nonplat_file_contexts \
     nonplat_mac_permissions.xml \
+    nonplat_property_contexts \
+    nonplat_seapp_contexts \
+    nonplat_service_contexts \
+    plat_file_contexts \
     plat_mac_permissions.xml \
-    property_contexts \
-    seapp_contexts \
+    plat_property_contexts \
+    plat_seapp_contexts \
+    plat_service_contexts \
     selinux_version \
-    sepolicy \
-    service_contexts
+    sepolicy
+
+# AID Generation for
+# <pwd.h> and <grp.h>
+PRODUCT_PACKAGES += \
+    passwd \
+    group \
+    fs_config_files \
+    fs_config_dirs
 
 # Ensure that this property is always defined so that bionic_systrace.cpp
 # can rely on it being initially set by init.
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 3dd505f..0f42c27 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -53,6 +53,7 @@
 PRODUCT_PACKAGES += \
     dalvikvm \
     dex2oat \
+    dexoptanalyzer \
     libart \
     libart_fake \
     libopenjdkjvmti \
diff --git a/target/product/telephony.mk b/target/product/telephony.mk
index e840ba1..38a8caa 100644
--- a/target/product/telephony.mk
+++ b/target/product/telephony.mk
@@ -19,6 +19,7 @@
 
 PRODUCT_PACKAGES := \
     CarrierConfig \
+    CarrierDefaultApp \
     Dialer \
     CallLogBackup \
     CellBroadcastReceiver \
diff --git a/tools/fat16copy.py b/tools/fat16copy.py
index af8bd83..c20930a 100755
--- a/tools/fat16copy.py
+++ b/tools/fat16copy.py
@@ -234,11 +234,16 @@
     data.seek(0)
     data_file.write(data.read())
 
-  def new_subdirectory(self, name):
+  def open_subdirectory(self, name):
     """
-    Create a new subdirectory of this directory with the given name.
+    Open a subdirectory of this directory with the given name. If the
+    subdirectory doesn't exist, a new one is created.
     Returns a fat_dir().
     """
+    for dent in self.dentries:
+      if dent.longname == name:
+        return dent.open_directory()
+
     chunk = self.backing.fs.allocate(1)
     (shortname, ext) = self.make_short_name(name)
     new_dentry = self.add_dentry(ATTRIBUTE_SUBDIRECTORY, shortname,
@@ -751,7 +756,7 @@
     base = os.path.basename(item)
     if len(base) == 0:
       base = os.path.basename(item[:-1])
-    sub = directory.new_subdirectory(base)
+    sub = directory.open_subdirectory(base)
     for next_item in sorted(os.listdir(item)):
       add_item(sub, os.path.join(item, next_item))
   else:
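With open_subdirectory(), directory creation becomes get-or-create, so add_item() can copy overlapping host trees into the image without producing duplicate dentries for the same directory name. The shape of the change, as a generic Python sketch (the dict stands in for the FAT directory table):

    def open_subdirectory(dirs, name, make_dir):
        # Reuse an existing entry if present: the new early-return path.
        if name in dirs:
            return dirs[name]
        # Otherwise allocate a fresh one: the old new_subdirectory() path.
        dirs[name] = make_dir()
        return dirs[name]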
diff --git a/tools/fs_config/Android.mk b/tools/fs_config/Android.mk
index fb4a0c4..65f8a08 100644
--- a/tools/fs_config/Android.mk
+++ b/tools/fs_config/Android.mk
@@ -81,19 +81,35 @@
 LOCAL_CFLAGS := -Werror -Wno-error=\#warnings
 
 ifneq ($(TARGET_FS_CONFIG_GEN),)
+system_android_filesystem_config := system/core/include/private/android_filesystem_config.h
+
+# Generate the "generated_oem_aid.h" file
+oem := $(local-generated-sources-dir)/generated_oem_aid.h
+$(oem): PRIVATE_LOCAL_PATH := $(LOCAL_PATH)
+$(oem): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
+$(oem): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
+$(oem): PRIVATE_CUSTOM_TOOL = $(PRIVATE_LOCAL_PATH)/fs_config_generator.py oemaid --aid-header=$(PRIVATE_ANDROID_FS_HDR) $(PRIVATE_TARGET_FS_CONFIG_GEN) > $@
+$(oem): $(TARGET_FS_CONFIG_GEN) $(LOCAL_PATH)/fs_config_generator.py
+	$(transform-generated-source)
+
+# Generate the fs_config header
 gen := $(local-generated-sources-dir)/$(ANDROID_FS_CONFIG_H)
 $(gen): PRIVATE_LOCAL_PATH := $(LOCAL_PATH)
 $(gen): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
-$(gen): PRIVATE_CUSTOM_TOOL = $(PRIVATE_LOCAL_PATH)/fs_config_generator.py $(PRIVATE_TARGET_FS_CONFIG_GEN) > $@
-$(gen): $(TARGET_FS_CONFIG_GEN) $(LOCAL_PATH)/fs_config_generator.py
+$(gen): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
+$(gen): PRIVATE_CUSTOM_TOOL = $(PRIVATE_LOCAL_PATH)/fs_config_generator.py fsconfig --aid-header=$(PRIVATE_ANDROID_FS_HDR) $(PRIVATE_TARGET_FS_CONFIG_GEN) > $@
+$(gen): $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config) $(LOCAL_PATH)/fs_config_generator.py
 	$(transform-generated-source)
 
-LOCAL_GENERATED_SOURCES := $(gen)
+LOCAL_GENERATED_SOURCES := $(oem) $(gen)
+
 my_fs_config_h := $(gen)
+my_gen_oem_aid := $(oem)
 gen :=
+oem :=
 endif
 
-LOCAL_C_INCLUDES := $(dir $(my_fs_config_h))
+LOCAL_C_INCLUDES := $(dir $(my_fs_config_h)) $(dir $(my_gen_oem_aid))
 
 include $(BUILD_HOST_EXECUTABLE)
 fs_config_generate_bin := $(LOCAL_INSTALLED_MODULE)
@@ -122,6 +138,60 @@
 	@mkdir -p $(dir $@)
 	$< -F -o $@
 
+# The newer passwd/group targets are only generated if you
+# use the new TARGET_FS_CONFIG_GEN method.
+ifneq ($(TARGET_FS_CONFIG_GEN),)
+
+##################################
+# Build the oemaid library when fs config files are present.
+# Intentionally break the build if you require the generated AIDs
+# header file but are not using any fs config files.
+include $(CLEAR_VARS)
+LOCAL_MODULE := liboemaids
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(dir $(my_gen_oem_aid))
+LOCAL_EXPORT_C_INCLUDE_DEPS := $(my_gen_oem_aid)
+include $(BUILD_STATIC_LIBRARY)
+
+##################################
+# Generate the system/etc/passwd text file for the target
+# This file may be empty if no AIDs are defined in
+# TARGET_FS_CONFIG_GEN files.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := passwd
+LOCAL_MODULE_CLASS := ETC
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): PRIVATE_LOCAL_PATH := $(LOCAL_PATH)
+$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config)
+	@mkdir -p $(dir $@)
+	$(hide) $< passwd --aid-header=$(PRIVATE_ANDROID_FS_HDR) $(PRIVATE_TARGET_FS_CONFIG_GEN) > $@
+
+##################################
+# Generate the system/etc/group text file for the target
+# This file may be empty if no AIDs are defined in
+# TARGET_FS_CONFIG_GEN files.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := group
+LOCAL_MODULE_CLASS := ETC
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): PRIVATE_LOCAL_PATH := $(LOCAL_PATH)
+$(LOCAL_BUILT_MODULE): PRIVATE_TARGET_FS_CONFIG_GEN := $(TARGET_FS_CONFIG_GEN)
+$(LOCAL_BUILT_MODULE): PRIVATE_ANDROID_FS_HDR := $(system_android_filesystem_config)
+$(LOCAL_BUILT_MODULE): $(LOCAL_PATH)/fs_config_generator.py $(TARGET_FS_CONFIG_GEN) $(system_android_filesystem_config)
+	@mkdir -p $(dir $@)
+	$(hide) $< group --aid-header=$(PRIVATE_ANDROID_FS_HDR) $(PRIVATE_TARGET_FS_CONFIG_GEN) > $@
+
+system_android_filesystem_config :=
+endif
+
 ANDROID_FS_CONFIG_H :=
 my_fs_config_h :=
 fs_config_generate_bin :=
+my_gen_oem_aid :=
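Spelled out, the passwd and group rules above reduce to invocations of the following shape (the device config.fs path is hypothetical):

$ ./fs_config_generator.py passwd --aid-header=system/core/include/private/android_filesystem_config.h device/acme/config.fs > passwd
$ ./fs_config_generator.py group --aid-header=system/core/include/private/android_filesystem_config.h device/acme/config.fs > group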
diff --git a/tools/fs_config/README b/tools/fs_config/README
index 0258687..d884e32 100644
--- a/tools/fs_config/README
+++ b/tools/fs_config/README
@@ -50,12 +50,12 @@
   prefixed with a 0, else mode is used as is.
 
 user:
-  The exact, C define for a valid AID. Note custom AIDs can be defined in the
+  Either the C define for a valid AID or the friendly name. For instance both
+  AID_RADIO and radio are acceptable. Note custom AIDs can be defined in the
   AID section documented below.
 
 group:
-  The exact, C define for a valid AID. Note custom AIDs can be defined in the
-  AID section documented below.
+  Same as user.
 
 caps:
   The name as declared in
@@ -82,7 +82,8 @@
 Where:
 
 [AID_<name>]
-  The <name> can be any valid character for a #define identifier in C.
+  The <name> may contain only uppercase letters, numbers
+  and underscores.
 
 value:
   A valid C style number string. Hex, octal, binary and decimal are supported.
@@ -118,3 +119,24 @@
 representation of value is preserved. Both choices were made for maximum readability of the generated
 file and to line up files. Sync lines are placed with the source file as comments in the generated
 header file.
+
+For OEMs wishing to use the defined AIDs in their native code, one can access the generated header
+file like so:
+  1. In your C code just #include "generated_oem_aid.h" and start using the declared identifiers.
+  2. In your Makefile add this static library like so: LOCAL_STATIC_LIBRARIES := liboemaids
+
+Unit Tests:
+
+From within the fs_config directory, unit tests can be executed like so:
+$ python -m unittest test_fs_config_generator.Tests
+.............
+----------------------------------------------------------------------
+Ran 13 tests in 0.004s
+
+OK
+
+One can also use nose2 if preferred:
+$ nose2
+
+To add new tests, simply add a test_<xxx> method to the test class. It will automatically
+get picked up and added to the test suite.
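Pulling the sections above together, a minimal hypothetical config.fs could look like this; 2900 sits inside a reserved OEM range, and group uses the new friendly-name form:

[AID_VENDOR_FOO]
value: 2900

[vendor/bin/foo]
mode: 0755
user: AID_VENDOR_FOO
group: radio
caps: 0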
diff --git a/tools/fs_config/default/android_filesystem_config.h b/tools/fs_config/default/android_filesystem_config.h
index 820b04a..b7d936a 100644
--- a/tools/fs_config/default/android_filesystem_config.h
+++ b/tools/fs_config/default/android_filesystem_config.h
@@ -19,13 +19,6 @@
 ** by the device side of adb.
 */
 
-/*
-** Resorting to the default file means someone requested fs_config_dirs or
-** fs_config_files in their device configuration without providing an
-** associated header.
-*/
-#warning No device-supplied android_filesystem_config.h, using empty default.
-
 /* Rules for directories.
 ** These rules are applied based on "first match", so they
 ** should start with the most specific path and work their
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 6a16fea..2cf2fd8 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -1,294 +1,1331 @@
 #!/usr/bin/env python
+"""Generates config files for Android file system properties.
 
+This script is used for generating configuration files for configuring
+Android filesystem properties. Internally, it's composed of a pluggable
+interface to support the understanding of new input and output parameters.
+
+Run the help for a list of supported plugins and their capabilities.
+
+Further documentation can be found in the README.
+"""
+
+import argparse
 import ConfigParser
 import re
 import sys
+import textwrap
+
+# Keep the tool in one file to make it easy to run.
+# pylint: disable=too-many-lines
 
 
-GENERATED = '''
-/*
- * THIS IS AN AUTOGENERATED FILE! DO NOT MODIFY
- */
-'''
+# The generator class name is lowercase to be in line with @staticmethod.
+class generator(object):  # pylint: disable=invalid-name
+    """A decorator class to add commandlet plugins.
 
-INCLUDE = '#include <private/android_filesystem_config.h>'
+    Used as a decorator to classes to add them to
+    the internal plugin interface. Plugins added
+    with @generator() are automatically added to
+    the command line.
 
-DEFINE_NO_DIRS = '#define NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS\n'
-DEFINE_NO_FILES = '#define NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_FILES\n'
+    For instance, to add a new generator
+    called foo and have it registered, just do this:
 
-DEFAULT_WARNING = '#warning No device-supplied android_filesystem_config.h, using empty default.'
+        @generator("foo")
+        class FooGen(object):
+            ...
+    """
+    _generators = {}
 
-NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS_ENTRY = '{ 00000, AID_ROOT,      AID_ROOT,      0, "system/etc/fs_config_dirs" },'
-NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_FILES_ENTRY = '{ 00000, AID_ROOT,      AID_ROOT,      0, "system/etc/fs_config_files" },'
+    def __init__(self, gen):
+        """
+        Args:
+            gen (str): The name of the generator to add.
 
-IFDEF_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS = '#ifdef NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS'
-ENDIF = '#endif'
+        Raises:
+            ValueError: If a generator with the same name was already added.
 
-OPEN_FILE_STRUCT = 'static const struct fs_path_config android_device_files[] = {'
-OPEN_DIR_STRUCT = 'static const struct fs_path_config android_device_dirs[] = {'
-CLOSE_FILE_STRUCT = '};'
+        """
+        self._gen = gen
 
-GENERIC_DEFINE = "#define %s\t%s"
+        if gen in generator._generators:
+            raise ValueError('Duplicate generator name: ' + gen)
 
-FILE_COMMENT = '// Defined in file: \"%s\"'
+        generator._generators[gen] = None
 
-# from system/core/include/private/android_filesystem_config.h
-AID_OEM_RESERVED_RANGES = [
-    (2900, 2999),
-    (5000, 5999),
-]
+    def __call__(self, cls):
+
+        generator._generators[self._gen] = cls()
+        return cls
+
+    @staticmethod
+    def get():
+        """Gets the list of generators.
+
+        Returns:
+           The list of registered generators.
+        """
+        return generator._generators
 
 
-AID_MATCH = re.compile('AID_[a-zA-Z]+')
+class Utils(object):
+    """Various assorted static utilities."""
 
-def handle_aid(file_name, section_name, config, aids, seen_aids):
-    value = config.get(section_name, 'value')
+    @staticmethod
+    def in_any_range(value, ranges):
+        """Tests if a value is in a list of given closed range tuples.
 
-    errmsg = '%s for: \"' + section_name + '" file: \"' + file_name + '\"'
+        A range tuple is a closed range. That means it's inclusive of its
+        start and ending values.
 
-    if not value:
-        raise Exception(errmsg % 'Found specified but unset "value"')
+        Args:
+            value (int): The value to test.
+            ranges ([(int, int)]): The closed range list to test value within.
 
-    v = convert_int(value)
-    if not v:
-        raise Exception(errmsg % ('Invalid "value", not a number, got: \"%s\"' % value))
+        Returns:
+            True if value is within the closed range, false otherwise.
+        """
 
-    # Values must be within OEM range
-    if not any(lower <= v <= upper for (lower, upper) in AID_OEM_RESERVED_RANGES):
-        s = '"value" not in valid range %s, got: %s'
-        s = s % (str(AID_OEM_RESERVED_RANGES), value)
-        raise Exception(errmsg % s)
+        return any(lower <= value <= upper for (lower, upper) in ranges)
 
-    # use the normalized int value in the dict and detect
-    # duplicate definitions of the same vallue
-    v = str(v)
-    if v in seen_aids[1]:
-        # map of value to aid name
-        a = seen_aids[1][v]
+    @staticmethod
+    def get_login_and_uid_cleansed(aid):
+        """Returns a passwd/group file safe logon and uid.
 
-        # aid name to file
-        f = seen_aids[0][a]
+        This checks that the logon and uid of the AID do not
+        contain the delimiter ":" for a passwd/group file.
 
-        s = 'Duplicate AID value "%s" found on AID: "%s".' % (value, seen_aids[1][v])
-        s += ' Previous found in file: "%s."' % f
-        raise Exception(errmsg % s)
+        Args:
+            aid (AID): The aid to check
 
-    seen_aids[1][v] = section_name
+        Returns:
+            logon, uid of the AID after checking it's safe.
 
-    # Append a tuple of (AID_*, base10(value), str(value))
-    # We keep the str version of value so we can print that out in the
-    # generated header so investigating parties can identify parts.
-    # We store the base10 value for sorting, so everything is ascending
-    # later.
-    aids.append((file_name, section_name, v, value))
+        Raises:
+            ValueError: If a delimiter character is found.
+        """
+        logon = aid.friendly
+        uid = aid.normalized_value
+        if ':' in uid:
+            raise ValueError(
+                'Cannot specify delimiter character ":" in uid: "%s"' % uid)
+        if ':' in logon:
+            raise ValueError(
+                'Cannot specify delimiter character ":" in logon: "%s"' % logon)
+        return logon, uid
 
-def convert_int(num):
+
+class AID(object):
+    """This class represents an Android ID or an AID.
+
+    Attributes:
+        identifier (str): The identifier name for a #define.
+        value (str): The User Id (uid) of the associated define.
+        found (str): The file it was found in, can be None.
+        normalized_value (str): Same as value, but base 10.
+        friendly (str): The friendly name of aid.
+    """
+
+    PREFIX = 'AID_'
+
+    # Some of the AIDs, like AID_MEDIA_EX, have friendly names like mediaex;
+    # keep a map of things to fix up until we can correct these
+    # at a later date.
+    _FIXUPS = {
+        'media_drm': 'mediadrm',
+        'media_ex': 'mediaex',
+        'media_codec': 'mediacodec'
+    }
+
+    def __init__(self, identifier, value, found):
+        """
+        Args:
+            identifier: The identifier name for a #define <identifier>.
+            value: The value of the AID, aka the uid.
+            found (str): The file found in, not required to be specified.
+
+        Raises:
+            ValueError: if value is not a valid string number as processed by
+                int(x, 0)
+        """
+        self.identifier = identifier
+        self.value = value
+        self.found = found
+        self.normalized_value = str(int(value, 0))
+
+        # Where we calculate the friendly name
+        friendly = identifier[len(AID.PREFIX):].lower()
+        self.friendly = AID._fixup_friendly(friendly)
+
+    def __eq__(self, other):
+
+        return self.identifier == other.identifier \
+            and self.value == other.value and self.found == other.found \
+            and self.normalized_value == other.normalized_value
+
+    @staticmethod
+    def is_friendly(name):
+        """Determines if an AID is a freindly name or C define.
+
+        For example if name is AID_SYSTEM it returns false, if name
+        was system, it would return true.
+
+        Returns:
+            True if name is a friendly name False otherwise.
+        """
+
+        return not name.startswith(AID.PREFIX)
+
+    @staticmethod
+    def _fixup_friendly(friendly):
+        """Fixup friendly names that historically don't follow the convention.
+
+        Args:
+            friendly (str): The friendly name.
+
+        Returns:
+            The fixedup friendly name as a str.
+        """
+
+        if friendly in AID._FIXUPS:
+            return AID._FIXUPS[friendly]
+
+        return friendly
+
+
+class FSConfig(object):
+    """Represents a filesystem config array entry.
+
+    Represents a file system configuration entry for specifying
+    file system capabilities.
+
+    Attributes:
+        mode (str): The mode of the file or directory.
+        user (str): The uid or #define identifier (AID_SYSTEM)
+        group (str): The gid or #define identifier (AID_SYSTEM)
+        caps (str): The capability set.
+        filename (str): The file it was found in.
+    """
+
+    def __init__(self, mode, user, group, caps, path, filename):
+        """
+        Args:
+            mode (str): The mode of the file or directory.
+            user (str): The uid or #define identifier (AID_SYSTEM)
+            group (str): The gid or #define identifier (AID_SYSTEM)
+            caps (str): The capability set as a list.
+            filename (str): The file it was found in.
+        """
+        self.mode = mode
+        self.user = user
+        self.group = group
+        self.caps = caps
+        self.path = path
+        self.filename = filename
+
+    def __eq__(self, other):
+
+        return self.mode == other.mode and self.user == other.user \
+            and self.group == other.group and self.caps == other.caps \
+            and self.path == other.path and self.filename == other.filename
+
+
+class AIDHeaderParser(object):
+    """Parses an android_filesystem_config.h file.
+
+    Parses a C header file and extracts lines starting with #define AID_<name>
+    while capturing the OEM defined ranges and ignoring other ranges. It also
+    skips some hardcoded AIDs it doesn't need to generate a mapping for.
+    It provides some basic sanity checks. The information extracted from this
+    file can later be used to sanity check other things (like oem ranges) as
+    well as generating a mapping of names to uids. It was primarily designed to
+    parse the private/android_filesystem_config.h, but any C header should
+    work.
+    """
+
+
+    _SKIP_AIDS = [
+        re.compile(r'%sUNUSED[0-9].*' % AID.PREFIX),
+        re.compile(r'%sAPP' % AID.PREFIX), re.compile(r'%sUSER' % AID.PREFIX)
+    ]
+    _AID_DEFINE = re.compile(r'\s*#define\s+%s.*' % AID.PREFIX)
+    _OEM_START_KW = 'START'
+    _OEM_END_KW = 'END'
+    _OEM_RANGE = re.compile('%sOEM_RESERVED_[0-9]*_{0,1}(%s|%s)' %
+                            (AID.PREFIX, _OEM_START_KW, _OEM_END_KW))
+    # AID lines cannot end with _START or _END, ie AID_FOO is OK
+    # but AID_FOO_START is skipped. Note that AID_FOOSTART is NOT skipped.
+    _AID_SKIP_RANGE = ['_' + _OEM_START_KW, '_' + _OEM_END_KW]
+    _COLLISION_OK = ['AID_APP', 'AID_APP_START', 'AID_USER', 'AID_USER_OFFSET']
+
+    def __init__(self, aid_header):
+        """
+        Args:
+            aid_header (str): file name for the header
+                file containing AID entries.
+        """
+        self._aid_header = aid_header
+        self._aid_name_to_value = {}
+        self._aid_value_to_name = {}
+        self._oem_ranges = {}
+
+        with open(aid_header) as open_file:
+            self._parse(open_file)
 
         try:
-            if num.startswith('0x'):
-                return int(num, 16)
-            elif num.startswith('0b'):
-                return int(num, 2)
-            elif num.startswith('0'):
-                return int(num, 8)
-            else:
-                return int(num, 10)
+            self._process_and_check()
+        except ValueError as exception:
+            sys.exit('Error processing parsed data: "%s"' % (str(exception)))
+
+    def _parse(self, aid_file):
+        """Parses an AID header file. Internal use only.
+
+        Args:
+            aid_file (file): The open AID header file to parse.
+        """
+
+        for lineno, line in enumerate(aid_file):
+
+            def error_message(msg):
+                """Creates an error message with the current parsing state."""
+                # pylint: disable=cell-var-from-loop
+                return 'Error "{}" in file: "{}" on line: {}'.format(
+                    msg, self._aid_header, str(lineno))
+
+            if AIDHeaderParser._AID_DEFINE.match(line):
+                chunks = line.split()
+                identifier = chunks[1]
+                value = chunks[2]
+
+                if any(x.match(identifier) for x in AIDHeaderParser._SKIP_AIDS):
+                    continue
+
+                try:
+                    if AIDHeaderParser._is_oem_range(identifier):
+                        self._handle_oem_range(identifier, value)
+                    elif not any(
+                            identifier.endswith(x)
+                            for x in AIDHeaderParser._AID_SKIP_RANGE):
+                        self._handle_aid(identifier, value)
+                except ValueError as exception:
+                    sys.exit(
+                        error_message('{} for "{}"'.format(exception,
+                                                           identifier)))
+
+    def _handle_aid(self, identifier, value):
+        """Handle an AID C #define.
+
+        Handles an AID, sanity checking, generating the friendly name and
+        adding it to the internal maps. Internal use only.
+
+        Args:
+            identifier (str): The name of the #define identifier. ie AID_FOO.
+            value (str): The value associated with the identifier.
+
+        Raises:
+            ValueError: With message set to indicate the error.
+        """
+
+        aid = AID(identifier, value, self._aid_header)
+
+        # duplicate name
+        if aid.friendly in self._aid_name_to_value:
+            raise ValueError('Duplicate aid "%s"' % identifier)
+
+        if value in self._aid_value_to_name and aid.identifier not in AIDHeaderParser._COLLISION_OK:
+            raise ValueError('Duplicate aid value "%s" for %s' % (value,
+                                                                  identifier))
+
+        self._aid_name_to_value[aid.friendly] = aid
+        self._aid_value_to_name[value] = aid.friendly
+
+    def _handle_oem_range(self, identifier, value):
+        """Handle an OEM range C #define.
+
+        When encountering special AID defines, notably for the OEM ranges
+        this method handles sanity checking and adding them to the internal
+        maps. For internal use only.
+
+        Args:
+            identifier (str): The name of the #define identifier.
+                ie AID_OEM_RESERVED_START/END.
+            value (str): The value associated with the identifier.
+
+        Raises:
+            ValueError: With message set to indicate the error.
+        """
+
+        try:
+            int_value = int(value, 0)
         except ValueError:
-            pass
-        return None
+            raise ValueError(
+                'Could not convert "%s" to integer value, got: "%s"' %
+                (identifier, value))
 
-def handle_path(file_name, section_name, config, files, dirs):
+        # convert AID_OEM_RESERVED_START or AID_OEM_RESERVED_<num>_START
+        # to AID_OEM_RESERVED or AID_OEM_RESERVED_<num>
+        is_start = identifier.endswith(AIDHeaderParser._OEM_START_KW)
 
-            mode = config.get(section_name, 'mode')
-            user = config.get(section_name, 'user')
-            group = config.get(section_name, 'group')
-            caps = config.get(section_name, 'caps')
+        if is_start:
+            tostrip = len(AIDHeaderParser._OEM_START_KW)
+        else:
+            tostrip = len(AIDHeaderParser._OEM_END_KW)
 
-            errmsg = 'Found specified but unset option: \"%s" in file: \"' + file_name + '\"'
+        # ending _
+        tostrip = tostrip + 1
 
-            if not mode:
-                raise Exception(errmsg % 'mode')
+        strip = identifier[:-tostrip]
+        if strip not in self._oem_ranges:
+            self._oem_ranges[strip] = []
 
-            if not user:
-                raise Exception(errmsg % 'user')
+        if len(self._oem_ranges[strip]) >= 2:
+            raise ValueError('Too many defines for OEM range "%s"' % identifier)
 
-            if not group:
-                raise Exception(errmsg % 'group')
+        if len(self._oem_ranges[strip]) == 1:
+            tmp = self._oem_ranges[strip][0]
 
-            if not caps:
-                raise Exception(errmsg % 'caps')
+            if tmp == int_value:
+                raise ValueError('START and END values equal %u' % int_value)
+            elif is_start and tmp < int_value:
+                raise ValueError('END value %u less than START value %u' %
+                                 (tmp, int_value))
+            elif not is_start and tmp > int_value:
+                raise ValueError('END value %u less than START value %u' %
+                                 (int_value, tmp))
 
-            caps = caps.split()
+        # Add START values to the head of the list and END values at the end.
+        # Thus, the list is ordered with index 0 as START and index 1 as END.
+        if is_start:
+            self._oem_ranges[strip].insert(0, int_value)
+        else:
+            self._oem_ranges[strip].append(int_value)
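        # For example, given these two defines in the parsed header:
        #   #define AID_OEM_RESERVED_START 2900
        #   #define AID_OEM_RESERVED_END   2999
        # both identifiers strip to AID_OEM_RESERVED and the list builds up
        # as [2900], then [2900, 2999]: index 0 is START and index 1 is END.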
 
-            tmp = []
-            for x in caps:
-                if convert_int(x):
-                    tmp.append('(' + x + ')')
-                else:
-                    tmp.append('(1ULL << CAP_' + x.upper() + ')')
+    def _process_and_check(self):
+        """Process, check and populate internal data structures.
 
-            caps = tmp
+        After parsing and generating the internal data structures, this method
+        is responsible for sanity checking ALL of the acquired data.
 
-            path = '"' + section_name + '"'
+        Raises:
+            ValueError: With the message set to indicate the specific error.
+        """
 
-            if len(mode) == 3:
-                mode = '0' + mode
+        # tuplefy the lists since the ranges should not be mutable.
+        self._oem_ranges = [
+            AIDHeaderParser._convert_lst_to_tup(k, v)
+            for k, v in self._oem_ranges.iteritems()
+        ]
 
-            try:
-                int(mode, 8)
-            except:
-                raise Exception('Mode must be octal characters, got: "' + mode + '"')
+        # Check for overlapping ranges
+        for i, range1 in enumerate(self._oem_ranges):
+            for range2 in self._oem_ranges[i + 1:]:
+                if AIDHeaderParser._is_overlap(range1, range2):
+                    raise ValueError("Overlapping OEM Ranges found %s and %s" %
+                                     (str(range1), str(range2)))
 
-            if len(mode) != 4:
-                raise Exception('Mode must be 3 or 4 characters, got: "' + mode + '"')
+        # No core AIDs should be within any oem range.
+        for aid in self._aid_value_to_name:
+
+            if Utils.in_any_range(int(aid, 0), self._oem_ranges):
+                name = self._aid_value_to_name[aid]
+                raise ValueError(
+                    'AID "%s" value: %s within reserved OEM Range: "%s"' %
+                    (name, aid, str(self._oem_ranges)))
+
+    @property
+    def oem_ranges(self):
+        """Retrieves the OEM closed ranges as a list of tuples.
+
+        Returns:
+            A list of closed range tuples: [ (0, 42), (50, 105) ... ]
+        """
+        return self._oem_ranges
+
+    @property
+    def aids(self):
+        """Retrieves the list of found AIDs.
+
+        Returns:
+            A list of AID() objects.
+        """
+        return self._aid_name_to_value.values()
+
+    @staticmethod
+    def _convert_lst_to_tup(name, lst):
+        """Converts a mutable list to a non-mutable tuple.
+
+        Used ONLY for ranges and thus enforces a length of 2.
+
+        Args:
+            name (str): The name used in the error message on failure.
+            lst (List): list that should be "tuplefied".
+
+        Raises:
+            ValueError if lst is not a list or len is not 2.
+
+        Returns:
+            Tuple(lst)
+        """
+        if not lst or len(lst) != 2:
+            raise ValueError('Mismatched range for "%s"' % name)
+
+        return tuple(lst)
+
+    @staticmethod
+    def _is_oem_range(aid):
+        """Detects if a given aid is within the reserved OEM range.
+
+        Args:
+            aid (int): The aid to test
+
+        Returns:
+            True if it is within the range, False otherwise.
+        """
+
+        return AIDHeaderParser._OEM_RANGE.match(aid)
+
+    @staticmethod
+    def _is_overlap(range_a, range_b):
+        """Calculates the overlap of two range tuples.
+
+        A range tuple is a closed range. A closed range includes its endpoints.
+        Note that python tuples use () notation which collides with the
+        mathematical notation for open ranges.
+
+        Args:
+            range_a: The first tuple closed range eg (0, 5).
+            range_b: The second tuple closed range eg (3, 7).
+
+        Returns:
+            True if they overlap, False otherwise.
+        """
+
+        return max(range_a[0], range_b[0]) <= min(range_a[1], range_b[1])
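        # For example, (0, 5) and (3, 7) overlap: max(0, 3) = 3 <= min(5, 7) = 5.
        # (0, 2) and (3, 7) do not: max(0, 3) = 3 > min(2, 7) = 2.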
 
 
-            caps = '|'.join(caps)
+class FSConfigFileParser(object):
+    """Parses a config.fs ini format file.
 
-            x = [ mode, user, group, caps, section_name ]
-            if section_name[-1] == '/':
-                dirs.append((file_name, x))
-            else:
-                files.append((file_name, x))
+    This class is responsible for parsing the config.fs ini format files.
+    It collects and checks all the data in these files and makes it available
+    for consumption post processed.
+    """
 
-def handle_dup(name, file_name, section_name, seen):
-        if section_name in seen:
-            dups = '"' + seen[section_name] + '" and '
-            dups += file_name
-            raise Exception('Duplicate ' + name + ' "' + section_name + '" found in files: ' + dups)
+    # These _AID vars work together to ensure that an AID section name
+    # cannot contain invalid characters for a C define or a passwd/group file.
+    # Since AID.PREFIX is within the set matched by _AID_MATCH, the error logic
+    # only checks the end of the match; if you change this, you may have to
+    # update the error detection code.
+    _AID_MATCH = re.compile('%s[A-Z0-9_]+' % AID.PREFIX)
+    _AID_ERR_MSG = 'Expecting upper case, a number or underscore'
 
-def parse(file_name, files, dirs, aids, seen_paths, seen_aids):
+    # list of (handler, required options) pairs, used to identify the
+    # parsing section
+    _SECTIONS = [('_handle_aid', ('value',)),
+                 ('_handle_path', ('mode', 'user', 'group', 'caps'))]
+
+    def __init__(self, config_files, oem_ranges):
+        """
+        Args:
+            config_files ([str]): The list of config.fs files to parse.
+                Note the filename is not important.
+            oem_ranges ([(),()]): range tuples indicating reserved OEM ranges.
+        """
+
+        self._files = []
+        self._dirs = []
+        self._aids = []
+
+        self._seen_paths = {}
+        # (name to file, value to aid)
+        self._seen_aids = ({}, {})
+
+        self._oem_ranges = oem_ranges
+
+        self._config_files = config_files
+
+        for config_file in self._config_files:
+            self._parse(config_file)
+
+    def _parse(self, file_name):
+        """Parses and verifies config.fs files. Internal use only.
+
+        Args:
+            file_name (str): The config.fs (PythonConfigParser file format)
+                file to parse.
+
+        Raises:
+            Anything raised by ConfigParser.read()
+        """
+
+        # Separate config parsers for each file found. If you use
+        # read(filenames...), later files can override earlier files, which is
+        # not what we want. Track state across files and enforce it with
+        # _handle_dup_and_add(). Note that strict ConfigParser is only set to
+        # true in Python >= 3.2, so in previous versions sections within the
+        # same file can override previous sections.
 
         config = ConfigParser.ConfigParser()
         config.read(file_name)
 
-        for s in config.sections():
+        for section in config.sections():
 
-            if AID_MATCH.match(s) and config.has_option(s, 'value'):
-                handle_dup('AID', file_name, s, seen_aids[0])
-                seen_aids[0][s] = file_name
-                handle_aid(file_name, s, config, aids, seen_aids)
-            else:
-                handle_dup('path', file_name, s, seen_paths)
-                seen_paths[s] = file_name
-                handle_path(file_name, s, config, files, dirs)
+            found = False
 
-def generate(files, dirs, aids):
-    print GENERATED
-    print INCLUDE
-    print
+            for test in FSConfigFileParser._SECTIONS:
+                handler = test[0]
+                options = test[1]
 
-    are_dirs = len(dirs) > 0
-    are_files = len(files) > 0
-    are_aids = len(aids) > 0
+                if all([config.has_option(section, item) for item in options]):
+                    handler = getattr(self, handler)
+                    handler(file_name, section, config)
+                    found = True
+                    break
 
-    if are_aids:
-        for a in aids:
-            # use the preserved str value
-            print FILE_COMMENT % a[0]
-            print GENERIC_DEFINE % (a[1], a[2])
+            if not found:
+                sys.exit('Invalid section "%s" in file: "%s"' %
+                         (section, file_name))
 
+        # sort entries:
+        # * specified path before prefix match
+        # ** ie foo before f*
+        # * lexicographical less than before other
+        # ** ie boo before foo
+        # Given these paths:
+        # paths=['ac', 'a', 'acd', 'an', 'a*', 'aa', 'ac*']
+        # The sort order would be:
+        # paths=['a', 'aa', 'ac', 'acd', 'an', 'ac*', 'a*']
+        # Thus the fs_config tools will match on specified paths before
+        # attempting prefix, and match on the longest matching prefix.
+        self._files.sort(key=FSConfigFileParser._file_key)
+
+        # sort on the normalized value of each AID
+        # This is only cosmetic so AIDs are arranged in ascending order
+        # within the generated file.
+        self._aids.sort(key=lambda item: item.normalized_value)
+
+    def _handle_aid(self, file_name, section_name, config):
+        """Verifies an AID entry and adds it to the aid list.
+
+        Calls sys.exit() with a descriptive message of the failure.
+
+        Args:
+            file_name (str): The filename of the config file being parsed.
+            section_name (str): The section name currently being parsed.
+            config (ConfigParser): The ConfigParser section being parsed that
+                the option values will come from.
+        """
+
+        def error_message(msg):
+            """Creates an error message with current parsing state."""
+            return '{} for: "{}" file: "{}"'.format(msg, section_name,
+                                                    file_name)
+
+        FSConfigFileParser._handle_dup_and_add('AID', file_name, section_name,
+                                               self._seen_aids[0])
+
+        match = FSConfigFileParser._AID_MATCH.match(section_name)
+        invalid = match.end() if match else len(AID.PREFIX)
+        if invalid != len(section_name):
+            tmp_errmsg = ('Invalid characters in AID section at "%d" for: "%s"'
+                          % (invalid, FSConfigFileParser._AID_ERR_MSG))
+            sys.exit(error_message(tmp_errmsg))
+
+        value = config.get(section_name, 'value')
+
+        if not value:
+            sys.exit(error_message('Found specified but unset "value"'))
+
+        try:
+            aid = AID(section_name, value, file_name)
+        except ValueError:
+            sys.exit(
+                error_message('Invalid "value", not aid number, got: \"%s\"' %
+                              value))
+
+        # Values must be within OEM range
+        if not Utils.in_any_range(int(aid.value, 0), self._oem_ranges):
+            emsg = '"value" not in valid range %s, got: %s'
+            emsg = emsg % (str(self._oem_ranges), value)
+            sys.exit(error_message(emsg))
+
+        # use the normalized int value in the dict and detect
+        # duplicate definitions of the same value
+        FSConfigFileParser._handle_dup_and_add(
+            'AID', file_name, aid.normalized_value, self._seen_aids[1])
+
+        # Append aid tuple of (AID_*, base10(value), str(value))
+        # We keep the str version of value so we can print that out in the
+        # generated header so investigating parties can identify parts.
+        # We store the base10 value for sorting, so everything is ascending
+        # later.
+        self._aids.append(aid)
+
+    def _handle_path(self, file_name, section_name, config):
+        """Add a file capability entry to the internal list.
+
+        Handles a file capability entry, verifies it, and adds it to
+        to the internal dirs or files list based on path. If it ends
+        with a / its a dir. Internal use only.
+
+        Calls sys.exit() on any validation error with message set.
+
+        Args:
+            file_name (str): The current name of the file being parsed.
+            section_name (str): The name of the section to parse.
+            config (str): The config parser.
+        """
+
+        FSConfigFileParser._handle_dup_and_add('path', file_name, section_name,
+                                               self._seen_paths)
+
+        mode = config.get(section_name, 'mode')
+        user = config.get(section_name, 'user')
+        group = config.get(section_name, 'group')
+        caps = config.get(section_name, 'caps')
+
+        errmsg = ('Found specified but unset option: \"%s" in file: \"' +
+                  file_name + '\"')
+
+        if not mode:
+            sys.exit(errmsg % 'mode')
+
+        if not user:
+            sys.exit(errmsg % 'user')
+
+        if not group:
+            sys.exit(errmsg % 'group')
+
+        if not caps:
+            sys.exit(errmsg % 'caps')
+
+        caps = caps.split()
+
+        tmp = []
+        for cap in caps:
+            try:
+                # test if the string is an int; if it is, use it as is.
+                int(cap, 0)
+                tmp.append('(' + cap + ')')
+            except ValueError:
+                tmp.append('(1ULL << CAP_' + cap.upper() + ')')
+
+        caps = tmp
+
+        if len(mode) == 3:
+            mode = '0' + mode
+
+        try:
+            int(mode, 8)
+        except ValueError:
+            sys.exit('Mode must be octal characters, got: "%s"' % mode)
+
+        if len(mode) != 4:
+            sys.exit('Mode must be 3 or 4 characters, got: "%s"' % mode)
+
+        caps_str = '|'.join(caps)
+
+        entry = FSConfig(mode, user, group, caps_str, section_name, file_name)
+        if section_name[-1] == '/':
+            self._dirs.append(entry)
+        else:
+            self._files.append(entry)
+
+    @property
+    def files(self):
+        """Get the list of FSConfig file entries.
+
+        Returns:
+             a list of FSConfig() objects for file paths.
+        """
+        return self._files
+
+    @property
+    def dirs(self):
+        """Get the list of FSConfig dir entries.
+
+        Returns:
+            a list of FSConfig() objects for directory paths.
+        """
+        return self._dirs
+
+    @property
+    def aids(self):
+        """Get the list of AID entries.
+
+        Returns:
+            a list of AID() objects.
+        """
+        return self._aids
+
+    @staticmethod
+    def _file_key(fs_config):
+        """Used as the key paramter to sort.
+
+        This is used as a the function to the key parameter of a sort.
+        it wraps the string supplied in a class that implements the
+        appropriate __lt__ operator for the sort on path strings. See
+        StringWrapper class for more details.
+
+        Args:
+            fs_config (FSConfig): A FSConfig entry.
+
+        Returns:
+            A StringWrapper object
+        """
+
+        # Wrapper class for custom prefix matching strings
+        class StringWrapper(object):
+            """Wrapper class used for sorting prefix strings.
+
+            The algorithm is as follows:
+              - specified path before prefix match
+                - ie foo before f*
+              - lexicographical less than before other
+                - ie boo before foo
+
+            Given these paths:
+            paths=['ac', 'a', 'acd', 'an', 'a*', 'aa', 'ac*']
+            The sort order would be:
+            paths=['a', 'aa', 'ac', 'acd', 'an', 'ac*', 'a*']
+            Thus the fs_config tools will match on specified paths before
+            attempting prefix, and match on the longest matching prefix.
+            """
+
+            def __init__(self, path):
+                """
+                Args:
+                    path (str): the path string to wrap.
+                """
+                self.is_prefix = path[-1] == '*'
+                if self.is_prefix:
+                    self.path = path[:-1]
+                else:
+                    self.path = path
+
+            def __lt__(self, other):
+
+                # If we're both prefix matches, the shorter string
+                # is 'bigger'
+                if self.is_prefix and other.is_prefix:
+                    result = len(self.path) > len(other.path)
+                # If I am the prefix match, I'm bigger
+                elif self.is_prefix:
+                    result = False
+                # If other is the prefix match, it's bigger
+                elif other.is_prefix:
+                    result = True
+                # Alphabetical
+                else:
+                    result = self.path < other.path
+                return result
+
+        return StringWrapper(fs_config.path)
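        # Illustration: sorting the docstring's example list with this key
        # yields ['a', 'aa', 'ac', 'acd', 'an', 'ac*', 'a*']: exact paths
        # first in lexicographic order, then prefixes, longest prefix first.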
+
+    @staticmethod
+    def _handle_dup_and_add(name, file_name, section_name, seen):
+        """Tracks and detects duplicates. Internal use only.
+
+        Calls sys.exit() on a duplicate.
+
+        Args:
+            name (str): The name to use in the error reporting. The pretty
+                name for the section.
+            file_name (str): The file currently being parsed.
+            section_name (str): The name of the section. This would be path
+                or identifier depending on what's being parsed.
+            seen (dict): The dictionary of seen things to check against.
+        """
+        if section_name in seen:
+            dups = '"' + seen[section_name] + '" and '
+            dups += file_name
+            sys.exit('Duplicate %s "%s" found in files: %s' %
+                     (name, section_name, dups))
+
+        seen[section_name] = file_name
+
+
+class BaseGenerator(object):
+    """Interface for Generators.
+
+    Base class for generators, generators should implement
+    these method stubs.
+    """
+
+    def add_opts(self, opt_group):
+        """Used to add per-generator options to the command line.
+
+        Args:
+            opt_group (argument group object): The argument group to append to.
+                See the ArgParse docs for more details.
+        """
+
+        raise NotImplementedError("Not Implemented")
+
+    def __call__(self, args):
+        """This is called to do whatever magic the generator does.
+
+        Args:
+            args (dict): The arguments from ArgParse as a dictionary.
+                ie if you specified an argument of foo in add_opts, access
+                it via args['foo']
+        """
+
+        raise NotImplementedError("Not Implemented")
+
+
+@generator('fsconfig')
+class FSConfigGen(BaseGenerator):
+    """Generates the android_filesystem_config.h file.
+
+    Output is used in generating fs_config_files and fs_config_dirs.
+    """
+
+    _GENERATED = textwrap.dedent("""\
+        /*
+         * THIS IS AN AUTOGENERATED FILE! DO NOT MODIFY
+         */
+        """)
+
+    _INCLUDES = [
+        '<private/android_filesystem_config.h>', '"generated_oem_aid.h"'
+    ]
+
+    _DEFINE_NO_DIRS = '#define NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS'
+    _DEFINE_NO_FILES = '#define NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_FILES'
+
+    _DEFAULT_WARNING = (
+        '#warning No device-supplied android_filesystem_config.h,'
+        ' using empty default.')
+
+    # Long names.
+    # pylint: disable=invalid-name
+    _NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS_ENTRY = (
+        '{ 00000, AID_ROOT, AID_ROOT, 0, '
+        '"system/etc/fs_config_dirs" },')
+
+    _NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_FILES_ENTRY = (
+        '{ 00000, AID_ROOT, AID_ROOT, 0, '
+        '"system/etc/fs_config_files" },')
+
+    _IFDEF_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS = (
+        '#ifdef NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS')
+    # pylint: enable=invalid-name
+
+    _ENDIF = '#endif'
+
+    _OPEN_FILE_STRUCT = (
+        'static const struct fs_path_config android_device_files[] = {')
+
+    _OPEN_DIR_STRUCT = (
+        'static const struct fs_path_config android_device_dirs[] = {')
+
+    _CLOSE_FILE_STRUCT = '};'
+
+    _GENERIC_DEFINE = "#define %s\t%s"
+
+    _FILE_COMMENT = '// Defined in file: \"%s\"'
+
+    def __init__(self, *args, **kwargs):
+        BaseGenerator.__init__(self)
+
+        self._oem_parser = None
+        self._base_parser = None
+        self._friendly_to_aid = None
+
+    def add_opts(self, opt_group):
+
+        opt_group.add_argument(
+            'fsconfig', nargs='+', help='The list of fsconfig files to parse')
+
+        opt_group.add_argument(
+            '--aid-header',
+            required=True,
+            help='An android_filesystem_config.h file'
+            ' to parse AIDs and OEM Ranges from')
+
+    def __call__(self, args):
+
+        self._base_parser = AIDHeaderParser(args['aid_header'])
+        self._oem_parser = FSConfigFileParser(args['fsconfig'],
+                                              self._base_parser.oem_ranges)
+        base_aids = self._base_parser.aids
+        oem_aids = self._oem_parser.aids
+
+        # Detect name collisions on AIDs. Since friendly works as the
+        # identifier for collision testing and we need friendly later on for
+        # name resolution, just calculate and use friendly.
+        # {aid.friendly: aid for aid in base_aids}
+        base_friendly = {aid.friendly: aid for aid in base_aids}
+        oem_friendly = {aid.friendly: aid for aid in oem_aids}
+
+        base_set = set(base_friendly.keys())
+        oem_set = set(oem_friendly.keys())
+
+        common = base_set & oem_set
+
+        if len(common) > 0:
+            emsg = 'Following AID Collisions detected for: \n'
+            for friendly in common:
+                base = base_friendly[friendly]
+                oem = oem_friendly[friendly]
+                emsg += (
+                    'Identifier: "%s" Friendly Name: "%s" '
+                    'found in file "%s" and "%s"\n' %
+                    (base.identifier, base.friendly, base.found, oem.found))
+            sys.exit(emsg)
+
+        self._friendly_to_aid = oem_friendly
+        self._friendly_to_aid.update(base_friendly)
+
+        self._generate()
+
+    def _to_fs_entry(self, fs_config):
+        """Converts an FSConfig entry to an fs entry.
+
+        Prints '{ mode, user, group, caps, "path" },'.
+
+        Calls sys.exit() on error.
+
+        Args:
+            fs_config (FSConfig): The entry to convert to
+                a valid C array entry.
+        """
+
+        # Get some short names
+        mode = fs_config.mode
+        user = fs_config.user
+        group = fs_config.group
+        fname = fs_config.filename
+        caps = fs_config.caps
+        path = fs_config.path
+
+        emsg = 'Cannot convert friendly name "%s" to identifier!'
+
+        # remap friendly names to identifier names
+        if AID.is_friendly(user):
+            if user not in self._friendly_to_aid:
+                sys.exit(emsg % user)
+            user = self._friendly_to_aid[user].identifier
+
+        if AID.is_friendly(group):
+            if group not in self._friendly_to_aid:
+                sys.exit(emsg % group)
+            group = self._friendly_to_aid[group].identifier
+
+        fmt = '{ %s, %s, %s, %s, "%s" },'
+
+        expanded = fmt % (mode, user, group, caps, path)
+
+        print FSConfigGen._FILE_COMMENT % fname
+        print '    ' + expanded
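        # For instance, a hypothetical [vendor/bin/foo] section with mode 0755,
        # user AID_VENDOR_FOO, group radio (remapped to AID_RADIO) and caps 0
        # would print:
        #     // Defined in file: "device/acme/config.fs"
        #     { 0755, AID_VENDOR_FOO, AID_RADIO, (0), "vendor/bin/foo" },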
+
+    @staticmethod
+    def _gen_inc():
+        """Generate the include header lines and print to stdout."""
+        for include in FSConfigGen._INCLUDES:
+            print '#include %s' % include
+
+    def _generate(self):
+        """Generates an OEM android_filesystem_config.h header file to stdout.
+
+        Args:
+            files ([FSConfig]): A list of FSConfig objects for file entries.
+            dirs ([FSConfig]): A list of FSConfig objects for directory
+                entries.
+            aids ([AIDS]): A list of AID objects for Android Id entries.
+        """
+        print FSConfigGen._GENERATED
         print
 
-    if not are_dirs:
-        print DEFINE_NO_DIRS
+        FSConfigGen._gen_inc()
+        print
 
-    if not are_files:
-        print DEFINE_NO_FILES
+        dirs = self._oem_parser.dirs
+        files = self._oem_parser.files
+        aids = self._oem_parser.aids
 
-    if not are_files and not are_dirs and not are_aids:
-        print DEFAULT_WARNING
-        return
+        are_dirs = len(dirs) > 0
+        are_files = len(files) > 0
+        are_aids = len(aids) > 0
 
-    if are_files:
-        print OPEN_FILE_STRUCT
-        for tup in files:
-            f = tup[0]
-            c = tup[1]
-            c[4] = '"' + c[4] + '"'
-            c = '{ ' + '    ,'.join(c) + ' },'
-            print FILE_COMMENT % f
-            print '    ' + c
+        if are_aids:
+            for aid in aids:
+                # use the preserved _path value
+                print FSConfigGen._FILE_COMMENT % aid.found
+                print FSConfigGen._GENERIC_DEFINE % (aid.identifier, aid.value)
+
+            print
 
         if not are_dirs:
-            print IFDEF_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS
-            print '    ' + NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS_ENTRY
-            print ENDIF
-        print CLOSE_FILE_STRUCT
+            print FSConfigGen._DEFINE_NO_DIRS + '\n'
 
-    if are_dirs:
-        print OPEN_DIR_STRUCT
-        for d in dirs:
-            f[4] = '"' + f[4] + '"'
-            d = '{ ' + '    ,'.join(d) + ' },'
-            print '    ' + d
+        if not are_files:
+            print FSConfigGen._DEFINE_NO_FILES + '\n'
 
-        print CLOSE_FILE_STRUCT
+        if not are_files and not are_dirs and not are_aids:
+            return
 
-def file_key(x):
+        if are_files:
+            print FSConfigGen._OPEN_FILE_STRUCT
+            for fs_config in files:
+                self._to_fs_entry(fs_config)
 
-    # Wrapper class for custom prefix matching strings
-    class S(object):
-        def __init__(self, str):
+            if not are_dirs:
+                print FSConfigGen._IFDEF_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS
+                print(
+                    '    ' +
+                    FSConfigGen._NO_ANDROID_FILESYSTEM_CONFIG_DEVICE_DIRS_ENTRY)
+                print FSConfigGen._ENDIF
+            print FSConfigGen._CLOSE_FILE_STRUCT
 
-            self.orig = str
-            self.is_prefix = str[-1] == '*'
-            if self.is_prefix:
-                self.str = str[:-1]
-            else:
-                self.str = str
+        if are_dirs:
+            print FSConfigGen._OPEN_DIR_STRUCT
+            for dir_entry in dirs:
+                self._to_fs_entry(dir_entry)
 
-        def __lt__(self, other):
+            print FSConfigGen._CLOSE_FILE_STRUCT
 
-            # if were both suffixed the smallest string
-            # is 'bigger'
-            if self.is_prefix and other.is_prefix:
-                b = len(self.str) > len(other.str)
-            # If I am an the suffix match, im bigger
-            elif self.is_prefix:
-                b = False
-            # If other is the suffix match, he's bigger
-            elif other.is_prefix:
-                b = True
-            # Alphabetical
-            else:
-                b = self.str < other.str
-            return b
 
-    return S(x[4])
+@generator('aidarray')
+class AIDArrayGen(BaseGenerator):
+    """Generates the android_id static array."""
+
+    _GENERATED = ('/*\n'
+                  ' * THIS IS AN AUTOGENERATED FILE! DO NOT MODIFY!\n'
+                  ' */')
+
+    _INCLUDE = '#include <private/android_filesystem_config.h>'
+
+    _STRUCT_FS_CONFIG = textwrap.dedent("""
+                         struct android_id_info {
+                             const char *name;
+                             unsigned aid;
+                         };""")
+
+    _OPEN_ID_ARRAY = 'static const struct android_id_info android_ids[] = {'
+
+    _ID_ENTRY = '    { "%s", %s },'
+
+    _CLOSE_FILE_STRUCT = '};'
+
+    _COUNT = ('#define android_id_count \\\n'
+              '    (sizeof(android_ids) / sizeof(android_ids[0]))')
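+
+    # A hypothetical AID('AID_SYSTEM', '1000', 'some/file') contributes the
+    # array row:
+    #   { "system", AID_SYSTEM },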
+
+    def add_opts(self, opt_group):
+
+        opt_group.add_argument(
+            'hdrfile', help='The android_filesystem_config.h '
+            'file to parse')
+
+    def __call__(self, args):
+
+        hdr = AIDHeaderParser(args['hdrfile'])
+
+        print AIDArrayGen._GENERATED
+        print
+        print AIDArrayGen._INCLUDE
+        print
+        print AIDArrayGen._STRUCT_FS_CONFIG
+        print
+        print AIDArrayGen._OPEN_ID_ARRAY
+
+        for aid in hdr.aids:
+            print AIDArrayGen._ID_ENTRY % (aid.friendly, aid.identifier)
+
+        print AIDArrayGen._CLOSE_FILE_STRUCT
+        print
+        print AIDArrayGen._COUNT
+        print
+
+
+@generator('oemaid')
+class OEMAidGen(BaseGenerator):
+    """Generates the OEM AID_<name> value header file."""
+
+    _GENERATED = ('/*\n'
+                  ' * THIS IS AN AUTOGENERATED FILE! DO NOT MODIFY!\n'
+                  ' */')
+
+    _GENERIC_DEFINE = "#define %s\t%s"
+
+    _FILE_COMMENT = '// Defined in file: \"%s\"'
+
+    # Intentional trailing newline for readability.
+    _FILE_IFNDEF_DEFINE = ('#ifndef GENERATED_OEM_AIDS_H_\n'
+                           '#define GENERATED_OEM_AIDS_H_\n')
+
+    _FILE_ENDIF = '#endif'
+
+    def __init__(self):
+
+        self._old_file = None
+
+    def add_opts(self, opt_group):
+
+        opt_group.add_argument(
+            'fsconfig', nargs='+', help='The list of fsconfig files to parse.')
+
+        opt_group.add_argument(
+            '--aid-header',
+            required=True,
+            help='An android_filesystem_config.h file '
+            'to parse AIDs and OEM Ranges from')
+
+    def __call__(self, args):
+
+        hdr_parser = AIDHeaderParser(args['aid_header'])
+
+        parser = FSConfigFileParser(args['fsconfig'], hdr_parser.oem_ranges)
+
+        print OEMAidGen._GENERATED
+
+        print OEMAidGen._FILE_IFNDEF_DEFINE
+
+        for aid in parser.aids:
+            self._print_aid(aid)
+            print
+
+        print OEMAidGen._FILE_ENDIF
+
+    def _print_aid(self, aid):
+        """Prints a valid #define AID identifier to stdout.
+
+        Args:
+            aid (AID): The aid to print.
+        """
+
+        # print the source file location of the AID
+        found_file = aid.found
+        if found_file != self._old_file:
+            print OEMAidGen._FILE_COMMENT % found_file
+            self._old_file = found_file
+
+        print OEMAidGen._GENERIC_DEFINE % (aid.identifier, aid.value)
+
+
+@generator('passwd')
+class PasswdGen(BaseGenerator):
+    """Generates the /etc/passwd file per man (5) passwd."""
+
+    _GENERATED = ('#\n# THIS IS AN AUTOGENERATED FILE! DO NOT MODIFY!\n#')
+
+    _FILE_COMMENT = '# Defined in file: \"%s\"'
+
+    def __init__(self):
+
+        self._old_file = None
+
+    def add_opts(self, opt_group):
+
+        opt_group.add_argument(
+            'fsconfig', nargs='+', help='The list of fsconfig files to parse.')
+
+        opt_group.add_argument(
+            '--aid-header',
+            required=True,
+            help='An android_filesystem_config.h file '
+            'to parse AIDs and OEM Ranges from')
+
+    def __call__(self, args):
+
+        hdr_parser = AIDHeaderParser(args['aid_header'])
+
+        parser = FSConfigFileParser(args['fsconfig'], hdr_parser.oem_ranges)
+
+        aids = parser.aids
+
+        # nothing to do if no aids defined
+        if len(aids) == 0:
+            return
+
+        print PasswdGen._GENERATED
+
+        for aid in aids:
+            self._print_formatted_line(aid)
+
+    def _print_formatted_line(self, aid):
+        """Prints the aid to stdout in the passwd format. Internal use only.
+
+        Colon delimited:
+            login name, friendly name
+            encrypted password (optional)
+            uid (int)
+            gid (int)
+            User name or comment field
+            home directory
+            interpreter (optional)
+
+        Args:
+            aid (AID): The aid to print.
+        """
+        if self._old_file != aid.found:
+            self._old_file = aid.found
+            print PasswdGen._FILE_COMMENT % aid.found
+
+        try:
+            logon, uid = Utils.get_login_and_uid_cleansed(aid)
+        except ValueError as exception:
+            sys.exit(exception)
+
+        print "%s::%s:%s::/:/system/bin/sh" % (logon, uid, uid)
+
+
+@generator('group')
+class GroupGen(PasswdGen):
+    """Generates the /etc/group file per man (5) group."""
+
+    # Overrides parent
+    def _print_formatted_line(self, aid):
+        """Prints the aid to stdout in the group format. Internal use only.
+
+        Formatted (per man 5 group) like:
+            group_name:password:GID:user_list
+
+        Args:
+            aid (AID): The aid to print.
+        """
+        if self._old_file != aid.found:
+            self._old_file = aid.found
+            print PasswdGen._FILE_COMMENT % aid.found
+
+        try:
+            logon, uid = Utils.get_login_and_uid_cleansed(aid)
+        except ValueError as exception:
+            sys.exit(exception)
+
+        print "%s::%s:" % (logon, uid)
+
 
 def main():
+    """Main entry point for execution."""
 
-    files = []
-    dirs = []
-    aids = []
-    seen_paths = {}
+    opt_parser = argparse.ArgumentParser(
+        description='A tool for parsing fsconfig config files and producing '
+        'digestible outputs.')
+    subparser = opt_parser.add_subparsers(help='generators')
 
-    # (name to file, value to aid)
-    seen_aids = ({}, {})
+    gens = generator.get()
 
-    for x in sys.argv[1:]:
-        parse(x, files, dirs, aids, seen_paths, seen_aids)
+    # Add each generator as a subcommand with its own option group.
+    for name, gen in gens.iteritems():
 
-    # sort entries:
-    # * specified path before prefix match
-    # ** ie foo before f*
-    # * lexicographical less than before other
-    # ** ie boo before foo
-    # Given these paths:
-    # paths=['ac', 'a', 'acd', 'an', 'a*', 'aa', 'ac*']
-    # The sort order would be:
-    # paths=['a', 'aa', 'ac', 'acd', 'an', 'ac*', 'a*']
-    # Thus the fs_config tools will match on specified paths before attempting
-    # prefix, and match on the longest matching prefix.
-    files.sort(key= lambda x: file_key(x[1]))
+        generator_option_parser = subparser.add_parser(name, help=gen.__doc__)
+        generator_option_parser.set_defaults(which=name)
 
-    # sort on value of (file_name, name, value, strvalue)
-    # This is only cosmetic so AIDS are arranged in ascending order
-    # within the generated file.
-    aids.sort(key=lambda x: x[2])
+        opt_group = generator_option_parser.add_argument_group(name +
+                                                               ' options')
+        gen.add_opts(opt_group)
 
-    generate(files, dirs, aids)
+    args = opt_parser.parse_args()
+
+    args_as_dict = vars(args)
+    which = args_as_dict['which']
+    del args_as_dict['which']
+
+    gens[which](args_as_dict)
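+    # Example invocation (hypothetical paths):
+    #   fs_config_generator.py passwd --aid-header android_filesystem_config.h \
+    #       device.fs_config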
+
 
 if __name__ == '__main__':
     main()
diff --git a/tools/fs_config/pylintrc b/tools/fs_config/pylintrc
new file mode 100644
index 0000000..3e44870
--- /dev/null
+++ b/tools/fs_config/pylintrc
@@ -0,0 +1,5 @@
+[MESSAGES CONTROL]
+disable=fixme,design,locally-disabled,too-many-lines
+
+[VARIABLES]
+dummy-variables-rgx=_|dummy
diff --git a/tools/fs_config/test_fs_config_generator.py b/tools/fs_config/test_fs_config_generator.py
new file mode 100755
index 0000000..a49058a
--- /dev/null
+++ b/tools/fs_config/test_fs_config_generator.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+"""Unit test suite for the fs_config_genertor.py tool."""
+
+import tempfile
+import textwrap
+import unittest
+
+from fs_config_generator import AID
+from fs_config_generator import AIDHeaderParser
+from fs_config_generator import FSConfigFileParser
+from fs_config_generator import FSConfig
+from fs_config_generator import Utils
+
+
+# Disable protected access so we can test class internal
+# methods. Also, disable invalid-name as some of the
+# class method names are over length.
+# pylint: disable=protected-access,invalid-name
+class Tests(unittest.TestCase):
+    """Test class for unit tests"""
+
+    def test_is_overlap(self):
+        """Test overlap detection helper"""
+
+        self.assertTrue(AIDHeaderParser._is_overlap((0, 1), (1, 2)))
+
+        self.assertTrue(AIDHeaderParser._is_overlap((0, 100), (90, 200)))
+
+        self.assertTrue(AIDHeaderParser._is_overlap((20, 50), (1, 101)))
+
+        self.assertFalse(AIDHeaderParser._is_overlap((0, 100), (101, 200)))
+
+        self.assertFalse(AIDHeaderParser._is_overlap((-10, 0), (10, 20)))
+
+    def test_in_any_range(self):
+        """Test if value in range"""
+
+        self.assertFalse(Utils.in_any_range(50, [(100, 200), (1, 2), (1, 1)]))
+        self.assertFalse(Utils.in_any_range(250, [(100, 200), (1, 2), (1, 1)]))
+
+        self.assertTrue(Utils.in_any_range(100, [(100, 200), (1, 2), (1, 1)]))
+        self.assertTrue(Utils.in_any_range(200, [(100, 200), (1, 2), (1, 1)]))
+        self.assertTrue(Utils.in_any_range(150, [(100, 200)]))
+
+    def test_aid(self):
+        """Test AID class constructor"""
+
+        aid = AID('AID_FOO_BAR', '0xFF', 'myfakefile')
+        self.assertEquals(aid.identifier, 'AID_FOO_BAR')
+        self.assertEquals(aid.value, '0xFF')
+        self.assertEquals(aid.found, 'myfakefile')
+        self.assertEquals(aid.normalized_value, '255')
+        self.assertEquals(aid.friendly, 'foo_bar')
+
+        aid = AID('AID_MEDIA_EX', '1234', 'myfakefile')
+        self.assertEquals(aid.identifier, 'AID_MEDIA_EX')
+        self.assertEquals(aid.value, '1234')
+        self.assertEquals(aid.found, 'myfakefile')
+        self.assertEquals(aid.normalized_value, '1234')
+        self.assertEquals(aid.friendly, 'mediaex')
+
+    def test_aid_header_parser_good(self):
+        """Test AID Header Parser good input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_FOO 1000
+                #define AID_BAR 1001
+                #define SOMETHING "something"
+                #define AID_OEM_RESERVED_START 2900
+                #define AID_OEM_RESERVED_END   2999
+                #define AID_OEM_RESERVED_1_START  7000
+                #define AID_OEM_RESERVED_1_END    8000
+            """))
+            temp_file.flush()
+
+            parser = AIDHeaderParser(temp_file.name)
+            oem_ranges = parser.oem_ranges
+            aids = parser.aids
+
+            self.assertTrue((2900, 2999) in oem_ranges)
+            self.assertFalse((5000, 6000) in oem_ranges)
+
+            for aid in aids:
+                self.assertTrue(aid.normalized_value in ['1000', '1001'])
+                self.assertFalse(aid.normalized_value in ['1', '2', '3'])
+
+    def test_aid_header_parser_good_unordered(self):
+        """Test AID Header Parser good unordered input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_FOO 1000
+                #define AID_OEM_RESERVED_1_END    8000
+                #define AID_BAR 1001
+                #define SOMETHING "something"
+                #define AID_OEM_RESERVED_END   2999
+                #define AID_OEM_RESERVED_1_START  7000
+                #define AID_OEM_RESERVED_START 2900
+            """))
+            temp_file.flush()
+
+            parser = AIDHeaderParser(temp_file.name)
+            oem_ranges = parser.oem_ranges
+            aids = parser.aids
+
+            self.assertTrue((2900, 2999) in oem_ranges)
+            self.assertFalse((5000, 6000) in oem_ranges)
+
+            for aid in aids:
+                self.assertTrue(aid.normalized_value in ['1000', '1001'])
+                self.assertFalse(aid.normalized_value in ['1', '2', '3'])
+
+    def test_aid_header_parser_bad_aid(self):
+        """Test AID Header Parser bad aid input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_FOO "bad"
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                AIDHeaderParser(temp_file.name)
+
+    def test_aid_header_parser_bad_oem_range(self):
+        """Test AID Header Parser bad oem range input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_OEM_RESERVED_START 2900
+                #define AID_OEM_RESERVED_END   1800
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                AIDHeaderParser(temp_file.name)
+
+    def test_aid_header_parser_bad_oem_range_no_end(self):
+        """Test AID Header Parser bad oem range (no end) input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_OEM_RESERVED_START 2900
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                AIDHeaderParser(temp_file.name)
+
+    def test_aid_header_parser_bad_oem_range_no_start(self):
+        """Test AID Header Parser bad oem range (no start) input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_OEM_RESERVED_END 2900
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                AIDHeaderParser(temp_file.name)
+
+    def test_aid_header_parser_bad_oem_range_mismatch_start_end(self):
+        """Test AID Header Parser bad oem range mismatched input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_OEM_RESERVED_START 2900
+                #define AID_OEM_RESERVED_2_END 2900
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                AIDHeaderParser(temp_file.name)
+
+    def test_aid_header_parser_bad_duplicate_ranges(self):
+        """Test AID Header Parser exits cleanly on duplicate AIDs"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_FOO 100
+                #define AID_BAR 100
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                AIDHeaderParser(temp_file.name)
+
+    def test_aid_header_parser_no_bad_aids(self):
+        """Test AID Header Parser that it doesn't contain:
+        Ranges, ie things the end with "_START" or "_END"
+        AID_APP
+        AID_USER
+        For more details see:
+          - https://android-review.googlesource.com/#/c/313024
+          - https://android-review.googlesource.com/#/c/313169
+        """
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                #define AID_APP              10000 /* TODO: switch users over to AID_APP_START */
+                #define AID_APP_START        10000 /* first app user */
+                #define AID_APP_END          19999 /* last app user */
+
+                #define AID_CACHE_GID_START  20000 /* start of gids for apps to mark cached data */
+                #define AID_CACHE_GID_END    29999 /* end of gids for apps to mark cached data */
+
+                #define AID_SHARED_GID_START 50000 /* start of gids for apps in each user to share */
+                #define AID_SHARED_GID_END   59999 /* end of gids for apps in each user to share */
+
+                #define AID_ISOLATED_START   99000 /* start of uids for fully isolated sandboxed processes */
+                #define AID_ISOLATED_END     99999 /* end of uids for fully isolated sandboxed processes */
+
+                #define AID_USER            100000 /* TODO: switch users over to AID_USER_OFFSET */
+                #define AID_USER_OFFSET     100000 /* offset for uid ranges for each user */
+            """))
+            temp_file.flush()
+
+            parser = AIDHeaderParser(temp_file.name)
+            aids = parser.aids
+
+            bad_aids = ['_START', '_END', 'AID_APP', 'AID_USER']
+
+            for aid in aids:
+                self.assertFalse(
+                    any(bad in aid.identifier for bad in bad_aids),
+                    'Not expecting keywords "%s" in aids "%s"' %
+                    (str(bad_aids), str([tmp.identifier for tmp in aids])))
+
+    def test_fs_config_file_parser_good(self):
+        """Test FSConfig Parser good input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                [/system/bin/file]
+                user: AID_FOO
+                group: AID_SYSTEM
+                mode: 0777
+                caps: BLOCK_SUSPEND
+
+                [/vendor/path/dir/]
+                user: AID_FOO
+                group: AID_SYSTEM
+                mode: 0777
+                caps: 0
+
+                [AID_OEM1]
+                # 5001 (decimal) expressed in base 16
+                value: 0x1389
+            """))
+            temp_file.flush()
+
+            parser = FSConfigFileParser([temp_file.name], [(5000, 5999)])
+            files = parser.files
+            dirs = parser.dirs
+            aids = parser.aids
+
+            self.assertEquals(len(files), 1)
+            self.assertEquals(len(dirs), 1)
+            self.assertEquals(len(aids), 1)
+
+            aid = aids[0]
+            fcap = files[0]
+            dcap = dirs[0]
+
+            self.assertEqual(fcap,
+                             FSConfig('0777', 'AID_FOO', 'AID_SYSTEM',
+                                      '(1ULL << CAP_BLOCK_SUSPEND)',
+                                      '/system/bin/file', temp_file.name))
+
+            self.assertEqual(dcap,
+                             FSConfig('0777', 'AID_FOO', 'AID_SYSTEM', '(0)',
+                                      '/vendor/path/dir/', temp_file.name))
+
+            self.assertEqual(aid, AID('AID_OEM1', '0x1389', temp_file.name))
+
+    def test_fs_config_file_parser_bad(self):
+        """Test FSConfig Parser bad input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                [/system/bin/file]
+                caps: BLOCK_SUSPEND
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                FSConfigFileParser([temp_file.name], [(5000, 5999)])
+
+    def test_fs_config_file_parser_bad_aid_range(self):
+        """Test FSConfig Parser bad aid range value input file"""
+
+        with tempfile.NamedTemporaryFile() as temp_file:
+            temp_file.write(
+                textwrap.dedent("""
+                [AID_OEM1]
+                value: 25
+            """))
+            temp_file.flush()
+
+            with self.assertRaises(SystemExit):
+                FSConfigFileParser([temp_file.name], [(5000, 5999)])
diff --git a/tools/makeparallel/Makefile b/tools/makeparallel/Makefile
index 4e12b10..82a4abf 100644
--- a/tools/makeparallel/Makefile
+++ b/tools/makeparallel/Makefile
@@ -65,8 +65,9 @@
 makeparallel_test: $(MAKEPARALLEL)
 	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) -j1234
 	@EXPECTED="-j123"  $(MAKEPARALLEL_TEST) -j123
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST) -j1
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST)
+	@EXPECTED=""       $(MAKEPARALLEL_TEST) -j1
+	@EXPECTED="-j$$(($$(nproc) + 2))"   $(MAKEPARALLEL_TEST) -j
+	@EXPECTED=""       $(MAKEPARALLEL_TEST)
 
 	@EXPECTED="-j1234" $(MAKEPARALLEL_NINJA_TEST) -j1234
 	@EXPECTED="-j123"  $(MAKEPARALLEL_NINJA_TEST) -j123
@@ -87,8 +88,6 @@
 	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -j1234 -k
 	@EXPECTED="-j1234 -k0" $(MAKEPARALLEL_NINJA_TEST) -kt -j1234
 
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST) A=-j1234
-	@EXPECTED="-j1"    $(MAKEPARALLEL_TEST) A\ -j1234=-j1234
-	@EXPECTED="-j1234" $(MAKEPARALLEL_TEST) A\ -j1234=-j1234 -j1234
+	@EXPECTED=""       $(MAKEPARALLEL_TEST) A=-j1234
 
 	@EXPECTED="-j1234 args" ARGS="args" $(MAKEPARALLEL_TEST) -j1234
diff --git a/tools/makeparallel/makeparallel.cpp b/tools/makeparallel/makeparallel.cpp
index 4ae8f61..0e1e45c 100644
--- a/tools/makeparallel/makeparallel.cpp
+++ b/tools/makeparallel/makeparallel.cpp
@@ -317,20 +317,38 @@
     }
   }
 
-  std::string jarg = "-j" + std::to_string(tokens + 1);
+  std::string jarg;
+  if (parallel) {
+    if (tokens == 0) {
+      if (ninja) {
+        // ninja is parallel by default
+        jarg = "";
+      } else {
+        // make -j with no argument, guess a reasonable parallelism like ninja does
+        jarg = "-j" + std::to_string(sysconf(_SC_NPROCESSORS_ONLN) + 2);
+      }
+    } else {
+      jarg = "-j" + std::to_string(tokens + 1);
+    }
+  }
+
 
   if (ninja) {
     if (!parallel) {
       // ninja is parallel by default, pass -j1 to disable parallelism if make wasn't parallel
       args.push_back(strdup("-j1"));
-    } else if (tokens > 0) {
-      args.push_back(strdup(jarg.c_str()));
+    } else {
+      if (jarg != "") {
+        args.push_back(strdup(jarg.c_str()));
+      }
     }
     if (keep_going) {
       args.push_back(strdup("-k0"));
     }
   } else {
-    args.push_back(strdup(jarg.c_str()));
+    if (jarg != "") {
+      args.push_back(strdup(jarg.c_str()));
+    }
   }
 
   args.insert(args.end(), &argv[2], &argv[argc]);
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 8309463..28fd474 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -43,10 +43,12 @@
       are signing the target files.
 """
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
 import datetime
@@ -60,6 +62,7 @@
 
 import build_image
 import common
+import rangelib
 import sparse_img
 
 OPTIONS = common.OPTIONS
@@ -79,7 +82,16 @@
   simg = sparse_img.SparseImage(imgname)
   care_map_list = []
   care_map_list.append(blk_device)
-  care_map_list.append(simg.care_map.to_string_raw())
+
+  care_map_ranges = simg.care_map
+  key = which + "_adjusted_partition_size"
+  adjusted_blocks = OPTIONS.info_dict.get(key)
+  if adjusted_blocks:
+    assert adjusted_blocks > 0, "blocks should be positive for " + which
+    care_map_ranges = care_map_ranges.intersect(rangelib.RangeSet(
+        "0-%d" % (adjusted_blocks,)))
+
+  care_map_list.append(care_map_ranges.to_string_raw())
   return care_map_list
 
 
@@ -89,7 +101,7 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system.img")
   if os.path.exists(prebuilt_path):
-    print "system.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("system.img already exists in %s, no need to rebuild..." % (prefix,))
     return prebuilt_path
 
   def output_sink(fn, data):
@@ -98,7 +110,7 @@
     ofile.close()
 
   if OPTIONS.rebuild_recovery:
-    print "Building new recovery patch"
+    print("Building new recovery patch")
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
@@ -123,7 +135,8 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system_other.img")
   if os.path.exists(prebuilt_path):
-    print "system_other.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("system_other.img already exists in %s, no need to rebuild..." % (
+        prefix,))
     return
 
   imgname = BuildSystemOther(OPTIONS.input_tmp, OPTIONS.info_dict)
@@ -141,7 +154,7 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "vendor.img")
   if os.path.exists(prebuilt_path):
-    print "vendor.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("vendor.img already exists in %s, no need to rebuild..." % (prefix,))
     return prebuilt_path
 
   block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
@@ -159,7 +172,7 @@
 
 
 def CreateImage(input_dir, info_dict, what, block_list=None):
-  print "creating " + what + ".img..."
+  print("creating " + what + ".img...")
 
   img = common.MakeTempFile(prefix=what + "-", suffix=".img")
 
@@ -209,6 +222,14 @@
                                 image_props, img)
   assert succ, "build " + what + ".img image failed"
 
+  is_verity_partition = "verity_block_device" in image_props
+  verity_supported = image_props.get("verity") == "true"
+  if is_verity_partition and verity_supported:
+    adjusted_blocks_value = image_props.get("partition_size")
+    if adjusted_blocks_value:
+      adjusted_blocks_key = what + "_adjusted_partition_size"
+      info_dict[adjusted_blocks_key] = int(adjusted_blocks_value)/4096 - 1
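+      # e.g. a 4194304-byte partition holds 1024 4096-byte blocks, so the
+      # stored value is 4194304/4096 - 1 == 1023, the last block index.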
+
   return img
 
 
@@ -223,7 +244,8 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "userdata.img")
   if os.path.exists(prebuilt_path):
-    print "userdata.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("userdata.img already exists in %s, no need to rebuild..." % (
+        prefix,))
     return
 
   # Skip userdata.img if no size.
@@ -231,7 +253,7 @@
   if not image_props.get("partition_size"):
     return
 
-  print "creating userdata.img..."
+  print("creating userdata.img...")
 
   # Use a fixed timestamp (01/01/2009) when packaging the image.
   # Bug: 24377993
@@ -321,7 +343,7 @@
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "cache.img")
   if os.path.exists(prebuilt_path):
-    print "cache.img already exists in %s, no need to rebuild..." % (prefix,)
+    print("cache.img already exists in %s, no need to rebuild..." % (prefix,))
     return
 
   image_props = build_image.ImagePropFromGlobalDict(OPTIONS.info_dict, "cache")
@@ -329,7 +351,7 @@
   if "fs_type" not in image_props:
     return
 
-  print "creating cache.img..."
+  print("creating cache.img...")
 
   # Use a fixed timestamp (01/01/2009) when packaging the image.
   # Bug: 24377993
@@ -364,7 +386,7 @@
   if not OPTIONS.add_missing:
     for n in input_zip.namelist():
       if n.startswith("IMAGES/"):
-        print "target_files appears to already contain images."
+        print("target_files appears to already contain images.")
         sys.exit(1)
 
   try:
@@ -386,13 +408,13 @@
   system_root_image = (OPTIONS.info_dict.get("system_root_image", None) == "true")
 
   def banner(s):
-    print "\n\n++++ " + s + " ++++\n\n"
+    print("\n\n++++ " + s + " ++++\n\n")
 
   prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", "boot.img")
   boot_image = None
   if os.path.exists(prebuilt_path):
     banner("boot")
-    print "boot.img already exists in IMAGES/, no need to rebuild..."
+    print("boot.img already exists in IMAGES/, no need to rebuild...")
     if OPTIONS.rebuild_recovery:
       boot_image = common.GetBootableImage(
           "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
@@ -408,7 +430,7 @@
     banner("recovery")
     prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", "recovery.img")
     if os.path.exists(prebuilt_path):
-      print "recovery.img already exists in IMAGES/, no need to rebuild..."
+      print("recovery.img already exists in IMAGES/, no need to rebuild...")
       if OPTIONS.rebuild_recovery:
         recovery_image = common.GetBootableImage(
             "IMAGES/recovery.img", "recovery.img", OPTIONS.input_tmp,
@@ -474,7 +496,7 @@
       img_name = line.strip() + ".img"
       prebuilt_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
       if os.path.exists(prebuilt_path):
-        print "%s already exists, no need to overwrite..." % (img_name,)
+        print("%s already exists, no need to overwrite..." % (img_name,))
         continue
 
       img_radio_path = os.path.join(OPTIONS.input_tmp, "RADIO", img_name)
@@ -530,16 +552,14 @@
     sys.exit(1)
 
   AddImagesToTargetFiles(args[0])
-  print "done."
+  print("done.")
 
 if __name__ == '__main__':
   try:
     common.CloseInheritedPipes()
     main(sys.argv[1:])
   except common.ExternalError as e:
-    print
-    print "   ERROR: %s" % (e,)
-    print
+    print("\n   ERROR: %s\n" % (e,))
     sys.exit(1)
   finally:
     common.Cleanup()
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index cc06a42..1edf5b2 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -14,8 +14,6 @@
 
 from __future__ import print_function
 
-from collections import deque, OrderedDict
-from hashlib import sha1
 import array
 import common
 import functools
@@ -23,12 +21,14 @@
 import itertools
 import multiprocessing
 import os
+import os.path
 import re
 import subprocess
 import threading
-import time
 import tempfile
 
+from collections import deque, OrderedDict
+from hashlib import sha1
 from rangelib import RangeSet
 
 
@@ -348,7 +348,7 @@
       This prevents the target size of one command from being too large; and
       might help to avoid fsync errors on some devices."""
 
-      assert (style == "new" or style == "zero")
+      assert style == "new" or style == "zero"
       blocks_limit = 1024
       total = 0
       while target_blocks:
@@ -359,15 +359,25 @@
       return total
 
     out = []
-
     total = 0
 
+    # In BBOTA v2, 'stashes' records the map from 'stash_raw_id' to 'stash_id'
+    # (aka 'sid', which is the stash slot id). The stash in a 'stash_id' will
+    # be freed immediately after its use. So unlike 'stash_raw_id' (which
+    # uniquely identifies each pair of stashed blocks), the same 'stash_id'
+    # may be reused during the life cycle of an update (maintained by
+    # 'free_stash_ids' heap and 'next_stash_id').
+    #
+    # In BBOTA v3+, it uses the hash of the stashed blocks as the stash slot
+    # id. 'stashes' records the map from 'hash' to the ref count. The stash
+    # will be freed only if the count decrements to zero.
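+    #
+    # For instance, in v3 stashing an identical 10-block range twice makes
+    # the ref count stashes[hash] == 2 while only 10 blocks are counted as
+    # stashed; the slot is freed once the count drops back to zero.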
     stashes = {}
     stashed_blocks = 0
     max_stashed_blocks = 0
 
-    free_stash_ids = []
-    next_stash_id = 0
+    if self.version == 2:
+      free_stash_ids = []
+      next_stash_id = 0
 
     for xf in self.transfers:
 
@@ -375,15 +385,15 @@
         assert not xf.stash_before
         assert not xf.use_stash
 
-      for s, sr in xf.stash_before:
-        assert s not in stashes
-        if free_stash_ids:
-          sid = heapq.heappop(free_stash_ids)
-        else:
-          sid = next_stash_id
-          next_stash_id += 1
-        stashes[s] = sid
+      for stash_raw_id, sr in xf.stash_before:
         if self.version == 2:
+          assert stash_raw_id not in stashes
+          if free_stash_ids:
+            sid = heapq.heappop(free_stash_ids)
+          else:
+            sid = next_stash_id
+            next_stash_id += 1
+          stashes[stash_raw_id] = sid
           stashed_blocks += sr.size()
           out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
         else:
@@ -417,13 +427,13 @@
 
         unstashed_src_ranges = xf.src_ranges
         mapped_stashes = []
-        for s, sr in xf.use_stash:
-          sid = stashes.pop(s)
+        for stash_raw_id, sr in xf.use_stash:
           unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
           sh = self.HashBlocks(self.src, sr)
           sr = xf.src_ranges.map_within(sr)
           mapped_stashes.append(sr)
           if self.version == 2:
+            sid = stashes.pop(stash_raw_id)
             src_str.append("%d:%s" % (sid, sr.to_string_raw()))
             # A stash will be used only once. We need to free the stash
             # immediately after the use, instead of waiting for the automatic
@@ -432,15 +442,15 @@
             # Bug: 23119955
             free_string.append("free %d\n" % (sid,))
             free_size += sr.size()
+            heapq.heappush(free_stash_ids, sid)
           else:
             assert sh in stashes
             src_str.append("%s:%s" % (sh, sr.to_string_raw()))
             stashes[sh] -= 1
             if stashes[sh] == 0:
+              free_string.append("free %s\n" % (sh,))
               free_size += sr.size()
-              free_string.append("free %s\n" % (sh))
               stashes.pop(sh)
-          heapq.heappush(free_stash_ids, sid)
 
         if unstashed_src_ranges:
           src_str.insert(1, unstashed_src_ranges.to_string_raw())
@@ -593,11 +603,15 @@
 
     out.insert(0, "%d\n" % (self.version,))   # format version number
     out.insert(1, "%d\n" % (total,))
-    if self.version >= 2:
-      # version 2 only: after the total block count, we give the number
-      # of stash slots needed, and the maximum size needed (in blocks)
+    if self.version == 2:
+      # v2 only: after the total block count, we give the number of stash slots
+      # needed, and the maximum size needed (in blocks).
       out.insert(2, str(next_stash_id) + "\n")
       out.insert(3, str(max_stashed_blocks) + "\n")
+    elif self.version >= 3:
+      # v3+: the number of stash slots is unused.
+      out.insert(2, "0\n")
+      out.insert(3, str(max_stashed_blocks) + "\n")
 
     with open(prefix + ".transfer.list", "wb") as f:
       for i in out:
@@ -618,18 +632,18 @@
 
   def ReviseStashSize(self):
     print("Revising stash size...")
-    stashes = {}
+    stash_map = {}
 
     # Create the map between a stash and its def/use points. For example, for a
-    # given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd).
+    # given stash of (raw_id, sr), stash_map[raw_id] = (sr, def_cmd, use_cmd).
     for xf in self.transfers:
       # Command xf defines (stores) all the stashes in stash_before.
-      for idx, sr in xf.stash_before:
-        stashes[idx] = (sr, xf)
+      for stash_raw_id, sr in xf.stash_before:
+        stash_map[stash_raw_id] = (sr, xf)
 
       # Record all the stashes command xf uses.
-      for idx, _ in xf.use_stash:
-        stashes[idx] += (xf,)
+      for stash_raw_id, _ in xf.use_stash:
+        stash_map[stash_raw_id] += (xf,)
 
     # Compute the maximum blocks available for stash based on /cache size and
     # the threshold.
@@ -637,9 +651,15 @@
     stash_threshold = common.OPTIONS.stash_threshold
     max_allowed = cache_size * stash_threshold / self.tgt.blocksize
 
+    # See the comments for 'stashes' in WriteTransfers().
+    stashes = {}
     stashed_blocks = 0
     new_blocks = 0
 
+    if self.version == 2:
+      free_stash_ids = []
+      next_stash_id = 0
+
     # Now go through all the commands. Compute the required stash size on the
     # fly. If a command requires excess stash than available, it deletes the
     # stash by replacing the command that uses the stash with a "new" command
@@ -648,19 +668,38 @@
       replaced_cmds = []
 
       # xf.stash_before generates explicit stash commands.
-      for idx, sr in xf.stash_before:
-        if stashed_blocks + sr.size() > max_allowed:
+      for stash_raw_id, sr in xf.stash_before:
+        # Check the post-command stashed_blocks.
+        stashed_blocks_after = stashed_blocks
+        if self.version == 2:
+          stashed_blocks_after += sr.size()
+        else:
+          sh = self.HashBlocks(self.src, sr)
+          if sh not in stashes:
+            stashed_blocks_after += sr.size()
+
+        if stashed_blocks_after > max_allowed:
           # We cannot stash this one for a later command. Find out the command
           # that will use this stash and replace the command with "new".
-          use_cmd = stashes[idx][2]
+          use_cmd = stash_map[stash_raw_id][2]
           replaced_cmds.append(use_cmd)
           print("%10d  %9s  %s" % (sr.size(), "explicit", use_cmd))
         else:
-          stashed_blocks += sr.size()
-
-      # xf.use_stash generates free commands.
-      for _, sr in xf.use_stash:
-        stashed_blocks -= sr.size()
+          # Update the stashes map.
+          if self.version == 2:
+            assert stash_raw_id not in stashes
+            if free_stash_ids:
+              sid = heapq.heappop(free_stash_ids)
+            else:
+              sid = next_stash_id
+              next_stash_id += 1
+            stashes[stash_raw_id] = sid
+          else:
+            if sh in stashes:
+              stashes[sh] += 1
+            else:
+              stashes[sh] = 1
+          stashed_blocks = stashed_blocks_after
 
       # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
       # ComputePatches(), they both have the style of "diff".
@@ -675,19 +714,34 @@
       for cmd in replaced_cmds:
         # It no longer uses any commands in "use_stash". Remove the def points
         # for all those stashes.
-        for idx, sr in cmd.use_stash:
-          def_cmd = stashes[idx][1]
-          assert (idx, sr) in def_cmd.stash_before
-          def_cmd.stash_before.remove((idx, sr))
+        for stash_raw_id, sr in cmd.use_stash:
+          def_cmd = stash_map[stash_raw_id][1]
+          assert (stash_raw_id, sr) in def_cmd.stash_before
+          def_cmd.stash_before.remove((stash_raw_id, sr))
 
         # Add up blocks that violates space limit and print total number to
         # screen later.
         new_blocks += cmd.tgt_ranges.size()
         cmd.ConvertToNew()
 
+      # xf.use_stash may generate free commands.
+      for stash_raw_id, sr in xf.use_stash:
+        if self.version == 2:
+          sid = stashes.pop(stash_raw_id)
+          stashed_blocks -= sr.size()
+          heapq.heappush(free_stash_ids, sid)
+        else:
+          sh = self.HashBlocks(self.src, sr)
+          assert sh in stashes
+          stashes[sh] -= 1
+          if stashes[sh] == 0:
+            stashed_blocks -= sr.size()
+            stashes.pop(sh)
+
     num_of_bytes = new_blocks * self.tgt.blocksize
     print("  Total %d blocks (%d bytes) are packed as new blocks due to "
           "insufficient cache size." % (new_blocks, num_of_bytes))
+    return new_blocks
 
   def ComputePatches(self, prefix):
     print("Reticulating splines...")
@@ -926,10 +980,21 @@
            lost_source))
 
   def ReverseBackwardEdges(self):
+    """Reverse unsatisfying edges and compute pairs of stashed blocks.
+
+    For each transfer, make sure it properly stashes the blocks it touches and
+    will be used by later transfers. It uses pairs of (stash_raw_id, range) to
+    record the blocks to be stashed. 'stash_raw_id' is an id that uniquely
+    identifies each pair. Note that for the same range (e.g. RangeSet("1-5")),
+    it is possible to have multiple pairs with different 'stash_raw_id's. Each
+    'stash_raw_id' will be consumed by one transfer. In BBOTA v3+, identical
+    blocks will be written to the same stash slot in WriteTransfers().
+    """
+
     print("Reversing backward edges...")
     in_order = 0
     out_of_order = 0
-    stashes = 0
+    stash_raw_id = 0
     stash_size = 0
 
     for xf in self.transfers:
@@ -947,9 +1012,9 @@
           overlap = xf.src_ranges.intersect(u.tgt_ranges)
           assert overlap
 
-          u.stash_before.append((stashes, overlap))
-          xf.use_stash.append((stashes, overlap))
-          stashes += 1
+          u.stash_before.append((stash_raw_id, overlap))
+          xf.use_stash.append((stash_raw_id, overlap))
+          stash_raw_id += 1
           stash_size += overlap.size()
 
           # reverse the edge direction; now xf must go after u
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 1708d86..73cd07e 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -462,6 +462,10 @@
     build_command.extend(["-L", prop_dict["mount_point"]])
     if "extfs_inode_count" in prop_dict:
       build_command.extend(["-i", prop_dict["extfs_inode_count"]])
+    if "flash_erase_block_size" in prop_dict:
+      build_command.extend(["-e", prop_dict["flash_erase_block_size"]])
+    if "flash_logical_block_size" in prop_dict:
+      build_command.extend(["-o", prop_dict["flash_logical_block_size"]])
     if "selinux_fc" in prop_dict:
       build_command.append(prop_dict["selinux_fc"])
   elif fs_type.startswith("squash"):
@@ -665,6 +669,8 @@
     copy_prop("fs_type", "fs_type")
     copy_prop("userdata_fs_type", "fs_type")
     copy_prop("userdata_size", "partition_size")
+    copy_prop("flash_logical_block_size","flash_logical_block_size")
+    copy_prop("flash_erase_block_size", "flash_erase_block_size")
   elif mount_point == "cache":
     copy_prop("cache_fs_type", "fs_type")
     copy_prop("cache_size", "partition_size")
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 4ad30ec..7b3e9ba 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -12,6 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
+
 import copy
 import errno
 import getopt
@@ -109,7 +111,7 @@
   """Create and return a subprocess.Popen object, printing the command
   line on the terminal if -v was specified."""
   if OPTIONS.verbose:
-    print "  running: ", " ".join(args)
+    print("  running: ", " ".join(args))
   return subprocess.Popen(args, **kwargs)
 
 
@@ -208,8 +210,8 @@
       if os.path.exists(system_base_fs_file):
         d["system_base_fs_file"] = system_base_fs_file
       else:
-        print "Warning: failed to find system base fs file: %s" % (
-            system_base_fs_file,)
+        print("Warning: failed to find system base fs file: %s" % (
+            system_base_fs_file,))
         del d["system_base_fs_file"]
 
     if "vendor_base_fs_file" in d:
@@ -218,8 +220,8 @@
       if os.path.exists(vendor_base_fs_file):
         d["vendor_base_fs_file"] = vendor_base_fs_file
       else:
-        print "Warning: failed to find vendor base fs file: %s" % (
-            vendor_base_fs_file,)
+        print("Warning: failed to find vendor base fs file: %s" % (
+            vendor_base_fs_file,))
         del d["vendor_base_fs_file"]
 
   try:
@@ -270,7 +272,7 @@
   try:
     data = read_helper("SYSTEM/build.prop")
   except KeyError:
-    print "Warning: could not find SYSTEM/build.prop in %s" % zip
+    print("Warning: could not find SYSTEM/build.prop in %s" % (zip,))
     data = ""
   return LoadDictionaryFromLines(data.split("\n"))
 
@@ -299,7 +301,7 @@
   try:
     data = read_helper(recovery_fstab_path)
   except KeyError:
-    print "Warning: could not find {}".format(recovery_fstab_path)
+    print("Warning: could not find {}".format(recovery_fstab_path))
     data = ""
 
   if fstab_version == 1:
@@ -331,7 +333,7 @@
           if i.startswith("length="):
             length = int(i[7:])
           else:
-            print "%s: unknown option \"%s\"" % (mount_point, i)
+            print("%s: unknown option \"%s\"" % (mount_point, i))
 
       d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                  device=pieces[2], length=length,
@@ -389,7 +391,7 @@
 
 def DumpInfoDict(d):
   for k, v in sorted(d.items()):
-    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
+    print("%-25s = (%s) %s" % (k, type(v).__name__, v))
 
 
 def AppendAVBSigningArgs(cmd):
@@ -565,15 +567,15 @@
 
   prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
-    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
+    print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
     return File.FromLocalFile(name, prebuilt_path)
 
   prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
   if os.path.exists(prebuilt_path):
-    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
+    print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
     return File.FromLocalFile(name, prebuilt_path)
 
-  print "building image from target_files %s..." % (tree_subdir,)
+  print("building image from target_files %s..." % (tree_subdir,))
 
   if info_dict is None:
     info_dict = OPTIONS.info_dict
@@ -792,11 +794,9 @@
   if pct >= 99.0:
     raise ExternalError(msg)
   elif pct >= 95.0:
-    print
-    print "  WARNING: ", msg
-    print
+    print("\n  WARNING: %s\n" % (msg,))
   elif OPTIONS.verbose:
-    print "  ", msg
+    print("  ", msg)
 
 
 def ReadApkCerts(tf_zip):
@@ -845,8 +845,8 @@
 """
 
 def Usage(docstring):
-  print docstring.rstrip("\n")
-  print COMMON_DOCSTRING
+  print(docstring.rstrip("\n"))
+  print(COMMON_DOCSTRING)
 
 
 def ParseOptions(argv,
@@ -871,7 +871,7 @@
         list(extra_long_opts))
   except getopt.GetoptError as err:
     Usage(docstring)
-    print "**", str(err), "**"
+    print("**", str(err), "**")
     sys.exit(2)
 
   for o, a in opts:
@@ -969,7 +969,7 @@
         current[i] = ""
 
       if not first:
-        print "key file %s still missing some passwords." % (self.pwfile,)
+        print("key file %s still missing some passwords." % (self.pwfile,))
         answer = raw_input("try to edit again? [y]> ").strip()
         if answer and answer[0] not in 'yY':
           raise RuntimeError("key passwords unavailable")
@@ -1029,13 +1029,13 @@
           continue
         m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
         if not m:
-          print "failed to parse password file: ", line
+          print("failed to parse password file: ", line)
         else:
           result[m.group(2)] = m.group(1)
       f.close()
     except IOError as e:
       if e.errno != errno.ENOENT:
-        print "error reading password file: ", str(e)
+        print("error reading password file: ", str(e))
     return result
 
 
@@ -1156,10 +1156,10 @@
           if x == ".py":
             f = b
           info = imp.find_module(f, [d])
-        print "loaded device-specific extensions from", path
+        print("loaded device-specific extensions from", path)
         self.module = imp.load_module("device_specific", *info)
       except ImportError:
-        print "unable to load device-specific module; assuming none"
+        print("unable to load device-specific module; assuming none")
 
   def _DoCall(self, function_name, *args, **kwargs):
     """Call the named function in the device-specific module, passing
@@ -1294,7 +1294,7 @@
       th.start()
       th.join(timeout=300)   # 5 mins
       if th.is_alive():
-        print "WARNING: diff command timed out"
+        print("WARNING: diff command timed out")
         p.terminate()
         th.join(5)
         if th.is_alive():
@@ -1302,8 +1302,8 @@
           th.join()
 
       if err or p.returncode != 0:
-        print "WARNING: failure running %s:\n%s\n" % (
-            diff_program, "".join(err))
+        print("WARNING: failure running %s:\n%s\n" % (
+            diff_program, "".join(err)))
         self.patch = None
         return None, None, None
       diff = ptemp.read()
@@ -1325,7 +1325,7 @@
 
 def ComputeDifferences(diffs):
   """Call ComputePatch on all the Difference objects in 'diffs'."""
-  print len(diffs), "diffs to compute"
+  print(len(diffs), "diffs to compute")
 
   # Do the largest files first, to try and reduce the long-pole effect.
   by_size = [(i.tf.size, i) for i in diffs]
@@ -1351,13 +1351,13 @@
         else:
           name = "%s (%s)" % (tf.name, sf.name)
         if patch is None:
-          print "patching failed!                                  %s" % (name,)
+          print("patching failed!                                  %s" % (name,))
         else:
-          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
-              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
+          print("%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
+              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name))
       lock.release()
     except Exception as e:
-      print e
+      print(e)
       raise
 
   # start worker threads; wait for them all to finish.
@@ -1736,6 +1736,6 @@
     if found:
       break
 
-  print "putting script in", sh_location
+  print("putting script in", sh_location)
 
   output_sink(sh_location, sh)
diff --git a/tools/releasetools/img_from_target_files.py b/tools/releasetools/img_from_target_files.py
index 84e0e63..fd98ad2 100755
--- a/tools/releasetools/img_from_target_files.py
+++ b/tools/releasetools/img_from_target_files.py
@@ -26,10 +26,12 @@
 
 """
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
 import os
@@ -111,7 +113,7 @@
           recovery_image.AddToZip(output_zip)
 
       def banner(s):
-        print "\n\n++++ " + s + " ++++\n\n"
+        print("\n\n++++ " + s + " ++++\n\n")
 
       if not bootable_only:
         banner("AddSystem")
@@ -128,11 +130,11 @@
         add_img_to_target_files.AddCache(output_zip, prefix="")
 
   finally:
-    print "cleaning up..."
+    print("cleaning up...")
     common.ZipClose(output_zip)
     shutil.rmtree(OPTIONS.input_tmp)
 
-  print "done."
+  print("done.")
 
 
 if __name__ == '__main__':
@@ -140,7 +142,5 @@
     common.CloseInheritedPipes()
     main(sys.argv[1:])
   except common.ExternalError as e:
-    print
-    print "   ERROR: %s" % (e,)
-    print
+    print("\n   ERROR: %s\n" % (e,))
     sys.exit(1)
diff --git a/tools/releasetools/make_recovery_patch.py b/tools/releasetools/make_recovery_patch.py
index 08d1450..7c6007e 100755
--- a/tools/releasetools/make_recovery_patch.py
+++ b/tools/releasetools/make_recovery_patch.py
@@ -14,10 +14,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
 import os
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 4d5b8b8..72e00b2 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -75,9 +75,6 @@
   -e  (--extra_script)  <file>
       Insert the contents of file at the end of the update script.
 
-  -a  (--aslr_mode)  <on|off>
-      Specify whether to turn on ASLR for the package (on by default).
-
   -2  (--two_step)
       Generate a 'two-step' OTA package, where recovery is updated
       first, so that any changes made to the system partition are done
@@ -121,14 +118,17 @@
       Specify the arguments needed for payload signer.
 """
 
+from __future__ import print_function
+
 import sys
 
 if sys.hexversion < 0x02070000:
-  print >> sys.stderr, "Python 2.7 or newer is required."
+  print("Python 2.7 or newer is required.", file=sys.stderr)
   sys.exit(1)
 
+import copy
 import multiprocessing
-import os
+import os.path
 import subprocess
 import shlex
 import tempfile
@@ -148,7 +148,6 @@
 OPTIONS.wipe_user_data = False
 OPTIONS.downgrade = False
 OPTIONS.extra_script = None
-OPTIONS.aslr_mode = True
 OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
 if OPTIONS.worker_threads == 0:
   OPTIONS.worker_threads = 1
@@ -169,6 +168,8 @@
 OPTIONS.payload_signer = None
 OPTIONS.payload_signer_args = []
 
+METADATA_NAME = 'META-INF/com/android/metadata'
+
 def MostPopularKey(d, default):
   """Given a dict, return the key corresponding to the largest
   value.  Returns 'default' if the dict is empty."""
@@ -292,14 +293,14 @@
 
   def Dump(self, indent=0):
     if self.uid is not None:
-      print "%s%s %d %d %o" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode)
+      print("%s%s %d %d %o" % (
+          "  " * indent, self.name, self.uid, self.gid, self.mode))
     else:
-      print "%s%s %s %s %s" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode)
+      print("%s%s %s %s %s" % (
+          "  " * indent, self.name, self.uid, self.gid, self.mode))
     if self.is_dir:
-      print "%s%s" % ("  "*indent, self.descendants)
-      print "%s%s" % ("  "*indent, self.best_subtree)
+      print("%s%s" % ("  " * indent, self.descendants))
+      print("%s%s" % ("  " * indent, self.best_subtree))
       for i in self.children:
         i.Dump(indent=indent+1)
 
@@ -417,7 +418,6 @@
         symlinks.append((input_zip.read(info.filename),
                          "/" + partition + "/" + basefilename))
       else:
-        import copy
         info2 = copy.copy(info)
         fn = info2.filename = partition + "/" + basefilename
         if substitute and fn in substitute and substitute[fn] is None:
@@ -485,11 +485,11 @@
         OPTIONS.input_tmp, "RECOVERY")
     common.ZipWriteStr(
         output_zip, recovery_two_step_img_name, recovery_two_step_img.data)
-    print "two-step package: using %s in stage 1/3" % (
-        recovery_two_step_img_name,)
+    print("two-step package: using %s in stage 1/3" % (
+        recovery_two_step_img_name,))
     script.WriteRawImage("/boot", recovery_two_step_img_name)
   else:
-    print "two-step package: using recovery.img in stage 1/3"
+    print("two-step package: using recovery.img in stage 1/3")
     # The "recovery.img" entry has been written into package earlier.
     script.WriteRawImage("/boot", "recovery.img")
 
@@ -533,11 +533,11 @@
   path = os.path.join(tmpdir, "IMAGES", which + ".img")
   mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
   if os.path.exists(path) and os.path.exists(mappath):
-    print "using %s.img from target-files" % (which,)
+    print("using %s.img from target-files" % (which,))
     # This is a 'new' target-files, which already has the image in it.
 
   else:
-    print "building %s.img from target-files" % (which,)
+    print("building %s.img from target-files" % (which,))
 
     # This is an 'old' target-files, which does not contain images
     # already built.  Build them.
@@ -776,9 +776,9 @@
 
 
 def WriteMetadata(metadata, output_zip):
-  common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
-                     "".join(["%s=%s\n" % kv
-                              for kv in sorted(metadata.iteritems())]))
+  value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
+  common.ZipWriteStr(output_zip, METADATA_NAME, value,
+                     compress_type=zipfile.ZIP_STORED)
 
 
 def LoadPartitionFiles(z, partition):
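A sketch of the serialization WriteMetadata() emits, and why the entry is now stored uncompressed (the sample keys and values are made up):

    # The metadata file is a sorted list of 'key=value' lines. Writing it
    # ZIP_STORED keeps its bytes verbatim and contiguous in the archive, so
    # a client can later read the entry with a single offset/length pair.
    metadata = {"ota-type": "AB", "pre-device": "sailfish"}  # sample values
    value = "".join(["%s=%s\n" % kv for kv in sorted(metadata.iteritems())])
    assert value == "ota-type=AB\npre-device=sailfish\n"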
@@ -815,6 +815,32 @@
     dirs.pop()
 
 
+def HandleDowngradeMetadata(metadata):
+  # Only incremental OTAs are allowed to reach here.
+  assert OPTIONS.incremental_source is not None
+
+  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
+  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
+  is_downgrade = long(post_timestamp) < long(pre_timestamp)
+
+  if OPTIONS.downgrade:
+    metadata["ota-downgrade"] = "yes"
+    if not is_downgrade:
+      raise RuntimeError("--downgrade specified but no downgrade detected: "
+                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+  else:
+    if is_downgrade:
+      # Non-fatal here to allow generating such a package which may require
+      # manual work to adjust the post-timestamp. A legit use case is that we
+      # cut a new build C (after having A and B), but want to enforce the
+      # update path of A -> C -> B. Specifying --downgrade may not help since
+      # that would enforce a data wipe for C -> B update.
+      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
+            "The package may not be deployed properly. "
+            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
+    metadata["post-timestamp"] = post_timestamp
+
+
 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
   # TODO(tbao): We should factor out the common parts between
   # WriteBlockIncrementalOTAPackage() and WriteIncrementalOTAPackage().
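In effect, HandleDowngradeMetadata() keys off the two ro.build.date.utc values; a hedged sketch of its decision table, with made-up timestamps:

    pre_timestamp = "1500000000"   # source build date (made up)
    post_timestamp = "1400000000"  # target build date (made up): older
    is_downgrade = long(post_timestamp) < long(pre_timestamp)  # True

    # With --downgrade:    metadata["ota-downgrade"] = "yes"; it is an error
    #                      if the target is not actually older than the source.
    # Without --downgrade: a warning is printed but "post-timestamp" is still
    #                      written, so the package may not be deployed properly.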
@@ -847,26 +873,7 @@
       "ota-type": "BLOCK",
   }
 
-  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
-  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
-  is_downgrade = long(post_timestamp) < long(pre_timestamp)
-
-  if OPTIONS.downgrade:
-    metadata["ota-downgrade"] = "yes"
-    if not is_downgrade:
-      raise RuntimeError("--downgrade specified but no downgrade detected: "
-                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
-  else:
-    if is_downgrade:
-      # Non-fatal here to allow generating such a package which may require
-      # manual work to adjust the post-timestamp. A legit use case is that we
-      # cut a new build C (after having A and B), but want to enfore the
-      # update path of A -> C -> B. Specifying --downgrade may not help since
-      # that would enforce a data wipe for C -> B update.
-      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
-            "The package may not be deployed properly. "
-            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
-    metadata["post-timestamp"] = post_timestamp
+  HandleDowngradeMetadata(metadata)
 
   device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
@@ -1048,8 +1055,8 @@
     else:
       include_full_boot = False
 
-      print "boot      target: %d  source: %d  diff: %d" % (
-          target_boot.size, source_boot.size, len(d))
+      print("boot      target: %d  source: %d  diff: %d" % (
+          target_boot.size, source_boot.size, len(d)))
 
       common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
 
@@ -1095,19 +1102,19 @@
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
     script.WriteRawImage("/boot", "boot.img")
-    print "writing full boot image (forced by two-step mode)"
+    print("writing full boot image (forced by two-step mode)")
 
   if not OPTIONS.two_step:
     if updating_boot:
       if include_full_boot:
-        print "boot image changed; including full."
+        print("boot image changed; including full.")
         script.Print("Installing boot image...")
         script.WriteRawImage("/boot", "boot.img")
       else:
         # Produce the boot image by applying a patch to the current
         # contents of the boot partition, and write it back to the
         # partition.
-        print "boot image changed; including patch."
+        print("boot image changed; including patch.")
         script.Print("Patching boot image...")
         script.ShowProgress(0.1, 10)
         script.ApplyPatch("%s:%s:%d:%s:%d:%s"
@@ -1118,7 +1125,7 @@
                           target_boot.size, target_boot.sha1,
                           source_boot.sha1, "patch/boot.img.p")
     else:
-      print "boot image unchanged; skipping."
+      print("boot image unchanged; skipping.")
 
   # Do device-specific installation (eg, write radio image).
   device_specific.IncrementalOTA_InstallEnd()
@@ -1228,6 +1235,53 @@
                                       source_file=None):
   """Generate an Android OTA package that has A/B update payload."""
 
+  def ComputeStreamingMetadata(zip_file, reserve_space=False,
+                               expected_length=None):
+    """Compute the streaming metadata for a given zip.
+
+    When 'reserve_space' is True, we reserve extra space for the offset and
+    length of the metadata entry itself, although we don't know the final
+    values until the package gets signed. This function will be called again
+    after signing. We then write the actual values and pad the string to the
+    length we set earlier. Note that we can't use the actual length of the
+    metadata entry in the second run; otherwise the offsets for the other
+    entries would shift again.
+    """
+
+    def ComputeEntryOffsetSize(name):
+      """Compute the zip entry offset and size."""
+      info = zip_file.getinfo(name)
+      offset = info.header_offset + len(info.FileHeader())
+      size = info.file_size
+      return '%s:%d:%d' % (os.path.basename(name), offset, size)
+
+    # payload.bin and payload_properties.txt must exist.
+    offsets = [ComputeEntryOffsetSize('payload.bin'),
+               ComputeEntryOffsetSize('payload_properties.txt')]
+
+    # care_map.txt is available only if dm-verity is enabled.
+    if 'care_map.txt' in zip_file.namelist():
+      offsets.append(ComputeEntryOffsetSize('care_map.txt'))
+
+    # 'META-INF/com/android/metadata' is required. We don't know its actual
+    # offset and length (nor the final values for other entries). So we
+    # reserve 10 bytes as a placeholder, which covers the space for the
+    # metadata entry ('xx:xxx', since it's ZIP_STORED and should appear at
+    # the beginning of the zip), as well as possible value changes in other
+    # entries.
+    if reserve_space:
+      offsets.append('metadata:' + ' ' * 10)
+    else:
+      offsets.append(ComputeEntryOffsetSize(METADATA_NAME))
+
+    value = ','.join(offsets)
+    if expected_length is not None:
+      assert len(value) <= expected_length, \
+          'Insufficient reserved space: reserved=%d, actual=%d' % (
+              expected_length, len(value))
+      value += ' ' * (expected_length - len(value))
+    return value
+
   # The place where the output from the subprocess should go.
   log_file = sys.stdout if OPTIONS.verbose else subprocess.PIPE
 
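The resulting 'ota-streaming-property-files' value is a comma-joined list of 'name:offset:size' triples. A minimal sketch of computing one triple against an on-disk zip, mirroring ComputeEntryOffsetSize() (the file and entry names are illustrative):

    import os.path
    import zipfile

    def entry_offset_size(zip_path, name):
      # The entry's data begins right after its local file header, so its
      # offset is header_offset plus the rendered header length.
      z = zipfile.ZipFile(zip_path)
      info = z.getinfo(name)
      offset = info.header_offset + len(info.FileHeader())
      z.close()
      return '%s:%d:%d' % (os.path.basename(name), offset, info.file_size)

    # e.g. entry_offset_size('ota.zip', 'payload.bin') might yield
    # 'payload.bin:679:123456789' (numbers made up); a streaming client can
    # then fetch exactly those bytes, e.g. with an HTTP range request.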
@@ -1270,7 +1324,6 @@
                                               OPTIONS.info_dict),
       "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
                                    OPTIONS.info_dict),
-      "post-timestamp": GetBuildProp("ro.build.date.utc", OPTIONS.info_dict),
       "ota-required-cache": "0",
       "ota-type": "AB",
   }
@@ -1281,6 +1334,11 @@
     metadata["pre-build-incremental"] = GetBuildProp(
         "ro.build.version.incremental", OPTIONS.source_info_dict)
 
+    HandleDowngradeMetadata(metadata)
+  else:
+    metadata["post-timestamp"] = GetBuildProp(
+        "ro.build.date.utc", OPTIONS.info_dict)
+
   # 1. Generate payload.
   payload_file = common.MakeTempFile(prefix="payload-", suffix=".bin")
   cmd = ["brillo_update_payload", "generate",
@@ -1365,11 +1423,15 @@
       f.write("POWERWASH=1\n")
     metadata["ota-wipe"] = "yes"
 
-  # Add the signed payload file and properties into the zip.
-  common.ZipWrite(output_zip, properties_file, arcname="payload_properties.txt")
+  # Add the signed payload file and properties into the zip. In order to
+  # support streaming, we pack payload.bin, payload_properties.txt and
+  # care_map.txt as ZIP_STORED, so that these entries can be read directly
+  # with their offset and length pairs.
   common.ZipWrite(output_zip, signed_payload_file, arcname="payload.bin",
                   compress_type=zipfile.ZIP_STORED)
-  WriteMetadata(metadata, output_zip)
+  common.ZipWrite(output_zip, properties_file,
+                  arcname="payload_properties.txt",
+                  compress_type=zipfile.ZIP_STORED)
 
   # If dm-verity is supported for the device, copy contents of care_map
   # into A/B OTA package.
@@ -1379,23 +1441,70 @@
     namelist = target_zip.namelist()
     if care_map_path in namelist:
       care_map_data = target_zip.read(care_map_path)
-      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data)
+      common.ZipWriteStr(output_zip, "care_map.txt", care_map_data,
+          compress_type=zipfile.ZIP_STORED)
     else:
-      print "Warning: cannot find care map file in target_file package"
+      print("Warning: cannot find care map file in target_files package")
     common.ZipClose(target_zip)
 
-  # Sign the whole package to comply with the Android OTA package format.
+  # Write the current metadata entry with placeholders.
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      output_zip, reserve_space=True)
+  WriteMetadata(metadata, output_zip)
   common.ZipClose(output_zip)
-  SignOutput(temp_zip_file.name, output_file)
-  temp_zip_file.close()
+
+  # SignOutput(), which in turn calls signapk.jar, may reorder the zip
+  # entries and pad the entry headers. We do a preliminary signing (with an
+  # incomplete metadata entry) to let that happen. Then we compute the zip
+  # entry offsets, write back the final metadata, and do the final
+  # signing.
+  prelim_signing = tempfile.NamedTemporaryFile()
+  SignOutput(temp_zip_file.name, prelim_signing.name)
+  common.ZipClose(temp_zip_file)
+
+  # Open the signed zip. Compute the final metadata that's needed for streaming.
+  prelim_zip = zipfile.ZipFile(prelim_signing, "r",
+                               compression=zipfile.ZIP_DEFLATED)
+  expected_length = len(metadata['ota-streaming-property-files'])
+  metadata['ota-streaming-property-files'] = ComputeStreamingMetadata(
+      prelim_zip, reserve_space=False, expected_length=expected_length)
+
+  # Copy the zip entries, as we cannot update / delete entries with zipfile.
+  final_signing = tempfile.NamedTemporaryFile()
+  output_zip = zipfile.ZipFile(final_signing, "w",
+                               compression=zipfile.ZIP_DEFLATED)
+  for item in prelim_zip.infolist():
+    if item.filename == METADATA_NAME:
+      continue
+
+    data = prelim_zip.read(item.filename)
+    out_info = copy.copy(item)
+    common.ZipWriteStr(output_zip, out_info, data)
+
+  # Now write the final metadata entry.
+  WriteMetadata(metadata, output_zip)
+  common.ZipClose(prelim_zip)
+  common.ZipClose(output_zip)
+
+  # Re-sign the package after updating the metadata entry.
+  SignOutput(final_signing.name, output_file)
+  final_signing.close()
+
+  # Reopen the final signed zip to double check the streaming metadata.
+  output_zip = zipfile.ZipFile(output_file, "r")
+  actual = metadata['ota-streaming-property-files'].strip()
+  expected = ComputeStreamingMetadata(output_zip)
+  assert actual == expected, \
+      "Mismatching streaming metadata: %s vs %s." % (actual, expected)
+  common.ZipClose(output_zip)
 
 
 class FileDifference(object):
   def __init__(self, partition, source_zip, target_zip, output_zip):
     self.deferred_patch_list = None
-    print "Loading target..."
+    print("Loading target...")
     self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
-    print "Loading source..."
+    print("Loading source...")
     self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
 
     self.verbatim_targets = verbatim_targets = []
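Since zipfile cannot update or delete entries in place, the final metadata is written by copying every other entry into a fresh archive, as above. A condensed sketch of that copy-and-replace step (the helper name is made up):

    import copy
    import zipfile

    def replace_entry(src_path, dst_path, name, data):
      src = zipfile.ZipFile(src_path, 'r')
      dst = zipfile.ZipFile(dst_path, 'w', compression=zipfile.ZIP_DEFLATED)
      for item in src.infolist():
        if item.filename == name:
          continue  # skip the stale entry; it gets rewritten below
        # Passing the copied ZipInfo preserves each entry's compress_type.
        dst.writestr(copy.copy(item), src.read(item.filename))
      dst.writestr(name, data, zipfile.ZIP_STORED)  # rewrite uncompressed
      src.close()
      dst.close()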
@@ -1422,14 +1531,14 @@
       assert fn == tf.name
       sf = ClosestFileMatch(tf, matching_file_cache, renames)
       if sf is not None and sf.name != tf.name:
-        print "File has moved from " + sf.name + " to " + tf.name
+        print("File has moved from " + sf.name + " to " + tf.name)
         renames[sf.name] = tf
 
       if sf is None or fn in OPTIONS.require_verbatim:
         # This file should be included verbatim
         if fn in OPTIONS.prohibit_verbatim:
           raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
-        print "send", fn, "verbatim"
+        print("send", fn, "verbatim")
         tf.AddToZip(output_zip)
         verbatim_targets.append((fn, tf.size, tf.sha1))
         if fn in target_data.keys():
@@ -1517,7 +1626,7 @@
     if len(self.renames) > 0:
       script.Print("Renaming files...")
       for src, tgt in self.renames.iteritems():
-        print "Renaming " + src + " to " + tgt.name
+        print("Renaming " + src + " to " + tgt.name)
         script.RenameFile(src, tgt.name)
 
 
@@ -1559,26 +1668,7 @@
       "ota-type": "FILE",
   }
 
-  post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
-  pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
-  is_downgrade = long(post_timestamp) < long(pre_timestamp)
-
-  if OPTIONS.downgrade:
-    metadata["ota-downgrade"] = "yes"
-    if not is_downgrade:
-      raise RuntimeError("--downgrade specified but no downgrade detected: "
-                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
-  else:
-    if is_downgrade:
-      # Non-fatal here to allow generating such a package which may require
-      # manual work to adjust the post-timestamp. A legit use case is that we
-      # cut a new build C (after having A and B), but want to enfore the
-      # update path of A -> C -> B. Specifying --downgrade may not help since
-      # that would enforce a data wipe for C -> B update.
-      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
-            "The package may not be deployed properly. "
-            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
-    metadata["post-timestamp"] = post_timestamp
+  HandleDowngradeMetadata(metadata)
 
   device_specific = common.DeviceSpecificParams(
       source_zip=source_zip,
@@ -1719,8 +1809,8 @@
   if updating_boot:
     d = common.Difference(target_boot, source_boot)
     _, _, d = d.ComputePatch()
-    print "boot      target: %d  source: %d  diff: %d" % (
-        target_boot.size, source_boot.size, len(d))
+    print("boot      target: %d  source: %d  diff: %d" % (
+        target_boot.size, source_boot.size, len(d)))
 
     common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
 
@@ -1759,7 +1849,7 @@
   if OPTIONS.two_step:
     common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
     script.WriteRawImage("/boot", "boot.img")
-    print "writing full boot image (forced by two-step mode)"
+    print("writing full boot image (forced by two-step mode)")
 
   script.Print("Removing unneeded files...")
   system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
@@ -1794,9 +1884,9 @@
                         source_boot.sha1, "patch/boot.img.p")
       so_far += target_boot.size
       script.SetProgress(so_far / total_patch_size)
-      print "boot image changed; including."
+      print("boot image changed; including.")
     else:
-      print "boot image unchanged; skipping."
+      print("boot image unchanged; skipping.")
 
   system_items = ItemSet("system", "META/filesystem_config.txt")
   if vendor_diff:
@@ -1822,9 +1912,9 @@
       script.DeleteFiles(["/system/recovery-from-boot.p",
                           "/system/etc/recovery.img",
                           "/system/etc/install-recovery.sh"])
-    print "recovery image changed; including as patch from boot."
+    print("recovery image changed; including as patch from boot.")
   else:
-    print "recovery image unchanged; skipping."
+    print("recovery image unchanged; skipping.")
 
   script.ShowProgress(0.1, 10)
 
@@ -1987,11 +2077,6 @@
       OPTIONS.oem_no_mount = True
     elif o in ("-e", "--extra_script"):
       OPTIONS.extra_script = a
-    elif o in ("-a", "--aslr_mode"):
-      if a in ("on", "On", "true", "True", "yes", "Yes"):
-        OPTIONS.aslr_mode = True
-      else:
-        OPTIONS.aslr_mode = False
     elif o in ("-t", "--worker_threads"):
       if a.isdigit():
         OPTIONS.worker_threads = int(a)
@@ -2029,7 +2114,7 @@
     return True
 
   args = common.ParseOptions(argv, __doc__,
-                             extra_opts="b:k:i:d:we:t:a:2o:",
+                             extra_opts="b:k:i:d:we:t:2o:",
                              extra_long_opts=[
                                  "board_config=",
                                  "package_key=",
@@ -2040,7 +2125,6 @@
                                  "downgrade",
                                  "extra_script=",
                                  "worker_threads=",
-                                 "aslr_mode=",
                                  "two_step",
                                  "no_signing",
                                  "block",
@@ -2087,11 +2171,11 @@
       common.ZipClose(source_zip)
 
     if OPTIONS.verbose:
-      print "--- target info ---"
+      print("--- target info ---")
       common.DumpInfoDict(OPTIONS.info_dict)
 
       if OPTIONS.incremental_source is not None:
-        print "--- source info ---"
+        print("--- source info ---")
         common.DumpInfoDict(OPTIONS.source_info_dict)
 
     WriteABOTAPackageWithBrilloScript(
@@ -2099,20 +2183,20 @@
         output_file=args[1],
         source_file=OPTIONS.incremental_source)
 
-    print "done."
+    print("done.")
     return
 
   if OPTIONS.extra_script is not None:
     OPTIONS.extra_script = open(OPTIONS.extra_script).read()
 
-  print "unzipping target target-files..."
+  print("unzipping target target-files...")
   OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
 
   OPTIONS.target_tmp = OPTIONS.input_tmp
   OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp)
 
   if OPTIONS.verbose:
-    print "--- target info ---"
+    print("--- target info ---")
     common.DumpInfoDict(OPTIONS.info_dict)
 
   # If the caller explicitly specified the device-specific extensions
@@ -2125,7 +2209,7 @@
   if OPTIONS.device_specific is None:
     from_input = os.path.join(OPTIONS.input_tmp, "META", "releasetools.py")
     if os.path.exists(from_input):
-      print "(using device-specific extensions from target_files)"
+      print("(using device-specific extensions from target_files)")
       OPTIONS.device_specific = from_input
     else:
       OPTIONS.device_specific = OPTIONS.info_dict.get("tool_extensions", None)
@@ -2158,7 +2242,7 @@
   # Non A/B OTAs rely on /cache partition to store temporary files.
   cache_size = OPTIONS.info_dict.get("cache_size", None)
   if cache_size is None:
-    print "--- can't determine the cache partition size ---"
+    print("--- can't determine the cache partition size ---")
   OPTIONS.cache_size = cache_size
 
   # Generate a verify package.
@@ -2172,14 +2256,14 @@
   # Generate an incremental OTA. It will fall back to generate a full OTA on
   # failure unless no_fallback_to_full is specified.
   else:
-    print "unzipping source target-files..."
+    print("unzipping source target-files...")
     OPTIONS.source_tmp, source_zip = common.UnzipTemp(
         OPTIONS.incremental_source)
     OPTIONS.target_info_dict = OPTIONS.info_dict
     OPTIONS.source_info_dict = common.LoadInfoDict(source_zip,
                                                    OPTIONS.source_tmp)
     if OPTIONS.verbose:
-      print "--- source info ---"
+      print("--- source info ---")
       common.DumpInfoDict(OPTIONS.source_info_dict)
     try:
       WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
@@ -2194,7 +2278,7 @@
     except ValueError:
       if not OPTIONS.fallback_to_full:
         raise
-      print "--- failed to build incremental; falling back to full ---"
+      print("--- failed to build incremental; falling back to full ---")
       OPTIONS.incremental_source = None
       WriteFullOTAPackage(input_zip, output_zip)
 
@@ -2205,7 +2289,7 @@
     SignOutput(temp_zip_file.name, args[1])
     temp_zip_file.close()
 
-  print "done."
+  print("done.")
 
 
 if __name__ == '__main__':
@@ -2213,9 +2297,7 @@
     common.CloseInheritedPipes()
     main(sys.argv[1:])
   except common.ExternalError as e:
-    print
-    print "   ERROR: %s" % (e,)
-    print
+    print("\n   ERROR: %s\n" % (e,))
     sys.exit(1)
   finally:
     common.Cleanup()
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 03e8c8b..cc1fa23 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -19,8 +19,7 @@
 import common
 import unittest
 
-from collections import OrderedDict
-from blockimgdiff import BlockImageDiff, EmptyImage, DataImage, Transfer
+from blockimgdiff import BlockImageDiff, EmptyImage, Transfer
 from rangelib import RangeSet
 
 class BlockImageDiffTest(unittest.TestCase):
@@ -75,3 +74,70 @@
     self.assertEqual(t2, elements[0])
     self.assertEqual(t0, elements[1])
     self.assertEqual(t1, elements[2])
+
+  def test_ReviseStashSize(self):
+    """ReviseStashSize should convert transfers to 'new' commands as needed.
+
+    t1: diff <20-29> => <11-15>
+    t2: diff <11-15> => <20-29>
+    """
+
+    src = EmptyImage()
+    tgt = EmptyImage()
+    block_image_diff = BlockImageDiff(tgt, src, version=3)
+
+    transfers = block_image_diff.transfers
+    Transfer("t1", "t1", RangeSet("11-15"), RangeSet("20-29"), "diff",
+             transfers)
+    Transfer("t2", "t2", RangeSet("20-29"), RangeSet("11-15"), "diff",
+             transfers)
+
+    block_image_diff.GenerateDigraph()
+    block_image_diff.FindVertexSequence()
+    block_image_diff.ReverseBackwardEdges()
+
+    # Sufficient cache to stash 5 blocks (size * 0.8 >= 5).
+    common.OPTIONS.cache_size = 7 * 4096
+    self.assertEqual(0, block_image_diff.ReviseStashSize())
+
+    # Insufficient cache to stash 5 blocks (size * 0.8 < 5).
+    common.OPTIONS.cache_size = 6 * 4096
+    self.assertEqual(10, block_image_diff.ReviseStashSize())
+
+  def test_ReviseStashSize_bug_33687949(self):
+    """ReviseStashSize() should "free" the used stash _after_ the command.
+
+    t1: diff <1-5> => <11-15>
+    t2: diff <11-15> => <21-25>
+    t3: diff <11-15 30-39> => <1-5 30-39>
+
+    For transfer t3, the used stash "11-15" should not be freed until the
+    command finishes. Assuming an allowed cache size of 12 blocks, it should
+    convert the command to 'new' due to insufficient cache (12 < 5 + 10).
+    """
+
+    src = EmptyImage()
+    tgt = EmptyImage()
+    block_image_diff = BlockImageDiff(tgt, src, version=3)
+
+    transfers = block_image_diff.transfers
+    t1 = Transfer("t1", "t1", RangeSet("11-15"), RangeSet("1-5"), "diff",
+                  transfers)
+    t2 = Transfer("t2", "t2", RangeSet("21-25"), RangeSet("11-15"), "diff",
+                  transfers)
+    t3 = Transfer("t3", "t3", RangeSet("1-5 30-39"), RangeSet("11-15 30-39"),
+                  "diff", transfers)
+
+    block_image_diff.GenerateDigraph()
+
+    # Instead of calling FindVertexSequence() and ReverseBackwardEdges(), we
+    # just set up the stash_before and use_stash manually. Otherwise it will
+    # reorder the transfer, which makes testing ReviseStashSize() harder.
+    t1.stash_before.append((0, RangeSet("11-15")))
+    t2.use_stash.append((0, RangeSet("11-15")))
+    t1.stash_before.append((1, RangeSet("11-15")))
+    t3.use_stash.append((1, RangeSet("11-15")))
+
+    # Insufficient cache to stash 15 blocks (size * 0.8 < 15).
+    common.OPTIONS.cache_size = 15 * 4096
+    self.assertEqual(15, block_image_diff.ReviseStashSize())
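The cache_size values in these tests follow from the stash budget noted in the comments, size * 0.8, together with a 4096-byte block size; the arithmetic, spelled out:

    BLOCK_SIZE = 4096  # assumed block size, matching the 4096 factors above

    def max_stashed_blocks(cache_size):
      # At most 80% of the cache may be used for stashing, per the test
      # comments; this helper only restates that rule.
      return cache_size * 0.8 / BLOCK_SIZE

    print(max_stashed_blocks(7 * 4096))   # 5.6  >= 5  -> nothing revised
    print(max_stashed_blocks(6 * 4096))   # 4.8  <  5  -> 10 blocks go 'new'
    print(max_stashed_blocks(15 * 4096))  # 12.0 < 15  -> t3 (15 blocks) goes 'new'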