[DO NOT MERGE] Update Security String to 2017-02-01 on mnc-dev
am: f92880e0c7
Change-Id: Ib50d3f7d336d9e6b6f7edcac63c8927fc17aa780
diff --git a/core/Makefile b/core/Makefile
index becd51d..2af421d 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -523,14 +523,14 @@
$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER)
$(call pretty,"Target boot image: $@")
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $@.unsigned
- $(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $@.keyblock $@
+ $(VBOOT_SIGNER) $(FUTILITY) $@.unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $@.keyblock $@
$(hide) $(call assert-max-image-size,$@,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
.PHONY: bootimage-nodeps
bootimage-nodeps: $(MKBOOTIMG) $(VBOOT_SIGNER)
@echo "make $@: ignoring dependencies"
$(hide) $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(INSTALLED_BOOTIMAGE_TARGET).unsigned
- $(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET)
+ $(VBOOT_SIGNER) $(FUTILITY) $(INSTALLED_BOOTIMAGE_TARGET).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(INSTALLED_BOOTIMAGE_TARGET).keyblock $(INSTALLED_BOOTIMAGE_TARGET)
$(hide) $(call assert-max-image-size,$(INSTALLED_BOOTIMAGE_TARGET),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
else # PRODUCT_SUPPORTS_VBOOT != true
@@ -752,11 +752,12 @@
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER),$(hide) echo "boot_signer=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity_key=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY)" >> $(1))
-$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity_signer_cmd=$(VERITY_SIGNER)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),$(hide) echo "verity_signer_cmd=$(notdir $(VERITY_SIGNER))" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_VERITY_PARTITION),$(hide) echo "system_verity_block_device=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SYSTEM_VERITY_PARTITION)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_VERITY_PARTITION),$(hide) echo "vendor_verity_block_device=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VENDOR_VERITY_PARTITION)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_key=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY)" >> $(1))
+$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_subkey=$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "futility=$(FUTILITY)" >> $(1))
$(if $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT),$(hide) echo "vboot_signer_cmd=$(VBOOT_SIGNER)" >> $(1))
$(if $(filter true,$(BOARD_BUILD_SYSTEM_ROOT_IMAGE)),\
@@ -893,7 +894,7 @@
$(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_BOOT_SIGNER)),\
$(BOOT_SIGNER) /recovery $(1) $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1))
$(if $(filter true,$(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VBOOT)), \
- $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(1).keyblock $(1))
+ $(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1))
$(hide) $(call assert-max-image-size,$(1),$(BOARD_RECOVERYIMAGE_PARTITION_SIZE))
@echo ----- Made recovery image: $(1) --------
endef
@@ -1340,13 +1341,15 @@
$(BUILT_OTATOOLS_PACKAGE): $(OTATOOLS) | $(ACP)
@echo "Package OTA tools: $@"
$(hide) rm -rf $@ $(zip_root)
- $(hide) mkdir -p $(dir $@) $(zip_root)/bin $(zip_root)/framework $(zip_root)/releasetools
+ $(hide) mkdir -p $(dir $@) $(zip_root)/bin $(zip_root)/framework $(zip_root)/releasetools $(zip_root)/system/extras/verity
$(call copy-files-with-structure,$(OTATOOLS),$(HOST_OUT)/,$(zip_root))
+ $(hide) $(ACP) $(HOST_OUT_JAVA_LIBRARIES)/VeritySigner.jar $(zip_root)/framework/
+ $(hide) $(ACP) -p system/extras/verity/build_verity_metadata.py $(zip_root)/system/extras/verity/
$(hide) $(ACP) -r -d -p build/tools/releasetools/* $(zip_root)/releasetools
$(hide) rm -rf $@ $(zip_root)/releasetools/*.pyc
$(hide) (cd $(zip_root) && zip -qry $(abspath $@) *)
$(hide) zip -qry $(abspath $@) build/target/product/security/
- $(hide) find device vendor -name \*.pk8 -o -name \*.x509.pem -o -name oem\*.prop | xargs zip -qry $(abspath $@)>/dev/null || true
+ $(hide) find device vendor -name \*.pk8 -o -name verifiedboot\* -o -name \*.x509.pem -o -name oem\*.prop | xargs zip -qry $(abspath $@)>/dev/null || true
.PHONY: otatools-package
otatools-package: $(BUILT_OTATOOLS_PACKAGE)
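
Note: with this change the vboot signer takes one extra positional argument - the kernel subkey's .vbprivk - inserted between the data signing key and the output keyblock path (the same ordering appears in the common.py hunk below). A minimal sketch of the resulting invocation, assuming the devkeys defaults from target/product/vboot.mk and an illustrative vboot_signer.sh wrapper name:

    import subprocess

    KEY = "external/vboot_reference/tests/devkeys/kernel_data_key"
    SUBKEY = "external/vboot_reference/tests/devkeys/kernel_subkey"

    # <signer> <futility> <unsigned> <key.vbpubk> <key.vbprivk>
    #          <subkey.vbprivk> <out.keyblock> <out>
    cmd = ["vboot_signer.sh", "futility", "boot.img.unsigned",
           KEY + ".vbpubk", KEY + ".vbprivk", SUBKEY + ".vbprivk",
           "boot.img.keyblock", "boot.img"]
    subprocess.check_call(cmd)
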
diff --git a/core/definitions.mk b/core/definitions.mk
index 78b0a39..9dea18c 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -175,16 +175,53 @@
endef
###########################################################
+## Find all of the directories under the named directories with
+## the specified name.
+## Meant to be used like:
+## INC_DIRS := $(call all-named-dirs-under,inc,.)
+###########################################################
+
+define all-named-dirs-under
+$(call find-subdir-files,$(2) -type d -name "$(1)")
+endef
+
+###########################################################
+## Find all the directories under the current directory whose
+## names match $(1)
+###########################################################
+
+define all-subdir-named-dirs
+$(call all-named-dirs-under,$(1),.)
+endef
+
+###########################################################
+## Find all of the files under the named directories with
+## the specified name.
+## Meant to be used like:
+## SRC_FILES := $(call all-named-files-under,*.h,src tests)
+###########################################################
+
+define all-named-files-under
+$(call find-files-in-subdirs,$(LOCAL_PATH),"$(1)",$(2))
+endef
+
+###########################################################
+## Find all of the files under the current directory with
+## the specified name.
+###########################################################
+
+define all-subdir-named-files
+$(call all-named-files-under,$(1),.)
+endef
+
+###########################################################
## Find all of the java files under the named directories.
## Meant to be used like:
## SRC_FILES := $(call all-java-files-under,src tests)
###########################################################
define all-java-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) -name "*.java" -and -not -name ".*") \
- )
+$(call all-named-files-under,*.java,$(1))
endef
###########################################################
@@ -203,10 +240,7 @@
###########################################################
define all-c-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) -name "*.c" -and -not -name ".*") \
- )
+$(call all-named-files-under,*.c,$(1))
endef
###########################################################
@@ -219,16 +253,36 @@
endef
###########################################################
+## Find all of the cpp files under the named directories.
+## LOCAL_CPP_EXTENSION is respected if set.
+## Meant to be used like:
+## SRC_FILES := $(call all-cpp-files-under,src tests)
+###########################################################
+
+define all-cpp-files-under
+$(sort $(patsubst ./%,%, \
+ $(shell cd $(LOCAL_PATH) ; \
+ find -L $(1) -name "*$(or $(LOCAL_CPP_EXTENSION),.cpp)" -and -not -name ".*") \
+ ))
+endef
+
+###########################################################
+## Find all of the cpp files from here. Meant to be used like:
+## SRC_FILES := $(call all-subdir-cpp-files)
+###########################################################
+
+define all-subdir-cpp-files
+$(call all-cpp-files-under,.)
+endef
+
+###########################################################
## Find all files named "I*.aidl" under the named directories,
## which must be relative to $(LOCAL_PATH). The returned list
## is relative to $(LOCAL_PATH).
###########################################################
define all-Iaidl-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) -name "I*.aidl" -and -not -name ".*") \
- )
+$(call all-named-files-under,I*.aidl,$(1))
endef
###########################################################
@@ -246,10 +300,7 @@
###########################################################
define all-logtags-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) -name "*.logtags" -and -not -name ".*") \
- )
+$(call all-named-files-under,*.logtags,$(1))
endef
###########################################################
@@ -259,10 +310,7 @@
###########################################################
define all-proto-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) -name "*.proto" -and -not -name ".*") \
- )
+$(call all-named-files-under,*.proto,$(1))
endef
###########################################################
@@ -272,10 +320,7 @@
###########################################################
define all-renderscript-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) \( -name "*.rs" -or -name "*.fs" \) -and -not -name ".*") \
- )
+$(call find-subdir-files,$(1) \( -name "*.rs" -or -name "*.fs" \) -and -not -name ".*")
endef
###########################################################
@@ -285,10 +330,7 @@
###########################################################
define all-S-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) -name "*.S" -and -not -name ".*") \
- )
+$(call all-named-files-under,*.S,$(1))
endef
###########################################################
@@ -298,10 +340,7 @@
###########################################################
define all-html-files-under
-$(patsubst ./%,%, \
- $(shell cd $(LOCAL_PATH) ; \
- find -L $(1) -name "*.html" -and -not -name ".*") \
- )
+$(call all-named-files-under,*.html,$(1))
endef
###########################################################
@@ -319,7 +358,7 @@
###########################################################
define find-subdir-files
-$(patsubst ./%,%,$(shell cd $(LOCAL_PATH) ; find -L $(1)))
+$(sort $(patsubst ./%,%,$(shell cd $(LOCAL_PATH) ; find -L $(1))))
endef
###########################################################
@@ -331,8 +370,8 @@
###########################################################
define find-subdir-subdir-files
-$(filter-out $(patsubst %,$(1)/%,$(3)),$(patsubst ./%,%,$(shell cd \
- $(LOCAL_PATH) ; find -L $(1) -maxdepth 1 -name $(2))))
+$(sort $(filter-out $(patsubst %,$(1)/%,$(3)),$(patsubst ./%,%,$(shell cd \
+ $(LOCAL_PATH) ; find -L $(1) -maxdepth 1 -name $(2)))))
endef
###########################################################
@@ -341,10 +380,10 @@
###########################################################
define find-subdir-assets
-$(if $(1),$(patsubst ./%,%, \
+$(sort $(if $(1),$(patsubst ./%,%, \
$(shell if [ -d $(1) ] ; then cd $(1) ; find ./ -not -name '.*' -and -type f -and -not -type l ; fi)), \
$(warning Empty argument supplied to find-subdir-assets) \
-)
+))
endef
###########################################################
@@ -352,11 +391,11 @@
###########################################################
define find-other-java-files
- $(call find-subdir-files,$(1) -name "*.java" -and -not -name ".*")
+$(call all-java-files-under,$(1))
endef
define find-other-html-files
- $(call find-subdir-files,$(1) -name "*.html" -and -not -name ".*")
+$(call all-html-files-under,$(1))
endef
###########################################################
@@ -369,10 +408,10 @@
###########################################################
define find-files-in-subdirs
-$(patsubst ./%,%, \
+$(sort $(patsubst ./%,%, \
$(shell cd $(1) ; \
find -L $(3) -name $(2) -and -not -name ".*") \
- )
+ ))
endef
###########################################################
@@ -1633,8 +1672,12 @@
## Commands for running javac to make .class files
###########################################################
-#@echo "Source intermediates dir: $(PRIVATE_SOURCE_INTERMEDIATES_DIR)"
-#@echo "Source intermediates: $$(find $(PRIVATE_SOURCE_INTERMEDIATES_DIR) -name '*.java')"
+# Add BUILD_NUMBER to apps default version name if it's an unbundled build.
+ifdef TARGET_BUILD_APPS
+APPS_DEFAULT_VERSION_NAME := $(PLATFORM_VERSION)-$(BUILD_NUMBER)
+else
+APPS_DEFAULT_VERSION_NAME := $(PLATFORM_VERSION)
+endif
# TODO: Right now we generate the asset resources twice, first as part
# of generating the Java classes, then at the end when packaging the final
@@ -1661,8 +1704,8 @@
$(addprefix -G , $(PRIVATE_PROGUARD_OPTIONS_FILE)) \
$(addprefix --min-sdk-version , $(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
$(addprefix --target-sdk-version , $(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
- $(if $(filter --version-code,$(PRIVATE_AAPT_FLAGS)),,$(addprefix --version-code , $(PLATFORM_SDK_VERSION))) \
- $(if $(filter --version-name,$(PRIVATE_AAPT_FLAGS)),,$(addprefix --version-name , $(PLATFORM_VERSION)-$(BUILD_NUMBER))) \
+ $(if $(filter --version-code,$(PRIVATE_AAPT_FLAGS)),,--version-code $(PLATFORM_SDK_VERSION)) \
+ $(if $(filter --version-name,$(PRIVATE_AAPT_FLAGS)),,--version-name $(APPS_DEFAULT_VERSION_NAME)) \
$(addprefix --rename-manifest-package , $(PRIVATE_MANIFEST_PACKAGE_NAME)) \
$(addprefix --rename-instrumentation-target-package , $(PRIVATE_MANIFEST_INSTRUMENTATION_FOR)) \
--skip-symbols-without-default-localization
@@ -2010,8 +2053,8 @@
$(addprefix --min-sdk-version , $(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
$(addprefix --target-sdk-version , $(PRIVATE_DEFAULT_APP_TARGET_SDK)) \
$(if $(filter --product,$(PRIVATE_AAPT_FLAGS)),,$(addprefix --product , $(TARGET_AAPT_CHARACTERISTICS))) \
- $(if $(filter --version-code,$(PRIVATE_AAPT_FLAGS)),,$(addprefix --version-code , $(PLATFORM_SDK_VERSION))) \
- $(if $(filter --version-name,$(PRIVATE_AAPT_FLAGS)),,$(addprefix --version-name , $(PLATFORM_VERSION)-$(BUILD_NUMBER))) \
+ $(if $(filter --version-code,$(PRIVATE_AAPT_FLAGS)),,--version-code $(PLATFORM_SDK_VERSION)) \
+ $(if $(filter --version-name,$(PRIVATE_AAPT_FLAGS)),,--version-name $(APPS_DEFAULT_VERSION_NAME)) \
$(addprefix --rename-manifest-package , $(PRIVATE_MANIFEST_PACKAGE_NAME)) \
$(addprefix --rename-instrumentation-target-package , $(PRIVATE_MANIFEST_INSTRUMENTATION_FOR)) \
--skip-symbols-without-default-localization \
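
All of the per-extension finders above now funnel through find-files-in-subdirs, whose output is wrapped in $(sort ...) so generated file lists are deterministic (GNU make's $(sort) also de-duplicates). A rough Python equivalent of what the macro shells out to, for illustration only (the helper name is mine):

    import subprocess

    def find_files_in_subdirs(top, pattern, subdirs):
        # cd $(top); find -L <subdirs> -name <pattern> -and -not -name ".*"
        out = subprocess.check_output(
            ["find", "-L"] + list(subdirs) +
            ["-name", pattern, "-and", "-not", "-name", ".*"],
            cwd=top).decode()
        # $(patsubst ./%,%,...) strips the leading "./"; $(sort ...) sorts
        # and removes duplicates.
        paths = [p[2:] if p.startswith("./") else p for p in out.splitlines()]
        return sorted(set(paths))

    # e.g. SRC_FILES := $(call all-named-files-under,*.h,src tests)
    # roughly corresponds to:
    #   find_files_in_subdirs(LOCAL_PATH, "*.h", ["src", "tests"])
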
diff --git a/core/product.mk b/core/product.mk
index 4e8bff1..e97cba4 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -104,6 +104,7 @@
PRODUCT_SYSTEM_PROPERTY_BLACKLIST \
PRODUCT_SYSTEM_SERVER_JARS \
PRODUCT_VBOOT_SIGNING_KEY \
+ PRODUCT_VBOOT_SIGNING_SUBKEY \
PRODUCT_VERITY_SIGNING_KEY \
PRODUCT_SYSTEM_VERITY_PARTITION \
PRODUCT_VENDOR_VERITY_PARTITION \
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index a85c754..69d3f1b 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -42,7 +42,7 @@
# which is the version that we reveal to the end user.
# Update this value when the platform version changes (rather
# than overriding it somewhere else). Can be an arbitrary string.
- PLATFORM_VERSION := 6.0
+ PLATFORM_VERSION := 6.0.1
endif
ifeq "" "$(PLATFORM_SDK_VERSION)"
diff --git a/target/product/base.mk b/target/product/base.mk
index 1699156..4c49e86 100644
--- a/target/product/base.mk
+++ b/target/product/base.mk
@@ -111,6 +111,7 @@
run-as \
schedtest \
sdcard \
+ secdiscard \
services \
settings \
sgdisk \
diff --git a/target/product/core.mk b/target/product/core.mk
index 519dbb8..d453303 100644
--- a/target/product/core.mk
+++ b/target/product/core.mk
@@ -23,7 +23,6 @@
PRODUCT_PACKAGES += \
BasicDreams \
Browser \
- Calculator \
Calendar \
CalendarProvider \
CaptivePortalLogin \
@@ -33,6 +32,7 @@
DocumentsUI \
DownloadProviderUi \
Email \
+ ExactCalculator \
Exchange2 \
ExternalStorageProvider \
FusedLocation \
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index c40de4f..25a8975 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -89,6 +89,7 @@
PRODUCT_COPY_FILES += \
system/core/rootdir/init.usb.rc:root/init.usb.rc \
+ system/core/rootdir/init.usb.configfs.rc:root/init.usb.configfs.rc \
system/core/rootdir/init.trace.rc:root/init.trace.rc \
system/core/rootdir/ueventd.rc:root/ueventd.rc \
system/core/rootdir/etc/hosts:system/etc/hosts
diff --git a/target/product/vboot.mk b/target/product/vboot.mk
index e4b1144..48a4883 100644
--- a/target/product/vboot.mk
+++ b/target/product/vboot.mk
@@ -22,3 +22,4 @@
# We expect this file to exist with the suffixes ".vbprivk" and ".vbpubk".
# TODO: find a proper location for this
PRODUCT_VBOOT_SIGNING_KEY := external/vboot_reference/tests/devkeys/kernel_data_key
+PRODUCT_VBOOT_SIGNING_SUBKEY := external/vboot_reference/tests/devkeys/kernel_subkey
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index eab8113..f2bf1e1 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -28,6 +28,7 @@
print >> sys.stderr, "Python 2.7 or newer is required."
sys.exit(1)
+import datetime
import errno
import os
import tempfile
@@ -40,6 +41,9 @@
OPTIONS.add_missing = False
OPTIONS.rebuild_recovery = False
+OPTIONS.replace_verity_public_key = False
+OPTIONS.replace_verity_private_key = False
+OPTIONS.verity_signer_path = None
def AddSystem(output_zip, prefix="IMAGES/", recovery_img=None, boot_img=None):
"""Turn the contents of SYSTEM into a system image and store it in
@@ -119,6 +123,12 @@
if fstab:
image_props["fs_type"] = fstab["/" + what].fs_type
+ # Use a fixed timestamp (01/01/2009) when packaging the image.
+ # Bug: 24377993
+ epoch = datetime.datetime.fromtimestamp(0)
+ timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
+ image_props["timestamp"] = int(timestamp)
+
if what == "system":
fs_config_prefix = ""
else:
@@ -170,6 +180,12 @@
print "creating userdata.img..."
+ # Use a fixed timestamp (01/01/2009) when packaging the image.
+ # Bug: 24377993
+ epoch = datetime.datetime.fromtimestamp(0)
+ timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
+ image_props["timestamp"] = int(timestamp)
+
# The name of the directory it is making an image out of matters to
# mkyaffs2image. So we create a temp dir, and within it we create an
# empty dir named "data", and build the image from that.
@@ -207,6 +223,12 @@
print "creating cache.img..."
+ # Use a fixed timestamp (01/01/2009) when packaging the image.
+ # Bug: 24377993
+ epoch = datetime.datetime.fromtimestamp(0)
+ timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
+ image_props["timestamp"] = int(timestamp)
+
# The name of the directory it is making an image out of matters to
# mkyaffs2image. So we create a temp dir, and within it we create an
# empty dir named "cache", and build the image from that.
@@ -296,18 +318,27 @@
common.ZipClose(output_zip)
def main(argv):
- def option_handler(o, _):
+ def option_handler(o, a):
if o in ("-a", "--add_missing"):
OPTIONS.add_missing = True
elif o in ("-r", "--rebuild_recovery",):
OPTIONS.rebuild_recovery = True
+ elif o == "--replace_verity_private_key":
+ OPTIONS.replace_verity_private_key = (True, a)
+ elif o == "--replace_verity_public_key":
+ OPTIONS.replace_verity_public_key = (True, a)
+ elif o == "--verity_signer_path":
+ OPTIONS.verity_signer_path = a
else:
return False
return True
args = common.ParseOptions(
argv, __doc__, extra_opts="ar",
- extra_long_opts=["add_missing", "rebuild_recovery"],
+ extra_long_opts=["add_missing", "rebuild_recovery",
+ "replace_verity_public_key=",
+ "replace_verity_private_key=",
+ "verity_signer_path="],
extra_option_handler=option_handler)
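
The three image builders above pin "timestamp" to 2009-01-01 so repeated runs produce byte-identical images (bug 24377993). One caveat worth noting: datetime.datetime.fromtimestamp(0) yields the epoch in the build host's local time, so the computed value shifts with the host's timezone. A timezone-independent variant (an alternative sketch, not what the patch does) would use the UTC epoch:

    import datetime

    # As in the patch: local-time epoch, value varies with host timezone.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = int((datetime.datetime(2009, 1, 1) - epoch).total_seconds())

    # Timezone-independent alternative:
    utc_epoch = datetime.datetime.utcfromtimestamp(0)
    utc_timestamp = int(
        (datetime.datetime(2009, 1, 1) - utc_epoch).total_seconds())
    assert utc_timestamp == 1230768000  # 2009-01-01T00:00:00Z
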
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index 8060b58..f605df3 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -16,6 +16,7 @@
from collections import deque, OrderedDict
from hashlib import sha1
+import common
import heapq
import itertools
import multiprocessing
@@ -188,6 +189,12 @@
return (sum(sr.size() for (_, sr) in self.stash_before) -
sum(sr.size() for (_, sr) in self.use_stash))
+ def ConvertToNew(self):
+ assert self.style != "new"
+ self.use_stash = []
+ self.style = "new"
+ self.src_ranges = RangeSet()
+
def __str__(self):
return (str(self.id) + ": <" + str(self.src_ranges) + " " + self.style +
" to " + str(self.tgt_ranges) + ">")
@@ -282,6 +289,10 @@
self.ReverseBackwardEdges()
self.ImproveVertexSequence()
+ # Ensure the runtime stash size is under the limit.
+ if self.version >= 2 and common.OPTIONS.cache_size is not None:
+ self.ReviseStashSize()
+
# Double-check our work.
self.AssertSequenceGood()
@@ -301,7 +312,6 @@
out = []
total = 0
- performs_read = False
stashes = {}
stashed_blocks = 0
@@ -413,7 +423,6 @@
out.append("%s %s\n" % (xf.style, xf.tgt_ranges.to_string_raw()))
total += tgt_size
elif xf.style == "move":
- performs_read = True
assert xf.tgt_ranges
assert xf.src_ranges.size() == tgt_size
if xf.src_ranges != xf.tgt_ranges:
@@ -438,7 +447,6 @@
xf.tgt_ranges.to_string_raw(), src_str))
total += tgt_size
elif xf.style in ("bsdiff", "imgdiff"):
- performs_read = True
assert xf.tgt_ranges
assert xf.src_ranges
if self.version == 1:
@@ -475,9 +483,20 @@
if free_string:
out.append("".join(free_string))
- # sanity check: abort if we're going to need more than 512 MB if
- # stash space
- assert max_stashed_blocks * self.tgt.blocksize < (512 << 20)
+ if self.version >= 2:
+ # Sanity check: abort if we're going to need more stash space than
+      # the allowed size (cache_size * threshold). There are two reasons
+      # for having a threshold here: a) Part of the cache may have been
+ # occupied by some recovery logs. b) It will buy us some time to deal
+ # with the oversize issue.
+ cache_size = common.OPTIONS.cache_size
+ stash_threshold = common.OPTIONS.stash_threshold
+ max_allowed = cache_size * stash_threshold
+ assert max_stashed_blocks * self.tgt.blocksize < max_allowed, \
+ 'Stash size %d (%d * %d) exceeds the limit %d (%d * %.2f)' % (
+ max_stashed_blocks * self.tgt.blocksize, max_stashed_blocks,
+ self.tgt.blocksize, max_allowed, cache_size,
+ stash_threshold)
# Zero out extended blocks as a workaround for bug 20881595.
if self.tgt.extended:
@@ -504,8 +523,81 @@
f.write(i)
if self.version >= 2:
- print("max stashed blocks: %d (%d bytes)\n" % (
- max_stashed_blocks, max_stashed_blocks * self.tgt.blocksize))
+ max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+ max_allowed = common.OPTIONS.cache_size * common.OPTIONS.stash_threshold
+ print("max stashed blocks: %d (%d bytes), limit: %d bytes (%.2f%%)\n" % (
+ max_stashed_blocks, max_stashed_size, max_allowed,
+ max_stashed_size * 100.0 / max_allowed))
+
+ def ReviseStashSize(self):
+ print("Revising stash size...")
+ stashes = {}
+
+ # Create the map between a stash and its def/use points. For example, for a
+ # given stash of (idx, sr), stashes[idx] = (sr, def_cmd, use_cmd).
+ for xf in self.transfers:
+ # Command xf defines (stores) all the stashes in stash_before.
+ for idx, sr in xf.stash_before:
+ stashes[idx] = (sr, xf)
+
+ # Record all the stashes command xf uses.
+ for idx, _ in xf.use_stash:
+ stashes[idx] += (xf,)
+
+ # Compute the maximum blocks available for stash based on /cache size and
+ # the threshold.
+ cache_size = common.OPTIONS.cache_size
+ stash_threshold = common.OPTIONS.stash_threshold
+ max_allowed = cache_size * stash_threshold / self.tgt.blocksize
+
+ stashed_blocks = 0
+ new_blocks = 0
+
+    # Now go through all the commands. Compute the required stash size on the
+    # fly. If a command requires more stash than is available, remove the need
+    # for the stash by replacing the command that uses it with a "new" command
+    # instead.
+ for xf in self.transfers:
+ replaced_cmds = []
+
+ # xf.stash_before generates explicit stash commands.
+ for idx, sr in xf.stash_before:
+ if stashed_blocks + sr.size() > max_allowed:
+ # We cannot stash this one for a later command. Find out the command
+ # that will use this stash and replace the command with "new".
+ use_cmd = stashes[idx][2]
+ replaced_cmds.append(use_cmd)
+ print("%10d %9s %s" % (sr.size(), "explicit", use_cmd))
+ else:
+ stashed_blocks += sr.size()
+
+ # xf.use_stash generates free commands.
+ for _, sr in xf.use_stash:
+ stashed_blocks -= sr.size()
+
+ # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
+ # ComputePatches(), they both have the style of "diff".
+ if xf.style == "diff" and self.version >= 3:
+ assert xf.tgt_ranges and xf.src_ranges
+ if xf.src_ranges.overlaps(xf.tgt_ranges):
+ if stashed_blocks + xf.src_ranges.size() > max_allowed:
+ replaced_cmds.append(xf)
+ print("%10d %9s %s" % (xf.src_ranges.size(), "implicit", xf))
+
+ # Replace the commands in replaced_cmds with "new"s.
+ for cmd in replaced_cmds:
+      # The replaced command no longer uses any stashes in "use_stash".
+      # Remove the def points for all those stashes.
+ for idx, sr in cmd.use_stash:
+ def_cmd = stashes[idx][1]
+ assert (idx, sr) in def_cmd.stash_before
+ def_cmd.stash_before.remove((idx, sr))
+ new_blocks += sr.size()
+
+ cmd.ConvertToNew()
+
+ print(" Total %d blocks are packed as new blocks due to insufficient "
+ "cache size." % (new_blocks,))
def ComputePatches(self, prefix):
print("Reticulating splines...")
@@ -862,6 +954,64 @@
a.goes_after[b] = size
def FindTransfers(self):
+ """Parse the file_map to generate all the transfers."""
+
+ def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
+ split=False):
+ """Wrapper function for adding a Transfer().
+
+      For BBOTA v3, we need to stash source blocks to support the resumable
+      feature. However, with the growth of file size and the shrinking of the
+      cache partition, source blocks may be too large to be stashed. If a file
+      occupies too many blocks (greater than MAX_BLOCKS_PER_DIFF_TRANSFER), we
+      split it into smaller pieces by generating multiple Transfer()s.
+
+ The downside is that after splitting, we may increase the package size
+ since the split pieces don't align well. According to our experiments,
+ 1/8 of the cache size as the per-piece limit appears to be optimal.
+ Compared to the fixed 1024-block limit, it reduces the overall package
+      size by 30% for volantis, and 20% for angler and bullhead."""
+
+ # We care about diff transfers only.
+ if style != "diff" or not split:
+ Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+ return
+
+ pieces = 0
+ cache_size = common.OPTIONS.cache_size
+ split_threshold = 0.125
+ max_blocks_per_transfer = int(cache_size * split_threshold /
+ self.tgt.blocksize)
+
+ # Change nothing for small files.
+ if (tgt_ranges.size() <= max_blocks_per_transfer and
+ src_ranges.size() <= max_blocks_per_transfer):
+ Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+ return
+
+ while (tgt_ranges.size() > max_blocks_per_transfer and
+ src_ranges.size() > max_blocks_per_transfer):
+ tgt_split_name = "%s-%d" % (tgt_name, pieces)
+ src_split_name = "%s-%d" % (src_name, pieces)
+ tgt_first = tgt_ranges.first(max_blocks_per_transfer)
+ src_first = src_ranges.first(max_blocks_per_transfer)
+
+ Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style,
+ by_id)
+
+ tgt_ranges = tgt_ranges.subtract(tgt_first)
+ src_ranges = src_ranges.subtract(src_first)
+ pieces += 1
+
+ # Handle remaining blocks.
+ if tgt_ranges.size() or src_ranges.size():
+ # Must be both non-empty.
+ assert tgt_ranges.size() and src_ranges.size()
+ tgt_split_name = "%s-%d" % (tgt_name, pieces)
+ src_split_name = "%s-%d" % (src_name, pieces)
+ Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
+ by_id)
+
empty = RangeSet()
for tgt_fn, tgt_ranges in self.tgt.file_map.items():
if tgt_fn == "__ZERO":
@@ -869,28 +1019,28 @@
# in any file and that are filled with zeros. We have a
# special transfer style for zero blocks.
src_ranges = self.src.file_map.get("__ZERO", empty)
- Transfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
- "zero", self.transfers)
+ AddTransfer(tgt_fn, "__ZERO", tgt_ranges, src_ranges,
+ "zero", self.transfers)
continue
elif tgt_fn == "__COPY":
# "__COPY" domain includes all the blocks not contained in any
# file and that need to be copied unconditionally to the target.
- Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+ AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
continue
elif tgt_fn in self.src.file_map:
# Look for an exact pathname match in the source.
- Transfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
- "diff", self.transfers)
+ AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
+ "diff", self.transfers, self.version >= 3)
continue
b = os.path.basename(tgt_fn)
if b in self.src_basenames:
# Look for an exact basename match in the source.
src_fn = self.src_basenames[b]
- Transfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers)
+ AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+ "diff", self.transfers, self.version >= 3)
continue
b = re.sub("[0-9]+", "#", b)
@@ -900,11 +1050,11 @@
# for .so files that contain version numbers in the filename
# that get bumped.)
src_fn = self.src_numpatterns[b]
- Transfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
- "diff", self.transfers)
+ AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
+ "diff", self.transfers, self.version >= 3)
continue
- Transfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
+ AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
def AbbreviateSourceNames(self):
for k in self.src.file_map.keys():
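
For a sense of scale, the new limits work out as follows (the cache size below is an assumption; the real value comes from the target's META/misc_info.txt via OPTIONS.cache_size):

    # Illustrative arithmetic only - a 256 MiB /cache is assumed.
    cache_size = 256 * 1024 * 1024   # bytes, from META/misc_info.txt
    stash_threshold = 0.8            # OPTIONS.stash_threshold default
    blocksize = 4096

    # ReviseStashSize() cap: stash may occupy at most threshold * cache.
    max_allowed_blocks = cache_size * stash_threshold / blocksize
    # -> 52428.8, i.e. ~204.8 MiB of stash space

    # AddTransfer() split cap: each diff transfer is at most 1/8 of cache.
    max_blocks_per_transfer = int(cache_size * 0.125 / blocksize)
    # -> 8192 blocks, i.e. 32 MiB per piece
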
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 4b43c0c..357a666 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -399,6 +399,7 @@
mount_point: such as "system", "data" etc.
"""
d = {}
+
if "build.prop" in glob_dict:
bp = glob_dict["build.prop"]
if "ro.build.date.utc" in bp:
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 27b8f27..94eacc2 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -392,7 +392,9 @@
img_keyblock = tempfile.NamedTemporaryFile()
cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
- info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
+ info_dict["vboot_key"] + ".vbprivk",
+ info_dict["vboot_subkey"] + ".vbprivk",
+ img_keyblock.name,
img.name]
p = Run(cmd, stdout=subprocess.PIPE)
p.communicate()
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index c6fd47e..e61c64f 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -64,6 +64,14 @@
the build scripts (used for developer OTA packages which
legitimately need to go back and forth).
+ --downgrade
+ Intentionally generate an incremental OTA that updates from a newer
+ build to an older one (based on timestamp comparison). "post-timestamp"
+ will be replaced by "ota-downgrade=yes" in the metadata file. A data
+ wipe will always be enforced, so "ota-wipe=yes" will also be included in
+ the metadata file. The update-binary in the source build will be used in
+      the OTA package, unless the --binary flag is specified.
+
-e (--extra_script) <file>
Insert the contents of file at the end of the update script.
@@ -89,6 +97,9 @@
Specifies the number of worker-threads that will be used when
generating patches for incremental updates (defaults to 3).
+ --stash_threshold <float>
+ Specifies the threshold that will be used to compute the maximum
+ allowed stash size (defaults to 0.8).
"""
import sys
@@ -115,6 +126,7 @@
OPTIONS.patch_threshold = 0.95
OPTIONS.wipe_user_data = False
OPTIONS.omit_prereq = False
+OPTIONS.downgrade = False
OPTIONS.extra_script = None
OPTIONS.aslr_mode = True
OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
@@ -128,6 +140,9 @@
OPTIONS.fallback_to_full = True
OPTIONS.full_radio = False
OPTIONS.full_bootloader = False
+# Stash size cannot exceed cache_size * threshold.
+OPTIONS.cache_size = None
+OPTIONS.stash_threshold = 0.8
def MostPopularKey(d, default):
"""Given a dict, return the key corresponding to the largest
@@ -679,6 +694,8 @@
endif;
endif;
""" % bcb_dev)
+
+ script.SetProgress(1)
script.AddToZip(input_zip, output_zip, input_path=OPTIONS.updater_binary)
WriteMetadata(metadata, output_zip)
@@ -738,13 +755,44 @@
source_version, OPTIONS.target_info_dict,
fstab=OPTIONS.source_info_dict["fstab"])
+ oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties")
+ recovery_mount_options = OPTIONS.source_info_dict.get(
+ "recovery_mount_options")
+ oem_dict = None
+ if oem_props is not None and len(oem_props) > 0:
+ if OPTIONS.oem_source is None:
+ raise common.ExternalError("OEM source required for this build")
+ script.Mount("/oem", recovery_mount_options)
+ oem_dict = common.LoadDictionaryFromLines(
+ open(OPTIONS.oem_source).readlines())
+
metadata = {
- "pre-device": GetBuildProp("ro.product.device",
- OPTIONS.source_info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc",
- OPTIONS.target_info_dict),
+ "pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
+ OPTIONS.source_info_dict),
+ "ota-type": "BLOCK",
}
+ post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
+ pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
+ is_downgrade = long(post_timestamp) < long(pre_timestamp)
+
+ if OPTIONS.downgrade:
+ metadata["ota-downgrade"] = "yes"
+ if not is_downgrade:
+ raise RuntimeError("--downgrade specified but no downgrade detected: "
+ "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+ else:
+ if is_downgrade:
+      # Non-fatal here to allow generating such a package, which may require
+      # manual work to adjust the post-timestamp. A legit use case is that we
+      # cut a new build C (after having A and B), but want to enforce the
+      # update path of A -> C -> B. Specifying --downgrade may not help since
+      # that would enforce a data wipe for the C -> B update.
+ print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
+ "The package may not be deployed properly. "
+ "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
+ metadata["post-timestamp"] = post_timestamp
+
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
source_version=source_version,
@@ -755,14 +803,10 @@
metadata=metadata,
info_dict=OPTIONS.source_info_dict)
- # TODO: Currently this works differently from WriteIncrementalOTAPackage().
- # This function doesn't consider thumbprints when writing
- # metadata["pre/post-build"]. One possible reason is that the current
- # devices with thumbprints are all using file-based OTAs. Long term we
- # should factor out the common parts into a shared one to avoid further
- # divergence.
- source_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.source_info_dict)
- target_fp = GetBuildProp("ro.build.fingerprint", OPTIONS.target_info_dict)
+ target_fp = CalculateFingerprint(oem_props, oem_dict,
+ OPTIONS.target_info_dict)
+ source_fp = CalculateFingerprint(oem_props, oem_dict,
+ OPTIONS.source_info_dict)
metadata["pre-build"] = source_fp
metadata["post-build"] = target_fp
@@ -801,17 +845,6 @@
else:
vendor_diff = None
- oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
- recovery_mount_options = OPTIONS.source_info_dict.get(
- "recovery_mount_options")
- oem_dict = None
- if oem_props is not None and len(oem_props) > 0:
- if OPTIONS.oem_source is None:
- raise common.ExternalError("OEM source required for this build")
- script.Mount("/oem", recovery_mount_options)
- oem_dict = common.LoadDictionaryFromLines(
- open(OPTIONS.oem_source).readlines())
-
AppendAssertions(script, OPTIONS.target_info_dict, oem_dict)
device_specific.IncrementalOTA_Assertions()
@@ -966,6 +999,7 @@
if OPTIONS.wipe_user_data:
script.Print("Erasing user data...")
script.FormatPartition("/data")
+ metadata["ota-wipe"] = "yes"
if OPTIONS.two_step:
script.AppendExtra("""
@@ -975,7 +1009,12 @@
""" % bcb_dev)
script.SetProgress(1)
- script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
+ # For downgrade OTAs, we prefer to use the update-binary in the source
+ # build that is actually newer than the one in the target build.
+ if OPTIONS.downgrade:
+ script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
+ else:
+ script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
WriteMetadata(metadata, output_zip)
@@ -1141,10 +1180,30 @@
metadata = {
"pre-device": GetOemProperty("ro.product.device", oem_props, oem_dict,
OPTIONS.source_info_dict),
- "post-timestamp": GetBuildProp("ro.build.date.utc",
- OPTIONS.target_info_dict),
+ "ota-type": "FILE",
}
+ post_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.target_info_dict)
+ pre_timestamp = GetBuildProp("ro.build.date.utc", OPTIONS.source_info_dict)
+ is_downgrade = long(post_timestamp) < long(pre_timestamp)
+
+ if OPTIONS.downgrade:
+ metadata["ota-downgrade"] = "yes"
+ if not is_downgrade:
+ raise RuntimeError("--downgrade specified but no downgrade detected: "
+ "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+ else:
+ if is_downgrade:
+      # Non-fatal here to allow generating such a package, which may require
+      # manual work to adjust the post-timestamp. A legit use case is that we
+      # cut a new build C (after having A and B), but want to enforce the
+      # update path of A -> C -> B. Specifying --downgrade may not help since
+      # that would enforce a data wipe for the C -> B update.
+ print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
+ "The package may not be deployed properly. "
+ "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
+ metadata["post-timestamp"] = post_timestamp
+
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
source_version=source_version,
@@ -1470,6 +1529,7 @@
if OPTIONS.wipe_user_data:
script.Print("Erasing user data...")
script.FormatPartition("/data")
+ metadata["ota-wipe"] = "yes"
if OPTIONS.two_step:
script.AppendExtra("""
@@ -1489,7 +1549,13 @@
script.Unmount("/vendor")
script.Mount("/vendor")
vendor_diff.EmitExplicitTargetVerification(script)
- script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
+
+ # For downgrade OTAs, we prefer to use the update-binary in the source
+ # build that is actually newer than the one in the target build.
+ if OPTIONS.downgrade:
+ script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
+ else:
+ script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
WriteMetadata(metadata, output_zip)
@@ -1511,6 +1577,9 @@
OPTIONS.wipe_user_data = True
elif o in ("-n", "--no_prereq"):
OPTIONS.omit_prereq = True
+ elif o == "--downgrade":
+ OPTIONS.downgrade = True
+ OPTIONS.wipe_user_data = True
elif o in ("-o", "--oem_settings"):
OPTIONS.oem_source = a
elif o in ("-e", "--extra_script"):
@@ -1538,6 +1607,12 @@
OPTIONS.updater_binary = a
elif o in ("--no_fallback_to_full",):
OPTIONS.fallback_to_full = False
+ elif o == "--stash_threshold":
+ try:
+ OPTIONS.stash_threshold = float(a)
+ except ValueError:
+ raise ValueError("Cannot parse value %r for option %r - expecting "
+ "a float" % (a, o))
else:
return False
return True
@@ -1552,6 +1627,7 @@
"full_bootloader",
"wipe_user_data",
"no_prereq",
+ "downgrade",
"extra_script=",
"worker_threads=",
"aslr_mode=",
@@ -1562,12 +1638,25 @@
"oem_settings=",
"verify",
"no_fallback_to_full",
+ "stash_threshold=",
], extra_option_handler=option_handler)
if len(args) != 2:
common.Usage(__doc__)
sys.exit(1)
+ if OPTIONS.downgrade:
+ # Sanity check to enforce a data wipe.
+ if not OPTIONS.wipe_user_data:
+ raise ValueError("Cannot downgrade without a data wipe")
+
+ # We should only allow downgrading incrementals (as opposed to full).
+    # Otherwise the device may go back from an arbitrary build with this
+    # full OTA package.
+ if OPTIONS.incremental_source is None:
+      raise ValueError("Cannot generate downgradable full OTAs - consider "
+ "using --omit_prereq?")
+
if OPTIONS.extra_script is not None:
OPTIONS.extra_script = open(OPTIONS.extra_script).read()
@@ -1619,6 +1708,11 @@
output_zip = zipfile.ZipFile(temp_zip_file, "w",
compression=zipfile.ZIP_DEFLATED)
+ cache_size = OPTIONS.info_dict.get("cache_size", None)
+ if cache_size is None:
+ raise RuntimeError("can't determine the cache partition size")
+ OPTIONS.cache_size = cache_size
+
if OPTIONS.incremental_source is None:
WriteFullOTAPackage(input_zip, output_zip)
if OPTIONS.package_key is None:
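
Both the block-based and file-based incremental paths now carry the same downgrade handling, repeated verbatim in each function. Distilled into a simplified sketch (the function and parameter names are mine, not the patch's):

    # Simplified sketch of the downgrade/metadata logic duplicated above.
    def apply_downgrade_policy(metadata, pre_ts, post_ts, downgrade, wipe):
        is_downgrade = int(post_ts) < int(pre_ts)  # the patch uses long() (Py2)
        if downgrade:
            # "ota-downgrade=yes" replaces "post-timestamp" in the metadata.
            metadata["ota-downgrade"] = "yes"
            if not is_downgrade:
                raise RuntimeError(
                    "--downgrade specified but no downgrade detected: "
                    "pre: %s, post: %s" % (pre_ts, post_ts))
        else:
            if is_downgrade:
                print("WARNING: downgrade detected: pre: %s, post: %s. "
                      "Try --downgrade?" % (pre_ts, post_ts))
            metadata["post-timestamp"] = post_ts
        if wipe:  # --downgrade forces OPTIONS.wipe_user_data = True
            metadata["ota-wipe"] = "yes"
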
diff --git a/tools/releasetools/rangelib.py b/tools/releasetools/rangelib.py
index 1506658..aa572cc 100644
--- a/tools/releasetools/rangelib.py
+++ b/tools/releasetools/rangelib.py
@@ -28,7 +28,9 @@
if isinstance(data, str):
self._parse_internal(data)
elif data:
+ assert len(data) % 2 == 0
self.data = tuple(self._remove_pairs(data))
+ self.monotonic = all(x < y for x, y in zip(self.data, self.data[1:]))
else:
self.data = ()
@@ -38,8 +40,10 @@
def __eq__(self, other):
return self.data == other.data
+
def __ne__(self, other):
return self.data != other.data
+
def __nonzero__(self):
return bool(self.data)
@@ -73,9 +77,9 @@
monotonic = True
for p in text.split():
if "-" in p:
- s, e = p.split("-")
- data.append(int(s))
- data.append(int(e)+1)
+ s, e = (int(x) for x in p.split("-"))
+ data.append(s)
+ data.append(e+1)
if last <= s <= e:
last = e
else:
@@ -87,13 +91,16 @@
if last <= s:
last = s+1
else:
- monotonic = True
+ monotonic = False
data.sort()
self.data = tuple(self._remove_pairs(data))
self.monotonic = monotonic
@staticmethod
def _remove_pairs(source):
+ """Remove consecutive duplicate items to simplify the result.
+
+ [1, 2, 2, 5, 5, 10] will become [1, 10]."""
last = None
for i in source:
if i == last:
@@ -116,6 +123,7 @@
return " ".join(out)
def to_string_raw(self):
+ assert self.data
return str(len(self.data)) + "," + ",".join(str(i) for i in self.data)
def union(self, other):
@@ -260,6 +268,38 @@
out = out.union(RangeSet(str(s1) + "-" + str(e1-1)))
return out
+ def first(self, n):
+ """Return the RangeSet that contains at most the first 'n' integers.
+
+ >>> RangeSet("0-9").first(1)
+ <RangeSet("0")>
+ >>> RangeSet("10-19").first(5)
+ <RangeSet("10-14")>
+ >>> RangeSet("10-19").first(15)
+ <RangeSet("10-19")>
+ >>> RangeSet("10-19 30-39").first(3)
+ <RangeSet("10-12")>
+ >>> RangeSet("10-19 30-39").first(15)
+ <RangeSet("10-19 30-34")>
+ >>> RangeSet("10-19 30-39").first(30)
+ <RangeSet("10-19 30-39")>
+ >>> RangeSet("0-9").first(0)
+ <RangeSet("")>
+ """
+
+ if self.size() <= n:
+ return self
+
+ out = []
+ for s, e in self:
+ if e - s >= n:
+ out += (s, s+n)
+ break
+ else:
+ out += (s, e)
+ n -= e - s
+ return RangeSet(data=out)
+
if __name__ == "__main__":
import doctest
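
RangeSet.first(n) is the primitive behind the transfer splitting in blockimgdiff.py: peel off at most n blocks, subtract them, repeat. A minimal usage sketch based on the doctests above (assuming rangelib is importable from the releasetools directory):

    from rangelib import RangeSet

    r = RangeSet("10-19 30-39")      # 20 blocks in two runs
    piece = r.first(15)              # <RangeSet("10-19 30-34")>
    rest = r.subtract(piece)         # <RangeSet("35-39")>
    assert piece.size() == 15 and rest.size() == 5
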