Merge "Add mediaswcodec to system"
diff --git a/core/Makefile b/core/Makefile
index a2d9339..2ff9063 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -2916,7 +2916,7 @@
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
# BOARD_SUPER_PARTITION_SIZE must be defined to build super image.
-ifdef BOARD_SUPER_PARTITION_SIZE
+ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
INSTALLED_SUPERIMAGE_TARGET := $(PRODUCT_OUT)/super.img
INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
@@ -2940,9 +2940,9 @@
--metadata-slots $(if $(1),2,1) \
--device-size $(BOARD_SUPER_PARTITION_SIZE) \
$(foreach name,$(BOARD_SUPER_PARTITION_PARTITION_LIST), \
- --partition $(name)$(1):$$($(UUIDGEN) $(name)$(1)):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0) \
+ --partition $(name)$(1):readonly:$(if $(2),$(call read-size-of-partitions,$(name)),0) \
$(if $(2), --image $(name)$(1)=$(call images-for-partitions,$(name))) \
- $(if $(1), --partition $(name)_b:$$($(UUIDGEN) $(name)_b):readonly:0) \
+ $(if $(1), --partition $(name)_b:readonly:0) \
)
endef
@@ -2977,32 +2977,61 @@
# Do not check for apps-only build
ifeq (true,$(PRODUCT_BUILD_SUPER_PARTITION))
-ifdef BOARD_SUPER_PARTITION_SIZE
-ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
-droid_targets: check_android_partition_sizes
+droid_targets: check-all-partition-sizes
-.PHONY: check_android_partition_sizes
+.PHONY: check-all-partition-sizes check-all-partition-sizes-nodeps
# Add image dependencies so that generated_*_image_info.txt are written before checking.
-check_android_partition_sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
+check-all-partition-sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
-check_android_partition_sizes:
- partition_size_list="$(call read-size-of-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))"; \
- sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${partition_size_list}"); \
- max_size_tail=$(if $(filter true,$(AB_OTA_UPDATER))," / 2"); \
- max_size_expr=$(BOARD_SUPER_PARTITION_SIZE)$${max_size_tail}; \
- if [ $$(( $${sum_sizes_expr} )) -gt $$(( $${max_size_expr} )) ]; then \
- echo "The sum of sizes of all logical partitions is larger than BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
- echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
- exit 1; \
- else \
- echo "The sum of sizes of all logical partitions is within BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
- echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
- fi
+# $(1): human-readable max size string
+# $(2): max size expression
+# $(3): list of partition names
+define check-sum-of-partition-sizes
+ partition_size_list="$(call read-size-of-partitions,$(3))"; \
+ sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${partition_size_list}"); \
+ if [ $$(( $${sum_sizes_expr} )) -gt $$(( $(2) )) ]; then \
+ echo "The sum of sizes of [$(strip $(3))] is larger than $(strip $(1)):"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' "$(2)" '==' $$(( $(2) )); \
+ exit 1; \
+ else \
+ echo "The sum of sizes of [$(strip $(3))] is within $(strip $(1)):"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' "$(2)" '==' $$(( $(2) )); \
+ fi
+endef
-endif # BOARD_SUPER_PARTITION_PARTITION_LIST
-endif # BOARD_SUPER_PARTITION_SIZE
+define check-all-partition-sizes-target
+ # Check sum(all partitions) <= super partition (/ 2 for A/B)
+ $(if $(BOARD_SUPER_PARTITION_SIZE),$(if $(BOARD_SUPER_PARTITION_PARTITION_LIST), \
+ $(call check-sum-of-partition-sizes,BOARD_SUPER_PARTITION_SIZE$(if $(filter true,$(AB_OTA_UPDATER)), / 2), \
+ $(BOARD_SUPER_PARTITION_SIZE)$(if $(filter true,$(AB_OTA_UPDATER)), / 2),$(BOARD_SUPER_PARTITION_PARTITION_LIST))))
+
+ # For each group, check sum(partitions in group) <= group size
+ $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(if $(BOARD_$(group)_SIZE),$(if $(BOARD_$(group)_PARTITION_LIST), \
+ $(call check-sum-of-partition-sizes,BOARD_$(group)_SIZE,$(BOARD_$(group)_SIZE),$(BOARD_$(group)_PARTITION_LIST)))))
+
+ # Check sum(all group sizes) <= super partition (/ 2 for A/B)
+ if [[ ! -z $(BOARD_SUPER_PARTITION_SIZE) ]]; then \
+ group_size_list="$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)),$(BOARD_$(group)_SIZE))"; \
+ sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${group_size_list}"); \
+ max_size_tail=$(if $(filter true,$(AB_OTA_UPDATER))," / 2"); \
+ max_size_expr="$(BOARD_SUPER_PARTITION_SIZE)$${max_size_tail}"; \
+ if [ $$(( $${sum_sizes_expr} )) -gt $$(( $${max_size_expr} )) ]; then \
+ echo "The sum of sizes of [$(strip $(BOARD_SUPER_PARTITION_GROUPS))] is larger than BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '>' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
+ exit 1; \
+ else \
+ echo "The sum of sizes of [$(strip $(BOARD_SUPER_PARTITION_GROUPS))] is within BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+ echo $${sum_sizes_expr} '==' $$(( $${sum_sizes_expr} )) '<=' $${max_size_expr} '==' $$(( $${max_size_expr} )); \
+ fi \
+ fi
+endef
+
+check-all-partition-sizes check-all-partition-sizes-nodeps:
+ $(call check-all-partition-sizes-target)
+
endif # PRODUCT_BUILD_SUPER_PARTITION
endif # TARGET_BUILD_APPS
@@ -3016,33 +3045,42 @@
# -----------------------------------------------------------------
# host tools needed to build dist and OTA packages
-build_ota_package := true
-ifeq ($(TARGET_SKIP_OTA_PACKAGE),true)
-build_ota_package := false
-endif
ifeq ($(BUILD_OS),darwin)
-build_ota_package := false
-endif
-ifneq ($(strip $(SANITIZE_TARGET)),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_PRODUCT),sdk)
-build_ota_package := false
-endif
-ifneq ($(filter generic%,$(TARGET_DEVICE)),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_NO_KERNEL),true)
-build_ota_package := false
-endif
-ifeq ($(recovery_fstab),)
-build_ota_package := false
-endif
-ifeq ($(TARGET_BUILD_PDK),true)
-build_ota_package := false
+ build_ota_package := false
+ build_otatools_package := false
+else
+ # set build_ota_package, and allow opt-out below
+ build_ota_package := true
+ ifeq ($(TARGET_SKIP_OTA_PACKAGE),true)
+ build_ota_package := false
+ endif
+ ifneq ($(strip $(SANITIZE_TARGET)),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_PRODUCT),sdk)
+ build_ota_package := false
+ endif
+ ifneq ($(filter generic%,$(TARGET_DEVICE)),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_NO_KERNEL),true)
+ build_ota_package := false
+ endif
+ ifeq ($(recovery_fstab),)
+ build_ota_package := false
+ endif
+ ifeq ($(TARGET_BUILD_PDK),true)
+ build_ota_package := false
+ endif
+
+ # set build_otatools_package, and allow opt-out below
+ build_otatools_package := true
+ ifeq ($(TARGET_SKIP_OTATOOLS_PACKAGE),true)
+ build_otatools_package := false
+ endif
endif
-ifeq ($(build_ota_package),true)
+ifeq ($(build_otatools_package),true)
OTATOOLS := $(HOST_OUT_EXECUTABLES)/minigzip \
$(HOST_OUT_EXECUTABLES)/aapt \
$(HOST_OUT_EXECUTABLES)/checkvintf \
@@ -3160,7 +3198,7 @@
.PHONY: otatools-package
otatools-package: $(BUILT_OTATOOLS_PACKAGE)
-endif # build_ota_package
+endif # build_otatools_package
# -----------------------------------------------------------------
# A zip of the directories that map to the target filesystem.
@@ -3602,7 +3640,7 @@
ifdef BUILT_VENDOR_MATRIX
$(hide) cp $(BUILT_VENDOR_MATRIX) $(zip_root)/META/vendor_matrix.xml
endif
-ifdef BOARD_SUPER_PARTITION_SIZE
+ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
$(hide) echo "super_size=$(BOARD_SUPER_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
$(hide) echo "lpmake=$(notdir $(LPMAKE))" >> $(zip_root)/META/misc_info.txt
$(hide) echo -n "lpmake_args=" >> $(zip_root)/META/misc_info.txt
diff --git a/core/base_rules.mk b/core/base_rules.mk
index fcc8ede..57fd818 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -759,8 +759,6 @@
ALL_MODULES.$(my_register_name).MODULE_NAME := $(LOCAL_MODULE)
ALL_MODULES.$(my_register_name).COMPATIBILITY_SUITES := $(LOCAL_COMPATIBILITY_SUITE)
ALL_MODULES.$(my_register_name).TEST_CONFIG := $(test_config)
-ALL_MODULES.$(my_register_name).SRCS := \
- $(ALL_MODULES.$(my_register_name).SRCS) $(LOCAL_SRC_FILES)
test_config :=
INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
diff --git a/core/binary.mk b/core/binary.mk
index b8ee423..07fb48a 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -46,8 +46,8 @@
my_cflags := $(LOCAL_CFLAGS)
my_conlyflags := $(LOCAL_CONLYFLAGS)
my_cppflags := $(LOCAL_CPPFLAGS)
-my_cflags_no_override := $(GLOBAL_CFLAGS_NO_OVERRIDE)
-my_cppflags_no_override := $(GLOBAL_CPPFLAGS_NO_OVERRIDE)
+my_cflags_no_override := $(GLOBAL_CLANG_CFLAGS_NO_OVERRIDE)
+my_cppflags_no_override := $(GLOBAL_CLANG_CPPFLAGS_NO_OVERRIDE)
my_ldflags := $(LOCAL_LDFLAGS)
my_ldlibs := $(LOCAL_LDLIBS)
my_asflags := $(LOCAL_ASFLAGS)
@@ -626,8 +626,6 @@
# actually used (although they are usually empty).
arm_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(arm_objects_mode)_CFLAGS)
normal_objects_cflags := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)$(normal_objects_mode)_CFLAGS)
-arm_objects_cflags := $(call convert-to-clang-flags,$(arm_objects_cflags))
-normal_objects_cflags := $(call convert-to-clang-flags,$(normal_objects_cflags))
else
arm_objects_mode :=
@@ -1561,8 +1559,6 @@
my_cflags += $(LOCAL_CLANG_CFLAGS)
my_conlyflags += $(LOCAL_CLANG_CONLYFLAGS)
my_cppflags += $(LOCAL_CLANG_CPPFLAGS)
-my_cflags_no_override += $(GLOBAL_CLANG_CFLAGS_NO_OVERRIDE)
-my_cppflags_no_override += $(GLOBAL_CLANG_CPPFLAGS_NO_OVERRIDE)
my_asflags += $(LOCAL_CLANG_ASFLAGS)
my_ldflags += $(LOCAL_CLANG_LDFLAGS)
my_cflags += $(LOCAL_CLANG_CFLAGS_$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)) $(LOCAL_CLANG_CFLAGS_$(my_32_64_bit_suffix))
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 5aa27ca..07e34e1 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -318,6 +318,7 @@
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_ARCH):=
+LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(TARGET_ARCH):=
LOCAL_SRC_FILES_$(TARGET_ARCH):=
LOCAL_STATIC_LIBRARIES_$(TARGET_ARCH):=
@@ -340,6 +341,7 @@
LOCAL_PREBUILT_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_REQUIRED_MODULES_$(TARGET_2ND_ARCH):=
LOCAL_SHARED_LIBRARIES_$(TARGET_2ND_ARCH):=
+LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH):=
LOCAL_SRC_FILES_EXCLUDE_$(TARGET_2ND_ARCH):=
LOCAL_SRC_FILES_$(TARGET_2ND_ARCH):=
LOCAL_STATIC_LIBRARIES_$(TARGET_2ND_ARCH):=
diff --git a/core/config.mk b/core/config.mk
index 483bc77..b9174b3 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -99,6 +99,15 @@
TARGET_CLANG_SUPPORTED 2ND_TARGET_CLANG_SUPPORTED \
TARGET_CC 2ND_TARGET_CC \
TARGET_CXX 2ND_TARGET_CXX \
+ TARGET_TOOLCHAIN_ROOT 2ND_TARGET_TOOLCHAIN_ROOT \
+ HOST_TOOLCHAIN_ROOT 2ND_HOST_TOOLCHAIN_ROOT \
+ HOST_CROSS_TOOLCHAIN_ROOT 2ND_HOST_CROSS_TOOLCHAIN_ROOT \
+ HOST_TOOLS_PREFIX 2ND_HOST_TOOLS_PREFIX \
+ HOST_CROSS_TOOLS_PREFIX 2ND_HOST_CROSS_TOOLS_PREFIX \
+ HOST_GCC_VERSION 2ND_HOST_GCC_VERSION \
+ HOST_CROSS_GCC_VERSION 2ND_HOST_CROSS_GCC_VERSION \
+ TARGET_NDK_GCC_VERSION 2ND_TARGET_NDK_GCC_VERSION \
+ GLOBAL_CFLAGS_NO_OVERRIDE GLOBAL_CPPFLAGS_NO_OVERRIDE \
,GCC support has been removed. Use Clang instead)
# This is marked as obsolete in envsetup.mk after reading the BoardConfig.mk
@@ -705,7 +714,6 @@
DATA_BINDING_COMPILER := $(HOST_OUT_JAVA_LIBRARIES)/databinding-compiler.jar
FAT16COPY := build/make/tools/fat16copy.py
CHECK_LINK_TYPE := build/make/tools/check_link_type.py
-UUIDGEN := build/make/tools/uuidgen.py
LPMAKE := $(HOST_OUT_EXECUTABLES)/lpmake$(HOST_EXECUTABLE_SUFFIX)
PROGUARD := external/proguard/bin/proguard.sh
@@ -1000,16 +1008,42 @@
endif # PRODUCT_USE_DYNAMIC_PARTITION_SIZE
ifeq ($(PRODUCT_BUILD_SUPER_PARTITION),true)
-ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
-# BOARD_SUPER_PARTITION_PARTITION_LIST: a list of the following tokens
+
+# BOARD_SUPER_PARTITION_GROUPS defines a list of "updatable groups". Each updatable group is a
+# group of partitions that share the same pool of free spaces.
+# For each group in BOARD_SUPER_PARTITION_GROUPS, a BOARD_{GROUP}_SIZE and
+# BOARD_{GROUP}_PARTITION_PARTITION_LIST may be defined.
+# - BOARD_{GROUP}_SIZE: The maximum sum of sizes of all partitions in the group.
+# If empty, no limit is enforced on the sum of sizes for this group.
+# - BOARD_{GROUP}_PARTITION_PARTITION_LIST: the list of partitions that belongs to this group.
+# If empty, no partitions belong to this group, and the sum of sizes is effectively 0.
+$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(eval BOARD_$(group)_SIZE ?=) \
+ $(eval .KATI_READONLY := BOARD_$(group)_SIZE) \
+ $(eval BOARD_$(group)_PARTITION_LIST ?=) \
+ $(eval .KATI_READONLY := BOARD_$(group)_PARTITION_LIST) \
+)
+
+# BOARD_*_PARTITION_LIST: a list of the following tokens
valid_super_partition_list := system vendor product product_services
-ifneq (,$(filter-out $(valid_super_partition_list),$(BOARD_SUPER_PARTITION_PARTITION_LIST)))
-$(error BOARD_SUPER_PARTITION_PARTITION_LIST contains invalid partition name \
- ($(filter-out $(valid_super_partition_list),$(BOARD_SUPER_PARTITION_PARTITION_LIST))). \
- Valid names are $(valid_super_partition_list))
-endif
+$(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(if $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)), \
+ $(error BOARD_$(group)_PARTITION_LIST contains invalid partition name \
+ $(filter-out $(valid_super_partition_list),$(BOARD_$(group)_PARTITION_LIST)). \
+ Valid names are $(valid_super_partition_list))))
valid_super_partition_list :=
-endif # BOARD_SUPER_PARTITION_PARTITION_LIST
+
+
+# Define BOARD_SUPER_PARTITION_PARTITION_LIST, the sum of all BOARD_*_PARTITION_LIST
+ifdef BOARD_SUPER_PARTITION_PARTITION_LIST
+$(error BOARD_SUPER_PARTITION_PARTITION_LIST should not be defined, but computed from \
+ BOARD_SUPER_PARTITION_GROUPS and BOARD_*_PARTITION_LIST)
+endif
+BOARD_SUPER_PARTITION_PARTITION_LIST := \
+ $(foreach group,$(call to-upper,$(BOARD_SUPER_PARTITION_GROUPS)), \
+ $(BOARD_$(group)_PARTITION_LIST))
+.KATI_READONLY := BOARD_SUPER_PARTITION_PARTITION_LIST
+
endif # PRODUCT_BUILD_SUPER_PARTITION
# ###############################################################
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index e58f676..be1b124 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -212,10 +212,6 @@
my_sanitize := $(filter-out scudo,$(my_sanitize))
endif
-ifneq ($(filter scudo,$(my_sanitize)),)
- my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_RUNTIME_LIBRARY)
-endif
-
# Undefined symbols can occur if a non-sanitized library links
# sanitized static libraries. That's OK, because the executable
# always depends on the ASan runtime library, which defines these
@@ -375,7 +371,7 @@
endif
endif
ifneq ($(filter unsigned-integer-overflow signed-integer-overflow integer,$(my_sanitize)),)
- ifeq ($(filter unsigned-integer-overflow signed-integer overflow integer,$(my_sanitize_diag)),)
+ ifeq ($(filter unsigned-integer-overflow signed-integer-overflow integer,$(my_sanitize_diag)),)
ifeq ($(filter cfi,$(my_sanitize_diag)),)
ifeq ($(filter address hwaddress,$(my_sanitize)),)
my_cflags += -fsanitize-minimal-runtime
@@ -387,6 +383,18 @@
endif
endif
+# For Scudo, we opt for the minimal runtime, unless some diagnostics are enabled.
+ifneq ($(filter scudo,$(my_sanitize)),)
+ ifeq ($(filter unsigned-integer-overflow signed-integer-overflow integer cfi,$(my_sanitize_diag)),)
+ my_cflags += -fsanitize-minimal-runtime
+ endif
+ ifneq ($(filter -fsanitize-minimal-runtime,$(my_cflags)),)
+ my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_MINIMAL_RUNTIME_LIBRARY)
+ else
+ my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)SCUDO_RUNTIME_LIBRARY)
+ endif
+endif
+
ifneq ($(strip $(LOCAL_SANITIZE_RECOVER)),)
recover_arg := $(subst $(space),$(comma),$(LOCAL_SANITIZE_RECOVER)),
my_cflags += -fsanitize-recover=$(recover_arg)
diff --git a/core/cxx_stl_setup.mk b/core/cxx_stl_setup.mk
index 5171b8a..3590079 100644
--- a/core/cxx_stl_setup.mk
+++ b/core/cxx_stl_setup.mk
@@ -15,8 +15,7 @@
endif
ifeq ($($(my_prefix)OS),windows)
- # libc++ is not supported on mingw.
- my_cxx_stl := libstdc++
+ my_cxx_stl := libc++_static
endif
endif
else
@@ -38,9 +37,9 @@
endif
ifdef LOCAL_IS_HOST_MODULE
ifeq ($($(my_prefix)OS),windows)
- ifneq ($(filter $(my_cxx_stl),libc++ libc++_static),)
- # libc++ is not supported on mingw.
- my_cxx_stl := libstdc++
+ ifneq ($(filter $(my_cxx_stl),libc++),)
+ # only libc++_static is supported on mingw.
+ my_cxx_stl := libc++_static
endif
endif
endif
@@ -52,8 +51,9 @@
darwin_dynamic_gcclibs := -lc -lSystem
darwin_static_gcclibs := NO_STATIC_HOST_BINARIES_ON_DARWIN
windows_dynamic_gcclibs := \
- -lmsvcr110 -lmingw32 -lgcc -lmoldname -lmingwex -lmsvcrt -ladvapi32 \
- -lshell32 -luser32 -lkernel32 -lmingw32 -lgcc -lmoldname -lmingwex -lmsvcrt
+ -Wl,--start-group -lmingw32 -lgcc -lgcc_eh -lmoldname -lmingwex -lmsvcr110 \
+ -lmsvcrt -lpthread -ladvapi32 -lshell32 -luser32 -lkernel32 -lpsapi \
+ -Wl,--end-group
windows_static_gcclibs := NO_STATIC_HOST_BINARIES_ON_WINDOWS
my_link_type := dynamic
@@ -100,6 +100,20 @@
my_cppflags += -nostdinc++
my_ldflags += -nodefaultlibs
my_cxx_ldlibs += $($($(my_prefix)OS)_$(my_link_type)_gcclibs)
+
+ ifeq ($($(my_prefix)OS),windows)
+ # Use SjLj exceptions for 32-bit. libgcc_eh implements SjLj
+ # exception model for 32-bit.
+ ifeq (x86,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
+ my_cppflags += -fsjlj-exceptions
+ endif
+ # Disable visibility annotations since we're using libc++ static
+ # library.
+ my_cppflags += -D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS
+ my_cppflags += -D_LIBCXXABI_DISABLE_VISIBILITY_ANNOTATIONS
+ # Use Win32 threads in libc++.
+ my_cppflags += -D_LIBCPP_HAS_THREAD_API_WIN32
+ endif
else
ifeq (arm,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
my_static_libraries += libunwind_llvm
@@ -113,11 +127,7 @@
else ifeq ($(my_cxx_stl),ndk)
# Using an NDK STL. Handled in binary.mk.
else ifeq ($(my_cxx_stl),libstdc++)
- ifndef LOCAL_IS_HOST_MODULE
- $(error $(LOCAL_PATH): $(LOCAL_MODULE): libstdc++ is not supported for device modules)
- else ifneq ($($(my_prefix)OS),windows)
- $(error $(LOCAL_PATH): $(LOCAL_MODULE): libstdc++ is not supported on $($(my_prefix)OS))
- endif
+ $(error $(LOCAL_PATH): $(LOCAL_MODULE): libstdc++ is not supported)
else ifeq ($(my_cxx_stl),none)
ifdef LOCAL_IS_HOST_MODULE
my_cppflags += -nostdinc++
diff --git a/core/definitions.mk b/core/definitions.mk
index 8be8747..7c4fdf5 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -2410,25 +2410,21 @@
# Uncompress dex files embedded in an apk.
#
define uncompress-dexs
-$(hide) if (zipinfo $@ '*.dex' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
- tmpdir=$@.tmpdir; \
- rm -rf $$tmpdir && mkdir $$tmpdir; \
- unzip -q $@ '*.dex' -d $$tmpdir && \
- zip -qd $@ '*.dex' && \
- ( cd $$tmpdir && find . -type f | sort | zip -qD -X -0 ../$(notdir $@) -@ ) && \
- rm -rf $$tmpdir; \
+ if (zipinfo $@ '*.dex' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
+ $(ZIP2ZIP) -i $@ -o $@.tmp -0 "classes*.dex" && \
+ mv -f $@.tmp $@ ; \
fi
endef
-# Uncompress shared libraries embedded in an apk.
+# Uncompress shared JNI libraries embedded in an apk.
#
-define uncompress-shared-libs
-$(hide) if (zipinfo $@ $(PRIVATE_EMBEDDED_JNI_LIBS) 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
- rm -rf $(dir $@)uncompressedlibs && mkdir $(dir $@)uncompressedlibs; \
- unzip -q $@ $(PRIVATE_EMBEDDED_JNI_LIBS) -d $(dir $@)uncompressedlibs && \
- zip -qd $@ 'lib/*.so' && \
- ( cd $(dir $@)uncompressedlibs && find lib -type f | sort | zip -qD -X -0 ../$(notdir $@) -@ ) && \
- rm -rf $(dir $@)uncompressedlibs; \
+define uncompress-prebuilt-embedded-jni-libs
+ if (zipinfo $@ 'lib/*.so' 2>/dev/null | grep -v ' stor ' >/dev/null) ; then \
+ $(ZIP2ZIP) -i $@ -o $@.tmp -0 'lib/**/*.so' \
+ $(if $(PRIVATE_EMBEDDED_JNI_LIBS), \
+ -x 'lib/**/*.so' \
+ $(addprefix -X ,$(PRIVATE_EMBEDDED_JNI_LIBS))) && \
+ mv -f $@.tmp $@ ; \
fi
endef
@@ -2473,7 +2469,7 @@
endef
define copy-and-uncompress-dexs
-$(2): $(1) $(ZIPALIGN)
+$(2): $(1) $(ZIPALIGN) $(ZIP2ZIP)
@echo "Uncompress dexs in: $$@"
$$(copy-file-to-target)
$$(uncompress-dexs)
@@ -2488,7 +2484,8 @@
$(eval _cmf_tuple := $(subst :, ,$(f))) \
$(eval _cmf_src := $(word 1,$(_cmf_tuple))) \
$(eval _cmf_dest := $(word 2,$(_cmf_tuple))) \
- $(eval $(call copy-one-file,$(_cmf_src),$(_cmf_dest))) \
+ $(if $(filter-out $(_cmf_src), $(_cmf_dest)), \
+ $(eval $(call copy-one-file,$(_cmf_src),$(_cmf_dest)))) \
$(_cmf_dest)))
endef
@@ -2739,7 +2736,7 @@
$(2): OUTPUT_DIR := $(dir $(call hiddenapi-soong-output-dex,$(2)))
$(2): OUTPUT_JAR := $(dir $(call hiddenapi-soong-output-dex,$(2)))classes.jar
$(2): $(1) $(call hiddenapi-soong-output-dex,$(2)) | $(SOONG_ZIP) $(MERGE_ZIPS)
- $(SOONG_ZIP) -o $${OUTPUT_JAR} -C $${OUTPUT_DIR} -D $${OUTPUT_DIR}
+ $(SOONG_ZIP) -o $${OUTPUT_JAR} -C $${OUTPUT_DIR} -f "$${OUTPUT_DIR}/classes*.dex"
$(MERGE_ZIPS) -D -zipToNotStrip $${OUTPUT_JAR} -stripFile "classes*.dex" $(2) $${OUTPUT_JAR} $(1)
endef
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 6981916..698034c 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -199,7 +199,6 @@
--instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
- --runtime-arg -Xnorelocate --compile-pic \
--no-generate-debug-info --generate-build-id \
--abort-on-hard-verifier-error \
--force-determinism \
diff --git a/core/dex_preopt_libart_boot.mk b/core/dex_preopt_libart_boot.mk
index 8764d1d..14955f0 100644
--- a/core/dex_preopt_libart_boot.mk
+++ b/core/dex_preopt_libart_boot.mk
@@ -108,8 +108,7 @@
--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
--android-root=$(PRODUCT_OUT)/system \
- --runtime-arg -Xnorelocate --compile-pic \
- --multi-image --no-inline-from=core-oj.jar \
+ --no-inline-from=core-oj.jar \
--abort-on-hard-verifier-error \
--abort-on-soft-verifier-error \
$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS) \
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 1aca804..cd78eda 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -7,87 +7,95 @@
# privileged apps
LOCAL_UNCOMPRESS_DEX := false
ifneq (true,$(DONT_UNCOMPRESS_PRIV_APPS_DEXS))
-ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
- LOCAL_UNCOMPRESS_DEX := true
-else
+ ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
+ LOCAL_UNCOMPRESS_DEX := true
+ endif
+
ifneq (,$(filter $(PRODUCT_LOADED_BY_PRIVILEGED_MODULES), $(LOCAL_MODULE)))
LOCAL_UNCOMPRESS_DEX := true
- endif # PRODUCT_LOADED_BY_PRIVILEGED_MODULES
-endif # LOCAL_PRIVILEGED_MODULE
+ endif
endif # DONT_UNCOMPRESS_PRIV_APPS_DEXS
# Setting LOCAL_DEX_PREOPT based on WITH_DEXPREOPT, LOCAL_DEX_PREOPT, etc
LOCAL_DEX_PREOPT := $(strip $(LOCAL_DEX_PREOPT))
-ifneq (true,$(WITH_DEXPREOPT))
- LOCAL_DEX_PREOPT :=
-else # WITH_DEXPREOPT=true
- ifeq (,$(TARGET_BUILD_APPS)) # TARGET_BUILD_APPS empty
- ifndef LOCAL_DEX_PREOPT # LOCAL_DEX_PREOPT undefined
- ifneq ($(filter $(TARGET_OUT)/%,$(my_module_path)),) # Installed to system.img.
- ifeq (,$(LOCAL_APK_LIBRARIES)) # LOCAL_APK_LIBRARIES empty
- # If we have product-specific config for this module?
- ifeq (disable,$(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG))
- LOCAL_DEX_PREOPT := false
- else
- LOCAL_DEX_PREOPT := $(DEX_PREOPT_DEFAULT)
- endif
- else # LOCAL_APK_LIBRARIES not empty
- LOCAL_DEX_PREOPT := nostripping
- endif # LOCAL_APK_LIBRARIES not empty
- else
- # Default to nostripping for non system preopt (enables preopt).
- # Don't strip in case the oat/vdex version in system ROM doesn't match the one in other
- # partitions. It needs to be able to fall back to the APK for that case.
- # Also only enable preopt for non tests.
- ifeq (,$(filter $(LOCAL_MODULE_TAGS),tests))
- LOCAL_DEX_PREOPT := nostripping
- endif
- endif # Installed to system.img.
- endif # LOCAL_DEX_PREOPT undefined
- endif # TARGET_BUILD_APPS empty
-endif # WITH_DEXPREOPT=true
+ifndef LOCAL_DEX_PREOPT # LOCAL_DEX_PREOPT undefined
+ LOCAL_DEX_PREOPT := $(DEX_PREOPT_DEFAULT)
+
+ ifeq ($(filter $(TARGET_OUT)/%,$(my_module_path)),) # Not installed to system.img.
+ # Default to nostripping for non system preopt (enables preopt).
+ # Don't strip in case the oat/vdex version in system ROM doesn't match the one in other
+ # partitions. It needs to be able to fall back to the APK for that case.
+ LOCAL_DEX_PREOPT := nostripping
+ endif
+
+ ifneq (,$(LOCAL_APK_LIBRARIES)) # LOCAL_APK_LIBRARIES not empty
+ LOCAL_DEX_PREOPT := nostripping
+ endif
+endif
+
ifeq (false,$(LOCAL_DEX_PREOPT))
LOCAL_DEX_PREOPT :=
endif
+
+# Only enable preopt for non tests.
+ifneq (,$(filter $(LOCAL_MODULE_TAGS),tests))
+ LOCAL_DEX_PREOPT :=
+endif
+
+# If we have product-specific config for this module?
+ifeq (disable,$(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG))
+ LOCAL_DEX_PREOPT :=
+endif
+
+# Disable preopt for TARGET_BUILD_APPS
+ifneq (,$(TARGET_BUILD_APPS))
+ LOCAL_DEX_PREOPT :=
+endif
+
+# Disable preopt if not WITH_DEXPREOPT
+ifneq (true,$(WITH_DEXPREOPT))
+ LOCAL_DEX_PREOPT :=
+endif
+
ifdef LOCAL_UNINSTALLABLE_MODULE
-LOCAL_DEX_PREOPT :=
+ LOCAL_DEX_PREOPT :=
endif
+
ifeq (,$(strip $(built_dex)$(my_prebuilt_src_file)$(LOCAL_SOONG_DEX_JAR))) # contains no java code
-LOCAL_DEX_PREOPT :=
+ LOCAL_DEX_PREOPT :=
endif
+
# if WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY=true and module is not in boot class path skip
# Also preopt system server jars since selinux prevents system server from loading anything from
# /data. If we don't do this they will need to be extracted which is not favorable for RAM usage
# or performance. If my_preopt_for_extracted_apk is true, we ignore the only preopt boot image
# options.
ifneq (true,$(my_preopt_for_extracted_apk))
-ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
-ifeq ($(filter $(PRODUCT_SYSTEM_SERVER_JARS) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
-LOCAL_DEX_PREOPT :=
-endif
-endif
+ ifeq (true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY))
+ ifeq ($(filter $(PRODUCT_SYSTEM_SERVER_JARS) $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE)),)
+ LOCAL_DEX_PREOPT :=
+ endif
+ endif
endif
ifeq ($(LOCAL_DEX_PREOPT),true)
+ # Don't strip with dexes we explicitly uncompress (dexopt will not store the dex code).
+ ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
+ LOCAL_DEX_PREOPT := nostripping
+ endif # LOCAL_UNCOMPRESS_DEX
-# Don't strip with dexes we explicitly uncompress (dexopt will not store the dex code).
-ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
-LOCAL_DEX_PREOPT := nostripping
-endif # LOCAL_UNCOMPRESS_DEX
+ # system_other isn't there for an OTA, so don't strip
+ # if module is on system, and odex is on system_other.
+ ifeq ($(BOARD_USES_SYSTEM_OTHER_ODEX),true)
+ ifneq ($(call install-on-system-other, $(my_module_path)),)
+ LOCAL_DEX_PREOPT := nostripping
+ endif # install-on-system-other
+ endif # BOARD_USES_SYSTEM_OTHER_ODEX
-# system_other isn't there for an OTA, so don't strip
-# if module is on system, and odex is on system_other.
-ifeq ($(BOARD_USES_SYSTEM_OTHER_ODEX),true)
-ifneq ($(call install-on-system-other, $(my_module_path)),)
-LOCAL_DEX_PREOPT := nostripping
-endif # install-on-system-other
-endif # BOARD_USES_SYSTEM_OTHER_ODEX
-
-# We also don't strip if all dexs are uncompressed (dexopt will not store the dex code),
-# but that requires to inspect the source file, which is too early at this point (as we
-# don't know if the source file will actually be used).
-# See dexpreopt-remove-classes.dex.
-
+ # We also don't strip if all dexs are uncompressed (dexopt will not store the dex code),
+ # but that requires to inspect the source file, which is too early at this point (as we
+ # don't know if the source file will actually be used).
+ # See dexpreopt-remove-classes.dex.
endif # LOCAL_DEX_PREOPT
built_odex :=
@@ -101,64 +109,64 @@
built_installed_art :=
my_process_profile :=
my_profile_is_text_listing :=
+my_generate_dm :=
ifeq (false,$(WITH_DEX_PREOPT_GENERATE_PROFILE))
-LOCAL_DEX_PREOPT_GENERATE_PROFILE := false
+ LOCAL_DEX_PREOPT_GENERATE_PROFILE := false
endif
ifndef LOCAL_DEX_PREOPT_GENERATE_PROFILE
+ # If LOCAL_DEX_PREOPT_GENERATE_PROFILE is not defined, default it based on the existence of the
+ # profile class listing. TODO: Use product specific directory here.
+ my_classes_directory := $(PRODUCT_DEX_PREOPT_PROFILE_DIR)
+ LOCAL_DEX_PREOPT_PROFILE := $(my_classes_directory)/$(LOCAL_MODULE).prof
-
-# If LOCAL_DEX_PREOPT_GENERATE_PROFILE is not defined, default it based on the existence of the
-# profile class listing. TODO: Use product specific directory here.
-my_classes_directory := $(PRODUCT_DEX_PREOPT_PROFILE_DIR)
-LOCAL_DEX_PREOPT_PROFILE := $(my_classes_directory)/$(LOCAL_MODULE).prof
-
-ifneq (,$(wildcard $(LOCAL_DEX_PREOPT_PROFILE)))
-my_process_profile := true
-my_profile_is_text_listing := false
-endif
+ ifneq (,$(wildcard $(LOCAL_DEX_PREOPT_PROFILE)))
+ my_process_profile := true
+ my_profile_is_text_listing := false
+ endif
else
-my_process_profile := $(LOCAL_DEX_PREOPT_GENERATE_PROFILE)
-my_profile_is_text_listing := true
-LOCAL_DEX_PREOPT_PROFILE := $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
+ my_process_profile := $(LOCAL_DEX_PREOPT_GENERATE_PROFILE)
+ my_profile_is_text_listing := true
+ LOCAL_DEX_PREOPT_PROFILE := $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
endif
ifeq (true,$(my_process_profile))
-ifeq (,$(LOCAL_DEX_PREOPT_APP_IMAGE))
-LOCAL_DEX_PREOPT_APP_IMAGE := true
-endif
+ ifeq (,$(LOCAL_DEX_PREOPT_APP_IMAGE))
+ LOCAL_DEX_PREOPT_APP_IMAGE := true
+ endif
-ifndef LOCAL_DEX_PREOPT_PROFILE
-$(call pretty-error,Must have specified class listing (LOCAL_DEX_PREOPT_PROFILE))
-endif
-ifeq (,$(dex_preopt_profile_src_file))
-$(call pretty-error, Internal error: dex_preopt_profile_src_file must be set)
-endif
-my_built_profile := $(dir $(LOCAL_BUILT_MODULE))/profile.prof
-my_dex_location := $(patsubst $(PRODUCT_OUT)%,%,$(LOCAL_INSTALLED_MODULE))
-# Remove compressed APK extension.
-my_dex_location := $(patsubst %.gz,%,$(my_dex_location))
-$(my_built_profile): PRIVATE_BUILT_MODULE := $(dex_preopt_profile_src_file)
-$(my_built_profile): PRIVATE_DEX_LOCATION := $(my_dex_location)
-$(my_built_profile): PRIVATE_SOURCE_CLASSES := $(LOCAL_DEX_PREOPT_PROFILE)
-$(my_built_profile): $(LOCAL_DEX_PREOPT_PROFILE)
-$(my_built_profile): $(PROFMAN)
-$(my_built_profile): $(dex_preopt_profile_src_file)
-ifeq (true,$(my_profile_is_text_listing))
-# The profile is a test listing of classes (used for framework jars).
-# We need to generate the actual binary profile before being able to compile.
+ ifndef LOCAL_DEX_PREOPT_PROFILE
+ $(call pretty-error,Must have specified class listing (LOCAL_DEX_PREOPT_PROFILE))
+ endif
+ ifeq (,$(dex_preopt_profile_src_file))
+ $(call pretty-error, Internal error: dex_preopt_profile_src_file must be set)
+ endif
+ my_built_profile := $(dir $(LOCAL_BUILT_MODULE))/profile.prof
+ my_dex_location := $(patsubst $(PRODUCT_OUT)%,%,$(LOCAL_INSTALLED_MODULE))
+ # Remove compressed APK extension.
+ my_dex_location := $(patsubst %.gz,%,$(my_dex_location))
+ $(my_built_profile): PRIVATE_BUILT_MODULE := $(dex_preopt_profile_src_file)
+ $(my_built_profile): PRIVATE_DEX_LOCATION := $(my_dex_location)
+ $(my_built_profile): PRIVATE_SOURCE_CLASSES := $(LOCAL_DEX_PREOPT_PROFILE)
+ $(my_built_profile): $(LOCAL_DEX_PREOPT_PROFILE)
+ $(my_built_profile): $(PROFMAN)
+ $(my_built_profile): $(dex_preopt_profile_src_file)
+ ifeq (true,$(my_profile_is_text_listing))
+ # The profile is a test listing of classes (used for framework jars).
+ # We need to generate the actual binary profile before being able to compile.
+ $(my_built_profile):
$(hide) mkdir -p $(dir $@)
ANDROID_LOG_TAGS="*:e" $(PROFMAN) \
--create-profile-from=$(PRIVATE_SOURCE_CLASSES) \
--apk=$(PRIVATE_BUILT_MODULE) \
--dex-location=$(PRIVATE_DEX_LOCATION) \
--reference-profile-file=$@
-else
-# The profile is binary profile (used for apps). Run it through profman to
-# ensure the profile keys match the apk.
-$(my_built_profile):
+ else
+ # The profile is binary profile (used for apps). Run it through profman to
+ # ensure the profile keys match the apk.
+ $(my_built_profile):
$(hide) mkdir -p $(dir $@)
touch $@
ANDROID_LOG_TAGS="*:i" $(PROFMAN) \
@@ -168,239 +176,240 @@
--dex-location=$(PRIVATE_DEX_LOCATION) \
--reference-profile-file=$@ \
|| echo "Profile out of date for $(PRIVATE_BUILT_MODULE)"
-endif
+ endif
-my_profile_is_text_listing :=
-dex_preopt_profile_src_file :=
+ my_profile_is_text_listing :=
+ dex_preopt_profile_src_file :=
-# Remove compressed APK extension.
-my_installed_profile := $(patsubst %.gz,%,$(LOCAL_INSTALLED_MODULE)).prof
+ # Remove compressed APK extension.
+ my_installed_profile := $(patsubst %.gz,%,$(LOCAL_INSTALLED_MODULE)).prof
-# my_installed_profile := $(LOCAL_INSTALLED_MODULE).prof
-$(eval $(call copy-one-file,$(my_built_profile),$(my_installed_profile)))
-build_installed_profile:=$(my_built_profile):$(my_installed_profile)
+ # my_installed_profile := $(LOCAL_INSTALLED_MODULE).prof
+ $(eval $(call copy-one-file,$(my_built_profile),$(my_installed_profile)))
+ build_installed_profile:=$(my_built_profile):$(my_installed_profile)
else
-build_installed_profile:=
-my_installed_profile :=
+ build_installed_profile:=
+ my_installed_profile :=
endif
ifdef LOCAL_DEX_PREOPT
-dexpreopt_boot_jar_module := $(filter $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE))
-ifdef dexpreopt_boot_jar_module
-# For libart, the boot jars' odex files are replaced by $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE).
-# We use this installed_odex trick to get boot.art installed.
-installed_odex := $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
-# Append the odex for the 2nd arch if we have one.
-installed_odex += $($(TARGET_2ND_ARCH_VAR_PREFIX)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
-else # boot jar
-ifeq ($(LOCAL_MODULE_CLASS),JAVA_LIBRARIES)
+ dexpreopt_boot_jar_module := $(filter $(DEXPREOPT_BOOT_JARS_MODULES),$(LOCAL_MODULE))
-my_module_multilib := $(LOCAL_MULTILIB)
-# If the module is not an SDK library and it's a system server jar, only preopt the primary arch.
-my_filtered_lib_name := $(patsubst %.impl,%,$(LOCAL_MODULE))
-ifeq (,$(filter $(JAVA_SDK_LIBRARIES),$(my_filtered_lib_name)))
-# For a Java library, by default we build odex for both 1st arch and 2nd arch.
-# But it can be overridden with "LOCAL_MULTILIB := first".
-ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
-# For system server jars, we build for only "first".
-my_module_multilib := first
-endif
-endif
+ ifdef dexpreopt_boot_jar_module
+ # For libart, the boot jars' odex files are replaced by $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE).
+ # We use this installed_odex trick to get boot.art installed.
+ installed_odex := $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
+ # Append the odex for the 2nd arch if we have one.
+ installed_odex += $($(TARGET_2ND_ARCH_VAR_PREFIX)DEFAULT_DEX_PREOPT_INSTALLED_IMAGE)
+ else # boot jar
+ ifeq ($(LOCAL_MODULE_CLASS),JAVA_LIBRARIES)
-# Only preopt primary arch for translated arch since there is only an image there.
-ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
-my_module_multilib := first
-endif
+ my_module_multilib := $(LOCAL_MULTILIB)
+ # If the module is not an SDK library and it's a system server jar, only preopt the primary arch.
+ my_filtered_lib_name := $(patsubst %.impl,%,$(LOCAL_MODULE))
+ ifeq (,$(filter $(JAVA_SDK_LIBRARIES),$(my_filtered_lib_name)))
+ # For a Java library, by default we build odex for both 1st arch and 2nd arch.
+ # But it can be overridden with "LOCAL_MULTILIB := first".
+ ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
+ # For system server jars, we build for only "first".
+ my_module_multilib := first
+ endif
+ endif
-# #################################################
-# Odex for the 1st arch
-my_2nd_arch_prefix :=
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-# #################################################
-# Odex for the 2nd arch
-ifdef TARGET_2ND_ARCH
-ifneq ($(TARGET_TRANSLATE_2ND_ARCH),true)
-ifneq (first,$(my_module_multilib))
-my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-endif # my_module_multilib is not first.
-endif # TARGET_TRANSLATE_2ND_ARCH not true
-endif # TARGET_2ND_ARCH
-# #################################################
-else # must be APPS
-# The preferred arch
-my_2nd_arch_prefix := $(LOCAL_2ND_ARCH_VAR_PREFIX)
-# Save the module multilib since setup_one_odex modifies it.
-saved_my_module_multilib := $(my_module_multilib)
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-my_module_multilib := $(saved_my_module_multilib)
-ifdef TARGET_2ND_ARCH
-ifeq ($(my_module_multilib),both)
-# The non-preferred arch
-my_2nd_arch_prefix := $(if $(LOCAL_2ND_ARCH_VAR_PREFIX),,$(TARGET_2ND_ARCH_VAR_PREFIX))
-include $(BUILD_SYSTEM)/setup_one_odex.mk
-endif # LOCAL_MULTILIB is both
-endif # TARGET_2ND_ARCH
-endif # LOCAL_MODULE_CLASS
-endif # boot jar
+ # Only preopt primary arch for translated arch since there is only an image there.
+ ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
+ my_module_multilib := first
+ endif
-built_odex := $(strip $(built_odex))
-built_vdex := $(strip $(built_vdex))
-built_art := $(strip $(built_art))
-installed_odex := $(strip $(installed_odex))
-installed_vdex := $(strip $(installed_vdex))
-installed_art := $(strip $(installed_art))
+ # #################################################
+ # Odex for the 1st arch
+ my_2nd_arch_prefix :=
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ # #################################################
+ # Odex for the 2nd arch
+ ifdef TARGET_2ND_ARCH
+ ifneq ($(TARGET_TRANSLATE_2ND_ARCH),true)
+ ifneq (first,$(my_module_multilib))
+ my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ endif # my_module_multilib is not first.
+ endif # TARGET_TRANSLATE_2ND_ARCH not true
+ endif # TARGET_2ND_ARCH
+ # #################################################
+ else # must be APPS
+ # The preferred arch
+ my_2nd_arch_prefix := $(LOCAL_2ND_ARCH_VAR_PREFIX)
+ # Save the module multilib since setup_one_odex modifies it.
+ saved_my_module_multilib := $(my_module_multilib)
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ my_module_multilib := $(saved_my_module_multilib)
+ ifdef TARGET_2ND_ARCH
+ ifeq ($(my_module_multilib),both)
+ # The non-preferred arch
+ my_2nd_arch_prefix := $(if $(LOCAL_2ND_ARCH_VAR_PREFIX),,$(TARGET_2ND_ARCH_VAR_PREFIX))
+ include $(BUILD_SYSTEM)/setup_one_odex.mk
+ endif # LOCAL_MULTILIB is both
+ endif # TARGET_2ND_ARCH
+ endif # LOCAL_MODULE_CLASS
+ endif # boot jar
-ifdef built_odex
-ifeq (true,$(my_process_profile))
-$(built_odex): $(my_built_profile)
-$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS := --profile-file=$(my_built_profile)
-else
-$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS :=
-endif
+ built_odex := $(strip $(built_odex))
+ built_vdex := $(strip $(built_vdex))
+ built_art := $(strip $(built_art))
+ installed_odex := $(strip $(installed_odex))
+ installed_vdex := $(strip $(installed_vdex))
+ installed_art := $(strip $(installed_art))
-ifndef LOCAL_DEX_PREOPT_FLAGS
-LOCAL_DEX_PREOPT_FLAGS := $(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG)
-ifndef LOCAL_DEX_PREOPT_FLAGS
-LOCAL_DEX_PREOPT_FLAGS := $(PRODUCT_DEX_PREOPT_DEFAULT_FLAGS)
-endif
-endif
-
-my_system_server_compiler_filter := $(PRODUCT_SYSTEM_SERVER_COMPILER_FILTER)
-ifeq (,$(my_system_server_compiler_filter))
-my_system_server_compiler_filter := speed
-endif
-
-my_default_compiler_filter := $(PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER)
-ifeq (,$(my_default_compiler_filter))
-# If no default compiler filter is specified, default to 'quicken' to save on storage.
-my_default_compiler_filter := quicken
-endif
-
-ifeq (,$(filter --compiler-filter=%, $(LOCAL_DEX_PREOPT_FLAGS)))
- ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
- # Jars of system server, use the product option if it is set, speed otherwise.
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_system_server_compiler_filter)
- else
- ifneq (,$(filter $(PRODUCT_DEXPREOPT_SPEED_APPS) $(PRODUCT_SYSTEM_SERVER_APPS),$(LOCAL_MODULE)))
- # Apps loaded into system server, and apps the product default to being compiled with the
- # 'speed' compiler filter.
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed
+ ifdef built_odex
+ ifeq (true,$(my_process_profile))
+ $(built_odex): $(my_built_profile)
+ $(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS := --profile-file=$(my_built_profile)
else
- ifeq (true,$(my_process_profile))
- # For non system server jars, use speed-profile when we have a profile.
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed-profile
- else
- LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_default_compiler_filter)
+ $(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS :=
+ endif
+
+ ifndef LOCAL_DEX_PREOPT_FLAGS
+ LOCAL_DEX_PREOPT_FLAGS := $(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG)
+ ifndef LOCAL_DEX_PREOPT_FLAGS
+ LOCAL_DEX_PREOPT_FLAGS := $(PRODUCT_DEX_PREOPT_DEFAULT_FLAGS)
endif
endif
- endif
-endif
-my_generate_dm := $(PRODUCT_DEX_PREOPT_GENERATE_DM_FILES)
-ifeq (,$(filter $(LOCAL_DEX_PREOPT_FLAGS),--compiler-filter=verify))
-# Generating DM files only makes sense for verify, avoid doing for non verify compiler filter APKs.
-my_generate_dm := false
-endif
+ my_system_server_compiler_filter := $(PRODUCT_SYSTEM_SERVER_COMPILER_FILTER)
+ ifeq (,$(my_system_server_compiler_filter))
+ my_system_server_compiler_filter := speed
+ endif
-# No reason to use a dm file if the dex is already uncompressed.
-ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
-my_generate_dm := false
-endif
+ my_default_compiler_filter := $(PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER)
+ ifeq (,$(my_default_compiler_filter))
+ # If no default compiler filter is specified, default to 'quicken' to save on storage.
+ my_default_compiler_filter := quicken
+ endif
-ifeq (true,$(my_generate_dm))
-LOCAL_DEX_PREOPT_FLAGS += --copy-dex-files=false
-LOCAL_DEX_PREOPT := nostripping
-my_built_dm := $(dir $(LOCAL_BUILT_MODULE))generated.dm
-my_installed_dm := $(patsubst %.apk,%,$(LOCAL_INSTALLED_MODULE)).dm
-my_copied_vdex := $(dir $(LOCAL_BUILT_MODULE))primary.vdex
-$(eval $(call copy-one-file,$(built_vdex),$(my_copied_vdex)))
-$(my_built_dm): PRIVATE_INPUT_VDEX := $(my_copied_vdex)
-$(my_built_dm): $(my_copied_vdex) $(ZIPTIME)
+ ifeq (,$(filter --compiler-filter=%, $(LOCAL_DEX_PREOPT_FLAGS)))
+ ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
+ # Jars of system server, use the product option if it is set, speed otherwise.
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_system_server_compiler_filter)
+ else
+ ifneq (,$(filter $(PRODUCT_DEXPREOPT_SPEED_APPS) $(PRODUCT_SYSTEM_SERVER_APPS),$(LOCAL_MODULE)))
+ # Apps loaded into system server, and apps the product default to being compiled with the
+ # 'speed' compiler filter.
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed
+ else
+ ifeq (true,$(my_process_profile))
+ # For non system server jars, use speed-profile when we have a profile.
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=speed-profile
+ else
+ LOCAL_DEX_PREOPT_FLAGS += --compiler-filter=$(my_default_compiler_filter)
+ endif
+ endif
+ endif
+ endif
+
+ my_generate_dm := $(PRODUCT_DEX_PREOPT_GENERATE_DM_FILES)
+ ifeq (,$(filter $(LOCAL_DEX_PREOPT_FLAGS),--compiler-filter=verify))
+ # Generating DM files only makes sense for verify, avoid doing for non verify compiler filter APKs.
+ my_generate_dm := false
+ endif
+
+ # No reason to use a dm file if the dex is already uncompressed.
+ ifeq ($(LOCAL_UNCOMPRESS_DEX),true)
+ my_generate_dm := false
+ endif
+
+ ifeq (true,$(my_generate_dm))
+ LOCAL_DEX_PREOPT_FLAGS += --copy-dex-files=false
+ LOCAL_DEX_PREOPT := nostripping
+ my_built_dm := $(dir $(LOCAL_BUILT_MODULE))generated.dm
+ my_installed_dm := $(patsubst %.apk,%,$(LOCAL_INSTALLED_MODULE)).dm
+ my_copied_vdex := $(dir $(LOCAL_BUILT_MODULE))primary.vdex
+ $(eval $(call copy-one-file,$(built_vdex),$(my_copied_vdex)))
+ $(my_built_dm): PRIVATE_INPUT_VDEX := $(my_copied_vdex)
+ $(my_built_dm): $(my_copied_vdex) $(ZIPTIME)
$(hide) mkdir -p $(dir $@)
$(hide) rm -f $@
$(hide) zip -qD -j -X -9 $@ $(PRIVATE_INPUT_VDEX)
$(ZIPTIME) $@
-$(eval $(call copy-one-file,$(my_built_dm),$(my_installed_dm)))
-endif
+ $(eval $(call copy-one-file,$(my_built_dm),$(my_installed_dm)))
+ endif
-# By default, emit debug info.
-my_dexpreopt_debug_info := true
-# If the global setting suppresses mini-debug-info, disable it.
-ifeq (false,$(WITH_DEXPREOPT_DEBUG_INFO))
- my_dexpreopt_debug_info := false
-endif
-
-# PRODUCT_SYSTEM_SERVER_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
-# PRODUCT_OTHER_JAVA_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
-ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
- ifeq (true,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
+ # By default, emit debug info.
my_dexpreopt_debug_info := true
- else ifeq (false,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
- my_dexpreopt_debug_info := false
+ # If the global setting suppresses mini-debug-info, disable it.
+ ifeq (false,$(WITH_DEXPREOPT_DEBUG_INFO))
+ my_dexpreopt_debug_info := false
+ endif
+
+ # PRODUCT_SYSTEM_SERVER_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
+ # PRODUCT_OTHER_JAVA_DEBUG_INFO overrides WITH_DEXPREOPT_DEBUG_INFO.
+ ifneq (,$(filter $(PRODUCT_SYSTEM_SERVER_JARS),$(LOCAL_MODULE)))
+ ifeq (true,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
+ my_dexpreopt_debug_info := true
+ else ifeq (false,$(PRODUCT_SYSTEM_SERVER_DEBUG_INFO))
+ my_dexpreopt_debug_info := false
+ endif
+ else
+ ifeq (true,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
+ my_dexpreopt_debug_info := true
+ else ifeq (false,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
+ my_dexpreopt_debug_info := false
+ endif
+ endif
+
+ # Never enable on eng.
+ ifeq (eng,$(filter eng, $(TARGET_BUILD_VARIANT)))
+ my_dexpreopt_debug_info := false
+ endif
+
+ # Add dex2oat flag for debug-info/no-debug-info.
+ ifeq (true,$(my_dexpreopt_debug_info))
+ LOCAL_DEX_PREOPT_FLAGS += --generate-mini-debug-info
+ else ifeq (false,$(my_dexpreopt_debug_info))
+ LOCAL_DEX_PREOPT_FLAGS += --no-generate-mini-debug-info
+ endif
+
+ # Set the compiler reason to 'prebuilt' to identify the oat files produced
+ # during the build, as opposed to compiled on the device.
+ LOCAL_DEX_PREOPT_FLAGS += --compilation-reason=prebuilt
+
+ $(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
+ $(built_vdex): $(built_odex)
+ $(built_art): $(built_odex)
endif
-else
- ifeq (true,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
- my_dexpreopt_debug_info := true
- else ifeq (false,$(PRODUCT_OTHER_JAVA_DEBUG_INFO))
- my_dexpreopt_debug_info := false
+
+ ifneq (true,$(my_generate_dm))
+ # Add the installed_odex to the list of installed files for this module if we aren't generating a
+ # dm file.
+ ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
+ ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
+ ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
+
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
+
+ # Make sure to install the .odex and .vdex when you run "make <module_name>"
+ $(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
+ else
+ ALL_MODULES.$(my_register_name).INSTALLED += $(my_installed_dm)
+ ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(my_built_dm) $(my_installed_dm)
+
+ # Make sure to install the .dm when you run "make <module_name>"
+ $(my_all_targets): $(installed_dm)
endif
-endif
-# Never enable on eng.
-ifeq (eng,$(filter eng, $(TARGET_BUILD_VARIANT)))
-my_dexpreopt_debug_info := false
-endif
-
-# Add dex2oat flag for debug-info/no-debug-info.
-ifeq (true,$(my_dexpreopt_debug_info))
- LOCAL_DEX_PREOPT_FLAGS += --generate-mini-debug-info
-else ifeq (false,$(my_dexpreopt_debug_info))
- LOCAL_DEX_PREOPT_FLAGS += --no-generate-mini-debug-info
-endif
-
-# Set the compiler reason to 'prebuilt' to identify the oat files produced
-# during the build, as opposed to compiled on the device.
-LOCAL_DEX_PREOPT_FLAGS += --compilation-reason=prebuilt
-
-$(built_odex): PRIVATE_DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
-$(built_vdex): $(built_odex)
-$(built_art): $(built_odex)
-endif
-
-ifneq (true,$(my_generate_dm))
- # Add the installed_odex to the list of installed files for this module if we aren't generating a
- # dm file.
- ALL_MODULES.$(my_register_name).INSTALLED += $(installed_odex)
- ALL_MODULES.$(my_register_name).INSTALLED += $(installed_vdex)
- ALL_MODULES.$(my_register_name).INSTALLED += $(installed_art)
-
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_odex)
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_vdex)
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(built_installed_art)
-
- # Make sure to install the .odex and .vdex when you run "make <module_name>"
- $(my_all_targets): $(installed_odex) $(installed_vdex) $(installed_art)
-else
- ALL_MODULES.$(my_register_name).INSTALLED += $(my_installed_dm)
- ALL_MODULES.$(my_register_name).BUILT_INSTALLED += $(my_built_dm) $(my_installed_dm)
-
- # Make sure to install the .dm when you run "make <module_name>"
- $(my_all_targets): $(installed_dm)
-endif
-
-# Record dex-preopt config.
-DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
-DEXPREOPT.$(LOCAL_MODULE).MULTILIB := $(LOCAL_MULTILIB)
-DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
-DEXPREOPT.$(LOCAL_MODULE).PRIVILEGED_MODULE := $(LOCAL_PRIVILEGED_MODULE)
-DEXPREOPT.$(LOCAL_MODULE).VENDOR_MODULE := $(LOCAL_VENDOR_MODULE)
-DEXPREOPT.$(LOCAL_MODULE).TARGET_ARCH := $(LOCAL_MODULE_TARGET_ARCH)
-DEXPREOPT.$(LOCAL_MODULE).INSTALLED := $(installed_odex)
-DEXPREOPT.$(LOCAL_MODULE).INSTALLED_STRIPPED := $(LOCAL_INSTALLED_MODULE)
-DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS) := $(sort \
- $(DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS)) $(LOCAL_MODULE))
+ # Record dex-preopt config.
+ DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
+ DEXPREOPT.$(LOCAL_MODULE).MULTILIB := $(LOCAL_MULTILIB)
+ DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT_FLAGS := $(LOCAL_DEX_PREOPT_FLAGS)
+ DEXPREOPT.$(LOCAL_MODULE).PRIVILEGED_MODULE := $(LOCAL_PRIVILEGED_MODULE)
+ DEXPREOPT.$(LOCAL_MODULE).VENDOR_MODULE := $(LOCAL_VENDOR_MODULE)
+ DEXPREOPT.$(LOCAL_MODULE).TARGET_ARCH := $(LOCAL_MODULE_TARGET_ARCH)
+ DEXPREOPT.$(LOCAL_MODULE).INSTALLED := $(installed_odex)
+ DEXPREOPT.$(LOCAL_MODULE).INSTALLED_STRIPPED := $(LOCAL_INSTALLED_MODULE)
+ DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS) := $(sort \
+ $(DEXPREOPT.MODULES.$(LOCAL_MODULE_CLASS)) $(LOCAL_MODULE))
endif # LOCAL_DEX_PREOPT
diff --git a/core/envsetup.mk b/core/envsetup.mk
index 7128e3a..96e7e2c 100644
--- a/core/envsetup.mk
+++ b/core/envsetup.mk
@@ -756,11 +756,11 @@
TARGET_OUT_VENDOR := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR)
.KATI_READONLY := TARGET_OUT_VENDOR
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/vendor
+target_out_vendor_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_VENDOR)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_vendor_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/vendor
+target_out_vendor_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_VENDOR)
else
target_out_vendor_app_base := $(TARGET_OUT_VENDOR)
endif
@@ -839,11 +839,11 @@
TARGET_OUT_ODM := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ODM)
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_odm_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/odm
+target_out_odm_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_ODM)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_odm_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/odm
+target_out_odm_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_ODM)
else
target_out_odm_app_base := $(TARGET_OUT_ODM)
endif
@@ -895,11 +895,11 @@
TARGET_OUT_PRODUCT_EXECUTABLES := $(TARGET_OUT_PRODUCT)/bin
.KATI_READONLY := TARGET_OUT_PRODUCT
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_product_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product
+target_out_product_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_product_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product
+target_out_product_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT)
else
target_out_product_app_base := $(TARGET_OUT_PRODUCT)
endif
@@ -941,11 +941,11 @@
TARGET_OUT_PRODUCT_SERVICES := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
ifneq ($(filter address,$(SANITIZE_TARGET)),)
-target_out_product_services_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product_services
+target_out_product_services_shared_libraries_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
ifeq ($(SANITIZE_LITE),true)
# When using SANITIZE_LITE, APKs must not be packaged with sanitized libraries, as they will not
# work with unsanitized app_process. For simplicity, generate APKs into /data/asan/.
-target_out_product_services_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/product_services
+target_out_product_services_app_base := $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ASAN)/$(TARGET_COPY_OUT_PRODUCT_SERVICES)
else
target_out_product_services_app_base := $(TARGET_OUT_PRODUCT_SERVICES)
endif
diff --git a/core/executable.mk b/core/executable.mk
index e8b2f30..e71ff33 100644
--- a/core/executable.mk
+++ b/core/executable.mk
@@ -12,6 +12,8 @@
my_skip_this_target := true
else ifeq (false, $(LOCAL_CLANG))
my_skip_this_target := true
+ else ifeq (never, $(LOCAL_SANITIZE))
+ my_skip_this_target := true
endif
endif
diff --git a/core/java.mk b/core/java.mk
index 6ca2904..c015e4a 100644
--- a/core/java.mk
+++ b/core/java.mk
@@ -170,6 +170,7 @@
$(filter %.java,$(LOCAL_GENERATED_SOURCES))
java_intermediate_sources := $(addprefix $(TARGET_OUT_COMMON_INTERMEDIATES)/, $(filter %.java,$(LOCAL_INTERMEDIATE_SOURCES)))
all_java_sources := $(java_sources) $(java_intermediate_sources)
+ALL_MODULES.$(my_register_name).SRCS := $(ALL_MODULES.$(my_register_name).SRCS) $(all_java_sources)
include $(BUILD_SYSTEM)/java_common.mk
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 9343415..84d1c2c 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -620,6 +620,9 @@
ifdef LOCAL_COMPRESSED_MODULE
$(LOCAL_BUILT_MODULE) : $(MINIGZIP)
endif
+ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
+$(LOCAL_BUILT_MODULE) : $(ZIP2ZIP)
+endif
ifneq ($(BUILD_PLATFORM_ZIP),)
$(LOCAL_BUILT_MODULE) : .KATI_IMPLICIT_OUTPUTS := $(dir $(LOCAL_BUILT_MODULE))package.dex.apk
endif
@@ -685,6 +688,9 @@
## Rule to build the odex file
ifdef LOCAL_DEX_PREOPT
$(built_odex): PRIVATE_DEX_FILE := $(built_dex)
+ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
+$(built_odex): $(ZIP2ZIP)
+endif
# Use pattern rule - we may have multiple built odex files.
$(built_odex) : $(dir $(LOCAL_BUILT_MODULE))% : $(built_dex)
$(hide) mkdir -p $(dir $@) && rm -f $@
diff --git a/core/prebuilt_internal.mk b/core/prebuilt_internal.mk
index 9ea29fa..809c572 100644
--- a/core/prebuilt_internal.mk
+++ b/core/prebuilt_internal.mk
@@ -335,13 +335,14 @@
# For PRESIGNED apks we must uncompress every .so file:
# even if the .so file isn't for the current TARGET_ARCH,
# we can't strip the file.
-embedded_prebuilt_jni_libs := 'lib/*.so'
+embedded_prebuilt_jni_libs :=
endif
ifndef embedded_prebuilt_jni_libs
# No LOCAL_PREBUILT_JNI_LIBS, uncompress all.
-embedded_prebuilt_jni_libs := 'lib/*.so'
+embedded_prebuilt_jni_libs :=
endif
$(built_module): PRIVATE_EMBEDDED_JNI_LIBS := $(embedded_prebuilt_jni_libs)
+$(built_module): $(ZIP2ZIP)
ifdef LOCAL_COMPRESSED_MODULE
$(built_module) : $(MINIGZIP)
@@ -357,7 +358,7 @@
endif
$(built_module) : $(my_prebuilt_src_file) | $(ZIPALIGN) $(SIGNAPK_JAR)
$(transform-prebuilt-to-target)
- $(uncompress-shared-libs)
+ $(uncompress-prebuilt-embedded-jni-libs)
ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
$(uncompress-dexs)
endif # LOCAL_UNCOMPRESS_DEX
diff --git a/core/product.mk b/core/product.mk
index 8c8246e..d1c74e7 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -408,7 +408,7 @@
BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE \
BOARD_PRODUCT_SERVICESIMAGE_PARTITION_RESERVED_SIZE \
BOARD_SUPER_PARTITION_SIZE \
- BOARD_SUPER_PARTITION_PARTITION_LIST \
+ BOARD_SUPER_PARTITION_GROUPS \
#
# Mark the variables in _product_stash_var_list as readonly
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index d02cba6..d34f367 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -74,6 +74,25 @@
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_MODULE_FILE),$(LOCAL_BUILT_MODULE)))
endif
+# embedded JNI will already have been handled by soong
+my_embed_jni :=
+my_prebuilt_jni_libs :=
+ifdef LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH)
+ my_2nd_arch_prefix :=
+ LOCAL_JNI_SHARED_LIBRARIES := $(LOCAL_SOONG_JNI_LIBS_$(TARGET_ARCH))
+ include $(BUILD_SYSTEM)/install_jni_libs_internal.mk
+endif
+ifdef TARGET_2ND_ARCH
+ ifdef LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH)
+ my_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
+ LOCAL_JNI_SHARED_LIBRARIES := $(LOCAL_SOONG_JNI_LIBS_$(TARGET_2ND_ARCH))
+ include $(BUILD_SYSTEM)/install_jni_libs_internal.mk
+ endif
+endif
+LOCAL_SHARED_JNI_LIBRARIES :=
+my_embed_jni :=
+my_prebuilt_jni_libs :=
+my_2nd_arch_prefix :=
PACKAGES := $(PACKAGES) $(LOCAL_MODULE)
ifdef LOCAL_CERTIFICATE
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index 9f2030e..f213563 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -175,6 +175,8 @@
$($(LOCAL_2ND_ARCH_VAR_PREFIX)TARGET_STRIP) --strip-all $(LOCAL_INSTALLED_MODULE)
endif
+$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
+
endif # !skip_module
skip_module :=
diff --git a/envsetup.sh b/envsetup.sh
index 5cbd9eb..4579bef 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1594,6 +1594,25 @@
esac
}
+function acloud()
+{
+ # Let's use the built version over the prebuilt.
+ local built_acloud=${ANDROID_HOST_OUT}/bin/acloud
+ if [ -f "$built_acloud" ]; then
+ "$built_acloud" "$@"
+ return $?
+ fi
+
+ local host_os_arch=$(get_build_var HOST_PREBUILT_TAG)
+ case $host_os_arch in
+ linux-x86) "$(gettop)"/prebuilts/asuite/acloud/linux-x86/acloud "$@"
+ ;;
+ *)
+ echo "acloud is not supported on your host arch: $host_os_arch"
+ ;;
+ esac
+}
+
# Execute the contents of any vendorsetup.sh files we can find.
function source_vendorsetup() {
for dir in device vendor product; do
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index 182959c..0171806 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -90,6 +90,7 @@
init.rc \
input \
installd \
+ iorapd \
ip \
ip6tables \
iptables \
diff --git a/target/product/mainline_system.mk b/target/product/mainline_system.mk
index 8dec2d9..8d0611f 100644
--- a/target/product/mainline_system.mk
+++ b/target/product/mainline_system.mk
@@ -62,8 +62,10 @@
_my_whitelist := $(_base_mk_whitelist)
# Both /system and / are in system.img when PRODUCT_SHIPPING_API_LEVEL>=28.
+# Though we do have a new ramdisk partition for logical partitions.
_my_paths := \
$(TARGET_COPY_OUT_ROOT) \
$(TARGET_COPY_OUT_SYSTEM) \
+ $(TARGET_COPY_OUT_RAMDISK) \
$(call require-artifacts-in-path, $(_my_paths), $(_my_whitelist))
diff --git a/target/product/telephony_system.mk b/target/product/telephony_system.mk
index 3175c8a..0b1e8a2 100644
--- a/target/product/telephony_system.mk
+++ b/target/product/telephony_system.mk
@@ -18,6 +18,7 @@
# hardware, and install on the system partition.
PRODUCT_PACKAGES := \
+ ANS \
CarrierConfig \
CarrierDefaultApp \
Dialer \
diff --git a/target/product/vndk/current.txt b/target/product/vndk/current.txt
index d88b622..9f84e9f 100644
--- a/target/product/vndk/current.txt
+++ b/target/product/vndk/current.txt
@@ -164,6 +164,7 @@
VNDK-core: android.hardware.wifi@1.3.so
VNDK-core: android.hidl.allocator@1.0.so
VNDK-core: android.hidl.memory.block@1.0.so
+VNDK-core: android.hidl.safe_union@1.0.so
VNDK-core: android.hidl.token@1.0.so
VNDK-core: android.hidl.token@1.0-utils.so
VNDK-core: android.system.net.netd@1.0.so
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index d7d1bc8..2fa5f52 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -49,7 +49,6 @@
import os
import shlex
import shutil
-import subprocess
import sys
import uuid
import zipfile
@@ -259,10 +258,11 @@
args = OPTIONS.info_dict.get("avb_dtbo_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
- p = common.Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, \
- "avbtool add_hash_footer of %s failed" % (img.name,)
+ proc = common.Run(cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to call 'avbtool add_hash_footer' for {}:\n{}".format(
+ img.name, output)
img.Write()
return img.name
@@ -451,9 +451,9 @@
assert found, 'Failed to find {}'.format(image_path)
cmd.extend(split_args)
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"avbtool make_vbmeta_image failed:\n{}".format(stdoutdata)
img.Write()
@@ -481,9 +481,9 @@
if args:
cmd.extend(shlex.split(args))
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"bpttool make_table failed:\n{}".format(stdoutdata)
img.Write()
@@ -600,12 +600,10 @@
temp_care_map = common.MakeTempFile(prefix="caremap-", suffix=".pb")
care_map_gen_cmd = ["care_map_generator", temp_care_map_text, temp_care_map]
- p = common.Run(care_map_gen_cmd, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output, _ = p.communicate()
- if OPTIONS.verbose:
- print(output.rstrip())
- assert p.returncode == 0, "Failed to generate the care_map proto message."
+ proc = common.Run(care_map_gen_cmd)
+ output, _ = proc.communicate()
+ assert proc.returncode == 0, \
+ "Failed to generate the care_map proto message:\n{}".format(output)
care_map_path = "META/care_map.pb"
if output_zip and care_map_path not in output_zip.namelist():
@@ -656,9 +654,9 @@
cmd += shlex.split(OPTIONS.info_dict.get('lpmake_args').strip())
cmd += ['--output', img.name]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ stdoutdata, _ = proc.communicate()
+ assert proc.returncode == 0, \
"lpmake tool failed:\n{}".format(stdoutdata)
img.Write()
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index aeb4379..189dba2 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -23,7 +23,6 @@
import os
import os.path
import re
-import subprocess
import sys
import threading
from collections import deque, OrderedDict
@@ -43,11 +42,10 @@
# Don't dump the bsdiff/imgdiff commands, which are not useful for the case
# here, since they contain temp filenames only.
- p = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- output, _ = p.communicate()
+ proc = common.Run(cmd, verbose=False)
+ output, _ = proc.communicate()
- if p.returncode != 0:
+ if proc.returncode != 0:
raise ValueError(output)
with open(patchfile, 'rb') as f:
@@ -1494,9 +1492,9 @@
"--block-limit={}".format(max_blocks_per_transfer),
"--split-info=" + patch_info_file,
src_file, tgt_file, patch_file]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- imgdiff_output, _ = p.communicate()
- assert p.returncode == 0, \
+ proc = common.Run(cmd)
+ imgdiff_output, _ = proc.communicate()
+ assert proc.returncode == 0, \
"Failed to create imgdiff patch between {} and {}:\n{}".format(
src_name, tgt_name, imgdiff_output)
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index d5ab055..42f05a7 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -31,7 +31,6 @@
import re
import shlex
import shutil
-import subprocess
import sys
import common
@@ -52,54 +51,21 @@
Exception.__init__(self, message)
-def RunCommand(cmd, verbose=None, env=None):
- """Echo and run the given command.
-
- Args:
- cmd: the command represented as a list of strings.
- verbose: show commands being executed.
- env: a dictionary of additional environment variables.
- Returns:
- A tuple of the output and the exit code.
- """
- env_copy = None
- if env is not None:
- env_copy = os.environ.copy()
- env_copy.update(env)
- if verbose is None:
- verbose = OPTIONS.verbose
- if verbose:
- print("Running: " + " ".join(cmd))
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
- env=env_copy)
- output, _ = p.communicate()
-
- if verbose:
- print(output.rstrip())
- return (output, p.returncode)
-
-
def GetVerityFECSize(partition_size):
cmd = ["fec", "-s", str(partition_size)]
- output, exit_code = RunCommand(cmd, False)
- if exit_code != 0:
- raise BuildImageError("Failed to GetVerityFECSize:\n{}".format(output))
+ output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityTreeSize(partition_size):
cmd = ["build_verity_tree", "-s", str(partition_size)]
- output, exit_code = RunCommand(cmd, False)
- if exit_code != 0:
- raise BuildImageError("Failed to GetVerityTreeSize:\n{}".format(output))
+ output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityMetadataSize(partition_size):
cmd = ["build_verity_metadata.py", "size", str(partition_size)]
- output, exit_code = RunCommand(cmd, False)
- if exit_code != 0:
- raise BuildImageError("Failed to GetVerityMetadataSize:\n{}".format(output))
+ output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
@@ -125,10 +91,12 @@
Raises:
BuildImageError: On error.
"""
- env = {"POSIXLY_CORRECT": "1"}
+ env_copy = os.environ.copy()
+ env_copy["POSIXLY_CORRECT"] = "1"
cmd = ["du", "-s", path]
- output, exit_code = RunCommand(cmd, verbose=False, env=env)
- if exit_code != 0:
+ try:
+ output = common.RunAndCheckOutput(cmd, verbose=False, env=env_copy)
+ except common.ExternalError:
raise BuildImageError("Failed to get disk usage:\n{}".format(output))
# POSIX du returns number of blocks with block size 512
return int(output.split()[0]) * 512
@@ -160,16 +128,13 @@
The maximum image size.
Raises:
- BuildImageError: On error or getting invalid image size.
+ BuildImageError: On invalid image size.
"""
cmd = [avbtool, "add_%s_footer" % footer_type,
"--partition_size", str(partition_size), "--calc_max_image_size"]
cmd.extend(shlex.split(additional_args))
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- raise BuildImageError(
- "Failed to calculate max image size:\n{}".format(output))
+ output = common.RunAndCheckOutput(cmd)
image_size = int(output)
if image_size <= 0:
raise BuildImageError(
@@ -250,9 +215,6 @@
salt: The salt to use (a hexadecimal string) or None.
additional_args: Additional arguments to pass to "avbtool add_hash_footer"
or "avbtool add_hashtree_footer".
-
- Raises:
- BuildImageError: On error.
"""
cmd = [avbtool, "add_%s_footer" % footer_type,
"--partition_size", partition_size,
@@ -266,10 +228,7 @@
cmd.extend(shlex.split(additional_args))
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- raise BuildImageError(
- "Failed to add AVB footer:\n{}".format(output))
+ common.RunAndCheckOutput(cmd)
def AdjustPartitionSizeForVerity(partition_size, fec_supported):
@@ -324,19 +283,13 @@
padding_size):
cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
verity_path, verity_fec_path]
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- raise BuildImageError(
- "Failed to build FEC data:\n{}".format(output))
+ common.RunAndCheckOutput(cmd)
def BuildVerityTree(sparse_image_path, verity_image_path):
cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
verity_image_path]
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- raise BuildImageError(
- "Failed to build verity tree:\n{}".format(output))
+ output = common.RunAndCheckOutput(cmd)
root, salt = output.split()
return root, salt
@@ -350,10 +303,7 @@
cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
if verity_disable:
cmd.append("--verity_disable")
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- raise BuildImageError(
- "Failed to build verity metadata:\n{}".format(output))
+ common.RunAndCheckOutput(cmd)
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
@@ -367,9 +317,10 @@
BuildImageError: On error.
"""
cmd = ["append2simg", sparse_image_path, unsparse_image_path]
- output, exit_code = RunCommand(cmd)
- if exit_code != 0:
- raise BuildImageError("{}:\n{}".format(error_message, output))
+ try:
+ common.RunAndCheckOutput(cmd)
+ except:
+ raise BuildImageError(error_message)
def Append(target, file_to_append, error_message):
@@ -413,12 +364,11 @@
else:
return unsparse_image_path
inflate_command = ["simg2img", sparse_image_path, unsparse_image_path]
- inflate_output, exit_code = RunCommand(inflate_command)
- if exit_code != 0:
+ try:
+ common.RunAndCheckOutput(inflate_command)
+ except:
os.remove(unsparse_image_path)
- raise BuildImageError(
- "Error: '{}' failed with exit code {}:\n{}".format(
- inflate_command, exit_code, inflate_output))
+ raise
return unsparse_image_path
@@ -475,10 +425,7 @@
def ConvertBlockMapToBaseFs(block_map_file):
base_fs_file = common.MakeTempFile(prefix="script_gen_", suffix=".base_fs")
convert_command = ["blk_alloc_to_base_fs", block_map_file, base_fs_file]
- output, exit_code = RunCommand(convert_command)
- if exit_code != 0:
- raise BuildImageError(
- "Failed to call blk_alloc_to_base_fs:\n{}".format(output))
+ common.RunAndCheckOutput(convert_command)
return base_fs_file
@@ -729,12 +676,15 @@
raise BuildImageError(
"Error: unknown filesystem type: {}".format(fs_type))
- mkfs_output, exit_code = RunCommand(build_command)
- if exit_code != 0:
+ try:
+ mkfs_output = common.RunAndCheckOutput(build_command)
+ except:
try:
du = GetDiskUsage(in_dir)
du_str = "{} bytes ({} MB)".format(du, du // BYTES_IN_MB)
- except BuildImageError as e:
+ # Suppress any errors from GetDiskUsage() to avoid hiding the real errors
+ # from common.RunAndCheckOutput().
+ except Exception as e: # pylint: disable=broad-except
print(e, file=sys.stderr)
du_str = "unknown"
print(
@@ -750,10 +700,7 @@
int(prop_dict["image_size"]) // BYTES_IN_MB,
int(prop_dict["partition_size"]),
int(prop_dict["partition_size"]) // BYTES_IN_MB))
-
- raise BuildImageError(
- "Error: '{}' failed with exit code {}:\n{}".format(
- build_command, exit_code, mkfs_output))
+ raise
# Check if there's enough headroom space available for ext4 image.
if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
@@ -792,15 +739,12 @@
# Run e2fsck on the inflated image file
e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image]
# TODO(b/112062612): work around e2fsck failure with SANITIZE_HOST=address
- env4e2fsck = {"ASAN_OPTIONS": "detect_odr_violation=0"}
- e2fsck_output, exit_code = RunCommand(e2fsck_command, env=env4e2fsck)
-
- os.remove(unsparse_image)
-
- if exit_code != 0:
- raise BuildImageError(
- "Error: '{}' failed with exit code {}:\n{}".format(
- e2fsck_command, exit_code, e2fsck_output))
+ env4e2fsck = os.environ.copy()
+ env4e2fsck["ASAN_OPTIONS"] = "detect_odr_violation=0"
+ try:
+ common.RunAndCheckOutput(e2fsck_command, env=env4e2fsck)
+ finally:
+ os.remove(unsparse_image)
def ImagePropFromGlobalDict(glob_dict, mount_point):
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index 3cac90a..a580709 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -24,7 +24,6 @@
import re
import subprocess
import sys
-import tempfile
import zipfile
from hashlib import sha1
@@ -165,11 +164,11 @@
cmd = ['delta_generator',
'--in_file=' + payload_file,
'--public_key=' + pubkey]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
- 'Failed to verify payload with delta_generator: %s\n%s' % (package,
- stdoutdata)
+ 'Failed to verify payload with delta_generator: {}\n{}'.format(
+ package, stdoutdata)
common.ZipClose(package_zip)
# Verified successfully upon reaching here.
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 4e2346c..d1bfc8f 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -37,6 +37,7 @@
import blockimgdiff
import sparse_img
+
class Options(object):
def __init__(self):
platform_search_path = {
@@ -72,16 +73,13 @@
OPTIONS = Options()
-
# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
-
# The partitions allowed to be signed by AVB (Android verified boot 2.0).
AVB_PARTITIONS = ('boot', 'recovery', 'system', 'vendor', 'product',
'product_services', 'dtbo', 'odm')
-
# Partitions that should have their care_map added to META/care_map.pb
PARTITIONS_WITH_CARE_MAP = ('system', 'vendor', 'product', 'product_services',
'odm')
@@ -121,18 +119,59 @@
def Run(args, verbose=None, **kwargs):
- """Create and return a subprocess.Popen object.
+ """Creates and returns a subprocess.Popen object.
- Caller can specify if the command line should be printed. The global
- OPTIONS.verbose will be used if not specified.
+ Args:
+ args: The command represented as a list of strings.
+ verbose: Whether the commands should be shown (default to OPTIONS.verbose
+ if unspecified).
+ kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
+ stdin, etc. stdout and stderr will default to subprocess.PIPE and
+ subprocess.STDOUT respectively unless caller specifies any of them.
+
+ Returns:
+ A subprocess.Popen object.
"""
if verbose is None:
verbose = OPTIONS.verbose
+ if 'stdout' not in kwargs and 'stderr' not in kwargs:
+ kwargs['stdout'] = subprocess.PIPE
+ kwargs['stderr'] = subprocess.STDOUT
if verbose:
- print(" running: ", " ".join(args))
+ print(" Running: \"{}\"".format(" ".join(args)))
return subprocess.Popen(args, **kwargs)
+def RunAndCheckOutput(args, verbose=None, **kwargs):
+ """Runs the given command and returns the output.
+
+ Args:
+ args: The command represented as a list of strings.
+ verbose: Whether the commands should be shown (default to OPTIONS.verbose
+ if unspecified).
+ kwargs: Any additional args to be passed to subprocess.Popen(), such as env,
+ stdin, etc. stdout and stderr will default to subprocess.PIPE and
+ subprocess.STDOUT respectively unless caller specifies any of them.
+
+ Returns:
+ The output string.
+
+ Raises:
+ ExternalError: On non-zero exit from the command.
+ """
+ if verbose is None:
+ verbose = OPTIONS.verbose
+ proc = Run(args, verbose=verbose, **kwargs)
+ output, _ = proc.communicate()
+ if verbose:
+ print("{}".format(output.rstrip()))
+ if proc.returncode != 0:
+ raise ExternalError(
+ "Failed to run command '{}' (exit code {}):\n{}".format(
+ args, proc.returncode, output))
+ return output
+
+
def RoundUpTo4K(value):
rounded_up = value + 4095
return rounded_up - (rounded_up % 4096)
@@ -434,21 +473,13 @@
Returns:
A string of form "partition:rollback_index_location:key" that can be used to
build or verify vbmeta image.
-
- Raises:
- AssertionError: When it fails to extract the public key with avbtool.
"""
if key is None:
key = info_dict["avb_" + partition + "_key_path"]
avbtool = os.getenv('AVBTOOL') or info_dict["avb_avbtool"]
pubkey_path = MakeTempFile(prefix="avb-", suffix=".pubkey")
- proc = Run(
- [avbtool, "extract_public_key", "--key", key, "--output", pubkey_path],
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = proc.communicate()
- assert proc.returncode == 0, \
- "Failed to extract pubkey for {}:\n{}".format(
- partition, stdoutdata)
+ RunAndCheckOutput(
+ [avbtool, "extract_public_key", "--key", key, "--output", pubkey_path])
rollback_index_location = info_dict[
"avb_" + partition + "_rollback_index_location"]
@@ -551,9 +582,7 @@
fn = os.path.join(sourcedir, "recovery_dtbo")
cmd.extend(["--recovery_dtbo", fn])
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "mkbootimg of %s image failed" % (partition_name,)
+ RunAndCheckOutput(cmd)
if (info_dict.get("boot_signer") == "true" and
info_dict.get("verity_key")):
@@ -568,9 +597,7 @@
cmd.extend([path, img.name,
info_dict["verity_key"] + ".pk8",
info_dict["verity_key"] + ".x509.pem", img.name])
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "boot_signer of %s image failed" % path
+ RunAndCheckOutput(cmd)
# Sign the image if vboot is non-empty.
elif info_dict.get("vboot"):
@@ -588,9 +615,7 @@
info_dict["vboot_subkey"] + ".vbprivk",
img_keyblock.name,
img.name]
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "vboot_signer of %s image failed" % path
+ RunAndCheckOutput(cmd)
# Clean up the temp files.
img_unsigned.close()
@@ -607,10 +632,7 @@
args = info_dict.get("avb_" + partition_name + "_add_hash_footer_args")
if args and args.strip():
cmd.extend(shlex.split(args))
- p = Run(cmd, stdout=subprocess.PIPE)
- p.communicate()
- assert p.returncode == 0, "avbtool add_hash_footer of %s failed" % (
- partition_name,)
+ RunAndCheckOutput(cmd)
img.seek(os.SEEK_SET, 0)
data = img.read()
@@ -682,12 +704,7 @@
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if pattern is not None:
cmd.extend(pattern)
- p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- if p.returncode != 0:
- raise ExternalError(
- "Failed to unzip input target-files \"{}\":\n{}".format(
- filename, stdoutdata))
+ RunAndCheckOutput(cmd)
tmp = MakeTempDir(prefix="targetfiles-")
m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
@@ -926,15 +943,14 @@
key + OPTIONS.private_key_suffix,
input_name, output_name])
- p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ proc = Run(cmd, stdin=subprocess.PIPE)
if password is not None:
password += "\n"
- stdoutdata, _ = p.communicate(password)
- if p.returncode != 0:
+ stdoutdata, _ = proc.communicate(password)
+ if proc.returncode != 0:
raise ExternalError(
"Failed to run signapk.jar: return code {}:\n{}".format(
- p.returncode, stdoutdata))
+ proc.returncode, stdoutdata))
def CheckSize(data, target, info_dict):
@@ -1267,8 +1283,7 @@
first_line = i + 4
f.close()
- p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
- _, _ = p.communicate()
+ RunAndCheckOutput([self.editor, "+%d" % (first_line,), self.pwfile])
return self.ReadFile()
@@ -1396,10 +1411,7 @@
if isinstance(entries, basestring):
entries = [entries]
cmd = ["zip", "-d", zip_filename] + entries
- proc = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = proc.communicate()
- assert proc.returncode == 0, "Failed to delete %s:\n%s" % (entries,
- stdoutdata)
+ RunAndCheckOutput(cmd)
def ZipClose(zip_file):
@@ -1860,11 +1872,7 @@
'--output={}.new.dat.br'.format(self.path),
'{}.new.dat'.format(self.path)]
print("Compressing {}.new.dat with brotli".format(self.partition))
- p = Run(brotli_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- stdoutdata, _ = p.communicate()
- assert p.returncode == 0, \
- 'Failed to compress {}.new.dat with brotli:\n{}'.format(
- self.partition, stdoutdata)
+ RunAndCheckOutput(brotli_cmd)
new_data_name = '{}.new.dat.br'.format(self.partition)
ZipWrite(output_zip,
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 755eda9..7ea53f8 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -394,8 +394,7 @@
signing_key = common.MakeTempFile(prefix="key-", suffix=".key")
cmd.extend(["-out", signing_key])
- get_signing_key = common.Run(cmd, verbose=False, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
+ get_signing_key = common.Run(cmd, verbose=False)
stdoutdata, _ = get_signing_key.communicate()
assert get_signing_key.returncode == 0, \
"Failed to get signing key: {}".format(stdoutdata)
@@ -411,7 +410,7 @@
"""Signs the given input file. Returns the output filename."""
out_file = common.MakeTempFile(prefix="signed-", suffix=".bin")
cmd = [self.signer] + self.signer_args + ['-in', in_file, '-out', out_file]
- signing = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ signing = common.Run(cmd)
stdoutdata, _ = signing.communicate()
assert signing.returncode == 0, \
"Failed to sign the input file: {}".format(stdoutdata)
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index a73746e..ad22b72 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -16,8 +16,6 @@
import os
import os.path
-import subprocess
-import unittest
import zipfile
import common
@@ -31,23 +29,22 @@
OPTIONS = common.OPTIONS
-class AddImagesToTargetFilesTest(unittest.TestCase):
+class AddImagesToTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
OPTIONS.input_tmp = common.MakeTempDir()
- def tearDown(self):
- common.Cleanup()
-
def _verifyCareMap(self, expected, file_name):
"""Parses the care_map.pb; and checks the content in plain text."""
text_file = common.MakeTempFile(prefix="caremap-", suffix=".txt")
# Calls an external binary to convert the proto message.
cmd = ["care_map_generator", "--parse_proto", file_name, text_file]
- p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
- p.communicate()
- self.assertEqual(0, p.returncode)
+ proc = common.Run(cmd)
+ output, _ = proc.communicate()
+ self.assertEqual(
+ 0, proc.returncode,
+ "Failed to run care_map_generator:\n{}".format(output))
with open(text_file, 'r') as verify_fp:
plain_text = verify_fp.read()
diff --git a/tools/releasetools/test_blockimgdiff.py b/tools/releasetools/test_blockimgdiff.py
index 124b4d5..857026e 100644
--- a/tools/releasetools/test_blockimgdiff.py
+++ b/tools/releasetools/test_blockimgdiff.py
@@ -14,17 +14,14 @@
# limitations under the License.
#
-from __future__ import print_function
-
-import unittest
-
import common
-from blockimgdiff import (BlockImageDiff, EmptyImage, HeapItem, ImgdiffStats,
- Transfer)
+from blockimgdiff import (
+ BlockImageDiff, EmptyImage, HeapItem, ImgdiffStats, Transfer)
from rangelib import RangeSet
+from test_utils import ReleaseToolsTestCase
-class HealpItemTest(unittest.TestCase):
+class HealpItemTest(ReleaseToolsTestCase):
class Item(object):
def __init__(self, score):
@@ -54,7 +51,7 @@
self.assertFalse(item)
-class BlockImageDiffTest(unittest.TestCase):
+class BlockImageDiffTest(ReleaseToolsTestCase):
def test_GenerateDigraphOrder(self):
"""Make sure GenerateDigraph preserves the order.
@@ -245,7 +242,7 @@
block_image_diff.imgdiff_stats.stats)
-class ImgdiffStatsTest(unittest.TestCase):
+class ImgdiffStatsTest(ReleaseToolsTestCase):
def test_Log(self):
imgdiff_stats = ImgdiffStats()
diff --git a/tools/releasetools/test_build_image.py b/tools/releasetools/test_build_image.py
index 94c31ee..6f853e8 100644
--- a/tools/releasetools/test_build_image.py
+++ b/tools/releasetools/test_build_image.py
@@ -18,15 +18,15 @@
import math
import os.path
import random
-import unittest
import common
from build_image import (
AVBCalcMinPartitionSize, BLOCK_SIZE, BuildImageError, CheckHeadroom,
- RunCommand, SetUpInDirAndFsConfig)
+ SetUpInDirAndFsConfig)
+from test_utils import ReleaseToolsTestCase
-class BuildImageTest(unittest.TestCase):
+class BuildImageTest(ReleaseToolsTestCase):
# Available: 1000 blocks.
EXT4FS_OUTPUT = (
@@ -39,9 +39,6 @@
self._image_sizes = [BLOCK_SIZE * random.randint(51200, 524288) + offset
for offset in range(BLOCK_SIZE)]
- def tearDown(self):
- common.Cleanup()
-
def test_CheckHeadroom_SizeUnderLimit(self):
# Required headroom: 1000 blocks.
prop_dict = {
@@ -91,8 +88,9 @@
output_image = common.MakeTempFile(suffix='.img')
command = ['mkuserimg_mke2fs', input_dir, output_image, 'ext4',
'/system', '409600', '-j', '0']
- ext4fs_output, exit_code = RunCommand(command)
- self.assertEqual(0, exit_code)
+ proc = common.Run(command)
+ ext4fs_output, _ = proc.communicate()
+ self.assertEqual(0, proc.returncode)
prop_dict = {
'fs_type' : 'ext4',
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 5179900..c99049a 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -19,7 +19,6 @@
import subprocess
import tempfile
import time
-import unittest
import zipfile
from hashlib import sha1
@@ -44,7 +43,8 @@
yield '\0' * (step_size - block_size)
-class CommonZipTest(unittest.TestCase):
+class CommonZipTest(test_utils.ReleaseToolsTestCase):
+
def _verify(self, zip_file, zip_file_name, arcname, expected_hash,
test_file_name=None, expected_stat=None, expected_mode=0o644,
expected_compress_type=zipfile.ZIP_STORED):
@@ -334,8 +334,8 @@
self.assertFalse('Test2' in entries)
self.assertTrue('Test3' in entries)
- self.assertRaises(AssertionError, common.ZipDelete, zip_file.name,
- 'Test2')
+ self.assertRaises(
+ common.ExternalError, common.ZipDelete, zip_file.name, 'Test2')
with zipfile.ZipFile(zip_file.name, 'r') as check_zip:
entries = check_zip.namelist()
self.assertTrue('Test1' in entries)
@@ -359,7 +359,7 @@
os.remove(zip_file.name)
-class CommonApkUtilsTest(unittest.TestCase):
+class CommonApkUtilsTest(test_utils.ReleaseToolsTestCase):
"""Tests the APK utils related functions."""
APKCERTS_TXT1 = (
@@ -407,9 +407,6 @@
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def _write_apkcerts_txt(apkcerts_txt, additional=None):
if additional is None:
@@ -523,14 +520,11 @@
{})
-class CommonUtilsTest(unittest.TestCase):
+class CommonUtilsTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def test_GetSparseImage_emptyBlockMapFile(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
with zipfile.ZipFile(target_files, 'w') as target_files_zip:
@@ -782,7 +776,8 @@
'avb_system_rollback_index_location': 2,
}
self.assertRaises(
- AssertionError, common.GetAvbChainedPartitionArg, 'system', info_dict)
+ common.ExternalError, common.GetAvbChainedPartitionArg, 'system',
+ info_dict)
INFO_DICT_DEFAULT = {
'recovery_api_version': 3,
@@ -934,7 +929,7 @@
AssertionError, common.LoadInfoDict, target_files_zip, True)
-class InstallRecoveryScriptFormatTest(unittest.TestCase):
+class InstallRecoveryScriptFormatTest(test_utils.ReleaseToolsTestCase):
"""Checks the format of install-recovery.sh.
Its format should match between common.py and validate_target_files.py.
@@ -993,6 +988,3 @@
recovery_image, boot_image, self._info)
validate_target_files.ValidateInstallRecoveryScript(self._tempdir,
self._info)
-
- def tearDown(self):
- common.Cleanup()
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 1d8a786..f75b3a7 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -17,8 +17,6 @@
import copy
import os
import os.path
-import subprocess
-import unittest
import zipfile
import common
@@ -105,7 +103,7 @@
self.script.append(('AssertSomeThumbprint',) + args)
-class BuildInfoTest(unittest.TestCase):
+class BuildInfoTest(test_utils.ReleaseToolsTestCase):
TEST_INFO_DICT = {
'build.prop' : {
@@ -353,10 +351,7 @@
script_writer.script)
-class LoadOemDictsTest(unittest.TestCase):
-
- def tearDown(self):
- common.Cleanup()
+class LoadOemDictsTest(test_utils.ReleaseToolsTestCase):
def test_NoneDict(self):
self.assertIsNone(_LoadOemDicts(None))
@@ -389,7 +384,7 @@
self.assertEqual('{}'.format(i), oem_dict['ro.build.index'])
-class OtaFromTargetFilesTest(unittest.TestCase):
+class OtaFromTargetFilesTest(test_utils.ReleaseToolsTestCase):
TEST_TARGET_INFO_DICT = {
'build.prop' : {
@@ -431,9 +426,6 @@
common.OPTIONS.search_path = test_utils.get_search_path()
self.assertIsNotNone(common.OPTIONS.search_path)
- def tearDown(self):
- common.Cleanup()
-
def test_GetPackageMetadata_abOta_full(self):
target_info_dict = copy.deepcopy(self.TEST_TARGET_INFO_DICT)
target_info_dict['ab_update'] = 'true'
@@ -721,14 +713,11 @@
)
-class PropertyFilesTest(unittest.TestCase):
+class PropertyFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
common.OPTIONS.no_signing = False
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def construct_zip_package(entries):
zip_file = common.MakeTempFile(suffix='.zip')
@@ -1024,11 +1013,11 @@
'--signature_size', str(self.SIGNATURE_SIZE),
'--metadata_hash_file', metadata_sig_file,
'--payload_hash_file', payload_sig_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
- 'Failed to run brillo_update_payload: {}'.format(stdoutdata))
+ 'Failed to run brillo_update_payload:\n{}'.format(stdoutdata))
signed_metadata_sig_file = payload_signer.Sign(metadata_sig_file)
@@ -1152,7 +1141,7 @@
property_files.Verify(zip_fp, raw_metadata)
-class PayloadSignerTest(unittest.TestCase):
+class PayloadSignerTest(test_utils.ReleaseToolsTestCase):
SIGFILE = 'sigfile.bin'
SIGNED_SIGFILE = 'signed-sigfile.bin'
@@ -1168,9 +1157,6 @@
common.OPTIONS.package_key : None,
}
- def tearDown(self):
- common.Cleanup()
-
def _assertFilesEqual(self, file1, file2):
with open(file1, 'rb') as fp1, open(file2, 'rb') as fp2:
self.assertEqual(fp1.read(), fp2.read())
@@ -1231,7 +1217,7 @@
self._assertFilesEqual(verify_file, signed_file)
-class PayloadTest(unittest.TestCase):
+class PayloadTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
@@ -1245,9 +1231,6 @@
common.OPTIONS.package_key : None,
}
- def tearDown(self):
- common.Cleanup()
-
@staticmethod
def _create_payload_full(secondary=False):
target_file = construct_target_files(secondary)
diff --git a/tools/releasetools/test_rangelib.py b/tools/releasetools/test_rangelib.py
index e181187..1251e11 100644
--- a/tools/releasetools/test_rangelib.py
+++ b/tools/releasetools/test_rangelib.py
@@ -14,11 +14,11 @@
# limitations under the License.
#
-import unittest
-
from rangelib import RangeSet
+from test_utils import ReleaseToolsTestCase
-class RangeSetTest(unittest.TestCase):
+
+class RangeSetTest(ReleaseToolsTestCase):
def test_union(self):
self.assertEqual(RangeSet("10-19 30-34").union(RangeSet("18-29")),
@@ -129,8 +129,8 @@
self.assertEqual(
RangeSet.parse_raw(RangeSet("0-9").to_string_raw()),
RangeSet("0-9"))
- self.assertEqual(RangeSet.parse_raw(
- RangeSet("2-10 12").to_string_raw()),
+ self.assertEqual(
+ RangeSet.parse_raw(RangeSet("2-10 12").to_string_raw()),
RangeSet("2-10 12"))
self.assertEqual(
RangeSet.parse_raw(RangeSet("11 2-10 12 1 0").to_string_raw()),
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index ac1b567..18762ee 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -14,11 +14,8 @@
# limitations under the License.
#
-from __future__ import print_function
-
import base64
import os.path
-import unittest
import zipfile
import common
@@ -28,7 +25,7 @@
ReplaceVerityKeyId, RewriteProps)
-class SignTargetFilesApksTest(unittest.TestCase):
+class SignTargetFilesApksTest(test_utils.ReleaseToolsTestCase):
MAC_PERMISSIONS_XML = """<?xml version="1.0" encoding="iso-8859-1"?>
<policy>
@@ -39,9 +36,6 @@
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def test_EditTags(self):
self.assertEqual(EditTags('dev-keys'), ('release-keys'))
self.assertEqual(EditTags('test-keys'), ('release-keys'))
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index a15ff5b..b9c8dc7 100644
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -21,6 +21,7 @@
import os
import os.path
import struct
+import unittest
import common
@@ -110,3 +111,10 @@
fp.write(os.urandom(data_size))
return sparse_image
+
+
+class ReleaseToolsTestCase(unittest.TestCase):
+ """A common base class for all the releasetools unittests."""
+
+ def tearDown(self):
+ common.Cleanup()
diff --git a/tools/releasetools/test_validate_target_files.py b/tools/releasetools/test_validate_target_files.py
index 0aaf069..7b29ef8 100644
--- a/tools/releasetools/test_validate_target_files.py
+++ b/tools/releasetools/test_validate_target_files.py
@@ -16,13 +16,9 @@
"""Unittests for validate_target_files.py."""
-from __future__ import print_function
-
import os
import os.path
import shutil
-import subprocess
-import unittest
import build_image
import common
@@ -30,21 +26,18 @@
from validate_target_files import ValidateVerifiedBootImages
-class ValidateTargetFilesTest(unittest.TestCase):
+class ValidateTargetFilesTest(test_utils.ReleaseToolsTestCase):
def setUp(self):
self.testdata_dir = test_utils.get_testdata_dir()
- def tearDown(self):
- common.Cleanup()
-
def _generate_boot_image(self, output_file):
kernel = common.MakeTempFile(prefix='kernel-')
with open(kernel, 'wb') as kernel_fp:
kernel_fp.write(os.urandom(10))
cmd = ['mkbootimg', '--kernel', kernel, '-o', output_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
@@ -53,7 +46,7 @@
cmd = ['boot_signer', '/boot', output_file,
os.path.join(self.testdata_dir, 'testkey.pk8'),
os.path.join(self.testdata_dir, 'testkey.x509.pem'), output_file]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
@@ -123,7 +116,7 @@
system_root = common.MakeTempDir()
cmd = ['mkuserimg_mke2fs', '-s', system_root, output_file, 'ext4',
'/system', str(image_size), '-j', '0']
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
self.assertEqual(
0, proc.returncode,
diff --git a/tools/releasetools/test_verity_utils.py b/tools/releasetools/test_verity_utils.py
index 580612f..a9cd17b 100644
--- a/tools/releasetools/test_verity_utils.py
+++ b/tools/releasetools/test_verity_utils.py
@@ -16,23 +16,22 @@
"""Unittests for verity_utils.py."""
-from __future__ import print_function
-
-import os
import os.path
-import unittest
import build_image
import common
import sparse_img
-import test_utils
-import verity_utils
from rangelib import RangeSet
+from test_utils import get_testdata_dir, ReleaseToolsTestCase
+from verity_utils import (
+ CreateHashtreeInfoGenerator, HashtreeInfo,
+ VerifiedBootVersion1HashtreeInfoGenerator)
-class VerityUtilsTest(unittest.TestCase):
+class VerifiedBootVersion1HashtreeInfoGeneratorTest(ReleaseToolsTestCase):
+
def setUp(self):
- self.testdata_dir = test_utils.get_testdata_dir()
+ self.testdata_dir = get_testdata_dir()
self.partition_size = 1024 * 1024
self.prop_dict = {
@@ -48,9 +47,6 @@
self.expected_root_hash = \
"0b7c4565e87b1026e11fbab91c0bc29e185c847a5b44d40e6e86e461e8adf80d"
- def tearDown(self):
- common.Cleanup()
-
def _create_simg(self, raw_data):
output_file = common.MakeTempFile()
raw_image = common.MakeTempFile()
@@ -88,33 +84,33 @@
return output_file
- def test_VerifiedBootVersion1HashtreeInfoGenerator_create(self):
+ def test_CreateHashtreeInfoGenerator(self):
image_file = sparse_img.SparseImage(self._generate_image())
- generator = verity_utils.CreateHashtreeInfoGenerator(
+ generator = CreateHashtreeInfoGenerator(
'system', image_file, self.prop_dict)
self.assertEqual(
- verity_utils.VerifiedBootVersion1HashtreeInfoGenerator, type(generator))
+ VerifiedBootVersion1HashtreeInfoGenerator, type(generator))
self.assertEqual(self.partition_size, generator.partition_size)
self.assertTrue(generator.fec_supported)
- def test_VerifiedBootVersion1HashtreeInfoGenerator_decomposeImage(self):
+ def test_DecomposeSparseImage(self):
image_file = sparse_img.SparseImage(self._generate_image())
- generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
self.partition_size, 4096, True)
generator.DecomposeSparseImage(image_file)
self.assertEqual(991232, generator.filesystem_size)
self.assertEqual(12288, generator.hashtree_size)
self.assertEqual(32768, generator.metadata_size)
- def test_VerifiedBootVersion1HashtreeInfoGenerator_parseHashtreeMetadata(
- self):
+ def test_ParseHashtreeMetadata(self):
image_file = sparse_img.SparseImage(self._generate_image())
- generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
self.partition_size, 4096, True)
generator.DecomposeSparseImage(image_file)
+ # pylint: disable=protected-access
generator._ParseHashtreeMetadata()
self.assertEqual(
@@ -122,13 +118,12 @@
self.assertEqual(self.fixed_salt, generator.hashtree_info.salt)
self.assertEqual(self.expected_root_hash, generator.hashtree_info.root_hash)
- def test_VerifiedBootVersion1HashtreeInfoGenerator_validateHashtree_smoke(
- self):
- generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+ def test_ValidateHashtree_smoke(self):
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
self.partition_size, 4096, True)
generator.image = sparse_img.SparseImage(self._generate_image())
- generator.hashtree_info = info = verity_utils.HashtreeInfo()
+ generator.hashtree_info = info = HashtreeInfo()
info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
info.hashtree_range = RangeSet(
data=[991232 / 4096, (991232 + 12288) / 4096])
@@ -138,13 +133,12 @@
self.assertTrue(generator.ValidateHashtree())
- def test_VerifiedBootVersion1HashtreeInfoGenerator_validateHashtree_failure(
- self):
- generator = verity_utils.VerifiedBootVersion1HashtreeInfoGenerator(
+ def test_ValidateHashtree_failure(self):
+ generator = VerifiedBootVersion1HashtreeInfoGenerator(
self.partition_size, 4096, True)
generator.image = sparse_img.SparseImage(self._generate_image())
- generator.hashtree_info = info = verity_utils.HashtreeInfo()
+ generator.hashtree_info = info = HashtreeInfo()
info.filesystem_range = RangeSet(data=[0, 991232 / 4096])
info.hashtree_range = RangeSet(
data=[991232 / 4096, (991232 + 12288) / 4096])
@@ -154,10 +148,9 @@
self.assertFalse(generator.ValidateHashtree())
- def test_VerifiedBootVersion1HashtreeInfoGenerator_generate(self):
+ def test_Generate(self):
image_file = sparse_img.SparseImage(self._generate_image())
- generator = verity_utils.CreateHashtreeInfoGenerator(
- 'system', 4096, self.prop_dict)
+ generator = CreateHashtreeInfoGenerator('system', 4096, self.prop_dict)
info = generator.Generate(image_file)
self.assertEqual(RangeSet(data=[0, 991232 / 4096]), info.filesystem_range)
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 09f800f..1cc4a60 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -35,7 +35,6 @@
import logging
import os.path
import re
-import subprocess
import zipfile
import common
@@ -256,7 +255,7 @@
continue
cmd = ['boot_signer', '-verify', image_path, '-certificate', verity_key]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with boot_signer:\n{}'.format(image, stdoutdata)
@@ -299,7 +298,7 @@
continue
cmd = ['verity_verifier', image_path, '-mincrypt', verity_key_mincrypt]
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with verity_verifier (key: {}):\n{}'.format(
@@ -328,7 +327,7 @@
partition, info_dict, options[key_name])
cmd.extend(["--expected_chain_partition", chained_partition_arg])
- proc = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ proc = common.Run(cmd)
stdoutdata, _ = proc.communicate()
assert proc.returncode == 0, \
'Failed to verify {} with verity_verifier (key: {}):\n{}'.format(
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index 38ebcf5..c512ef3 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -63,10 +63,6 @@
raise NotImplementedError
-class VerifiedBootVersion2HashtreeInfoGenerator(HashtreeInfoGenerator):
- pass
-
-
class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
"""A class that parses the metadata of hashtree for a given partition."""
diff --git a/tools/uuidgen.py b/tools/uuidgen.py
deleted file mode 100755
index d3091a7..0000000
--- a/tools/uuidgen.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2018 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-import sys
-import uuid
-
-def uuidgen(name):
- return uuid.uuid5(uuid.uuid5(uuid.NAMESPACE_URL, "android.com"), name)
-
-if __name__ == "__main__":
- if len(sys.argv) < 2:
- print("Usage: uuidgen.py <name>")
- sys.exit(1)
- name = sys.argv[1]
- print(uuidgen(name))
diff --git a/tools/zipalign/ZipAlign.cpp b/tools/zipalign/ZipAlign.cpp
index af04a34..eea1749 100644
--- a/tools/zipalign/ZipAlign.cpp
+++ b/tools/zipalign/ZipAlign.cpp
@@ -111,7 +111,7 @@
status = pZout->add(pZin, pEntry, padding, &pNewEntry);
}
- if (status != NO_ERROR)
+ if (status != OK)
return 1;
bias += padding;
//printf(" added '%s' at %ld (pad=%d)\n",
@@ -146,13 +146,13 @@
return 1;
}
- if (zin.open(inFileName, ZipFile::kOpenReadOnly) != NO_ERROR) {
+ if (zin.open(inFileName, ZipFile::kOpenReadOnly) != OK) {
fprintf(stderr, "Unable to open '%s' as zip archive\n", inFileName);
return 1;
}
if (zout.open(outFileName,
ZipFile::kOpenReadWrite|ZipFile::kOpenCreate|ZipFile::kOpenTruncate)
- != NO_ERROR)
+ != OK)
{
fprintf(stderr, "Unable to open '%s' as zip archive\n", outFileName);
return 1;
@@ -178,7 +178,7 @@
if (verbose)
printf("Verifying alignment of %s (%d)...\n", fileName, alignment);
- if (zipFile.open(fileName, ZipFile::kOpenReadOnly) != NO_ERROR) {
+ if (zipFile.open(fileName, ZipFile::kOpenReadOnly) != OK) {
fprintf(stderr, "Unable to open '%s' for verification\n", fileName);
return 1;
}
diff --git a/tools/zipalign/ZipEntry.cpp b/tools/zipalign/ZipEntry.cpp
index c3c833e..810d74a 100644
--- a/tools/zipalign/ZipEntry.cpp
+++ b/tools/zipalign/ZipEntry.cpp
@@ -48,7 +48,7 @@
/* read the CDE */
result = mCDE.read(fp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("mCDE.read failed\n");
return result;
}
@@ -64,7 +64,7 @@
}
result = mLFH.read(fp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("mLFH.read failed\n");
return result;
}
@@ -103,7 +103,7 @@
* can defer worrying about that to when we're extracting data.
*/
- return NO_ERROR;
+ return OK;
}
/*
@@ -189,7 +189,7 @@
mLFH.mExtraFieldLength+1);
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -225,7 +225,7 @@
mLFH.mExtraFieldLength = padding;
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -403,7 +403,7 @@
*/
status_t ZipEntry::LocalFileHeader::read(FILE* fp)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t buf[kLFHLen];
assert(mFileName == NULL);
@@ -499,7 +499,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
@@ -537,7 +537,7 @@
*/
status_t ZipEntry::CentralDirEntry::read(FILE* fp)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t buf[kCDELen];
/* no re-use */
@@ -669,7 +669,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
/*
diff --git a/tools/zipalign/ZipFile.cpp b/tools/zipalign/ZipFile.cpp
index 9e44956..63fb962 100644
--- a/tools/zipalign/ZipFile.cpp
+++ b/tools/zipalign/ZipFile.cpp
@@ -120,7 +120,7 @@
* have a need for empty zip files.)
*/
mNeedCDRewrite = true;
- result = NO_ERROR;
+ result = OK;
}
if (flags & kOpenReadOnly)
@@ -205,7 +205,7 @@
*/
status_t ZipFile::readCentralDir(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
uint8_t* buf = NULL;
off_t fileLength, seekStart;
long readAmount;
@@ -267,7 +267,7 @@
/* extract eocd values */
result = mEOCD.readBuf(buf + i, readAmount - i);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("Failure reading %ld bytes of EOCD values", readAmount - i);
goto bail;
}
@@ -311,7 +311,7 @@
ZipEntry* pEntry = new ZipEntry;
result = pEntry->initFromCDE(mZipFp);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("initFromCDE failed\n");
delete pEntry;
goto bail;
@@ -361,7 +361,7 @@
const char* storageName, int compressionMethod, ZipEntry** ppEntry)
{
ZipEntry* pEntry = NULL;
- status_t result = NO_ERROR;
+ status_t result = OK;
long lfhPosn, startPosn, endPosn, uncompressedLen;
FILE* inputFp = NULL;
uint32_t crc;
@@ -415,7 +415,7 @@
if (compressionMethod == ZipEntry::kCompressDeflated) {
bool failed = false;
result = compressFpToFp(mZipFp, inputFp, data, size, &crc);
- if (result != NO_ERROR) {
+ if (result != OK) {
ALOGD("compression failed, storing\n");
failed = true;
} else {
@@ -447,7 +447,7 @@
} else {
result = copyDataToFp(mZipFp, data, size, &crc);
}
- if (result != NO_ERROR) {
+ if (result != OK) {
// don't need to truncate; happens in CDE rewrite
ALOGD("failed copying data in\n");
goto bail;
@@ -535,11 +535,11 @@
}
result = pEntry->initFromExternal(pSourceEntry);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
if (padding != 0) {
result = pEntry->addPadding(padding);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
}
@@ -574,7 +574,7 @@
copyLen += ZipEntry::kDataDescriptorLen;
if (copyPartialFpToFp(mZipFp, pSourceZip->mZipFp, copyLen, NULL)
- != NO_ERROR)
+ != OK)
{
ALOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
@@ -603,7 +603,7 @@
*ppEntry = pEntry;
pEntry = NULL;
- result = NO_ERROR;
+ result = OK;
bail:
delete pEntry;
@@ -642,7 +642,7 @@
}
result = pEntry->initFromExternal(pSourceEntry);
- if (result != NO_ERROR)
+ if (result != OK)
goto bail;
/*
@@ -682,7 +682,7 @@
}
long startPosn = ftell(mZipFp);
uint32_t crc;
- if (compressFpToFp(mZipFp, NULL, buf, uncompressedLen, &crc) != NO_ERROR) {
+ if (compressFpToFp(mZipFp, NULL, buf, uncompressedLen, &crc) != OK) {
ALOGW("recompress of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
free(buf);
@@ -699,7 +699,7 @@
copyLen += ZipEntry::kDataDescriptorLen;
if (copyPartialFpToFp(mZipFp, pSourceZip->mZipFp, copyLen, NULL)
- != NO_ERROR)
+ != OK)
{
ALOGW("copy of '%s' failed\n", pEntry->mCDE.mFileName);
result = UNKNOWN_ERROR;
@@ -738,7 +738,7 @@
*ppEntry = pEntry;
pEntry = NULL;
- result = NO_ERROR;
+ result = OK;
bail:
delete pEntry;
@@ -773,7 +773,7 @@
}
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -793,7 +793,7 @@
}
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -837,7 +837,7 @@
length -= readSize;
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -849,7 +849,7 @@
status_t ZipFile::compressFpToFp(FILE* dstFp, FILE* srcFp,
const void* data, size_t size, uint32_t* pCRC32)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
const size_t kBufSize = 1024 * 1024;
uint8_t* inBuf = NULL;
uint8_t* outBuf = NULL;
@@ -933,7 +933,7 @@
/* mark entry as deleted, and mark archive as dirty */
pEntry->setDeleted();
mNeedCDRewrite = true;
- return NO_ERROR;
+ return OK;
}
/*
@@ -944,19 +944,19 @@
*/
status_t ZipFile::flush(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
long eocdPosn;
int i, count;
if (mReadOnly)
return INVALID_OPERATION;
if (!mNeedCDRewrite)
- return NO_ERROR;
+ return OK;
assert(mZipFp != NULL);
result = crunchArchive();
- if (result != NO_ERROR)
+ if (result != OK)
return result;
if (fseek(mZipFp, mEOCD.mCentralDirOffset, SEEK_SET) != 0)
@@ -986,7 +986,7 @@
/* should we clear the "newly added" flag in all entries now? */
mNeedCDRewrite = false;
- return NO_ERROR;
+ return OK;
}
/*
@@ -997,7 +997,7 @@
*/
status_t ZipFile::crunchArchive(void)
{
- status_t result = NO_ERROR;
+ status_t result = OK;
int i, count;
long delCount, adjust;
@@ -1065,7 +1065,7 @@
// pEntry->getFileName(), adjust);
result = filemove(mZipFp, pEntry->getLFHOffset() - adjust,
pEntry->getLFHOffset(), span);
- if (result != NO_ERROR) {
+ if (result != OK) {
/* this is why you use a temp file */
ALOGE("error during crunch - archive is toast\n");
return result;
@@ -1097,7 +1097,7 @@
status_t ZipFile::filemove(FILE* fp, off_t dst, off_t src, size_t n)
{
if (dst == src || n <= 0)
- return NO_ERROR;
+ return OK;
uint8_t readBuf[32768];
@@ -1140,7 +1140,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
@@ -1355,7 +1355,7 @@
memcpy(mComment, buf + kEOCDLen, mCommentLen);
}
- return NO_ERROR;
+ return OK;
}
/*
@@ -1382,7 +1382,7 @@
return UNKNOWN_ERROR;
}
- return NO_ERROR;
+ return OK;
}
/*