Merge "Pack JIT mini-debug-infos together."
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index cf35914..b6d6600 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -476,7 +476,6 @@
int rc = TEMP_FAILURE_RETRY(recvmsg(control_sock_, &msg, 0));
if (rc <= 0) {
- PLOG(WARNING) << "Receiving file descriptor from ADB failed (socket " << control_sock_ << ")";
return android::base::unique_fd(-1);
} else {
VLOG(jdwp) << "Fds have been received from ADB!";
@@ -624,7 +623,6 @@
android::base::unique_fd new_fd(ReadFdFromAdb());
if (new_fd == -1) {
// Something went wrong. We need to retry getting the control socket.
- PLOG(ERROR) << "Something went wrong getting fds from adb. Retry!";
control_sock_.reset();
break;
} else if (adb_connection_socket_ != -1) {
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 03e68ae..c73b988 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -73,8 +73,22 @@
HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art
TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art
-# Jar files for core.art.
-TEST_CORE_JARS := core-oj core-libart core-simple conscrypt okhttp bouncycastle
+# Modules to compile for core.art.
+# TODO: Move conscrypt from CORE_IMG_JARS to TEST_CORE_JARS and adjust scripts to fix Golem.
+CORE_IMG_JARS := core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt
+HOST_CORE_IMG_JARS := $(addsuffix -hostdex,$(CORE_IMG_JARS))
+TARGET_CORE_IMG_JARS := $(addsuffix -testdex,$(CORE_IMG_JARS))
+HOST_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_IMG_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
+ifeq ($(ART_TEST_ANDROID_ROOT),)
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),/$(DEXPREOPT_BOOT_JAR_DIR)/$(jar).jar)
+else
+TARGET_CORE_IMG_DEX_LOCATIONS := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(ART_TEST_ANDROID_ROOT)/$(jar).jar)
+endif
+HOST_CORE_IMG_DEX_FILES := $(foreach jar,$(HOST_CORE_IMG_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
+TARGET_CORE_IMG_DEX_FILES := $(foreach jar,$(TARGET_CORE_IMG_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
+
+# Jar files for the boot class path for testing. Must start with CORE_IMG_JARS.
+TEST_CORE_JARS := $(CORE_IMG_JARS)
HOST_TEST_CORE_JARS := $(addsuffix -hostdex,$(TEST_CORE_JARS))
TARGET_TEST_CORE_JARS := $(addsuffix -testdex,$(TEST_CORE_JARS))
HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_TEST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar)
@@ -83,7 +97,6 @@
else
TARGET_CORE_DEX_LOCATIONS := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(ART_TEST_ANDROID_ROOT)/framework/$(jar).jar)
endif
-
HOST_CORE_DEX_FILES := $(foreach jar,$(HOST_TEST_CORE_JARS), $(call intermediates-dir-for,JAVA_LIBRARIES,$(jar),t,COMMON)/javalib.jar)
TARGET_CORE_DEX_FILES := $(foreach jar,$(TARGET_TEST_CORE_JARS),$(call intermediates-dir-for,JAVA_LIBRARIES,$(jar), ,COMMON)/javalib.jar)
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index d8014bd..be1791b 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -133,6 +133,7 @@
LOCAL_MODULE_PATH := $(3)
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+ LOCAL_MIN_SDK_VERSION := 19
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
endif
include $(BUILD_JAVA_LIBRARY)
@@ -148,6 +149,7 @@
LOCAL_JAVA_LIBRARIES := $(HOST_TEST_CORE_JARS)
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+ LOCAL_MIN_SDK_VERSION := 19
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 6885946..a926d9a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -452,12 +452,7 @@
$$(gtest_exe) \
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \
$$($(3)TARGET_OUT_SHARED_LIBRARIES)/libopenjdkd.so \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar \
- $$(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+ $$(foreach jar,$$(TARGET_TEST_CORE_JARS),$$(TARGET_OUT_JAVA_LIBRARIES)/$$(jar).jar)
ART_TEST_TARGET_GTEST_DEPENDENCIES += $$(gtest_deps)
@@ -515,7 +510,8 @@
$$($(3)ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$$(ART_HOST_SHLIB_EXTENSION) \
$$(gtest_exe) \
$$(ART_GTEST_$(1)_HOST_DEPS) \
- $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX))
+ $(foreach file,$(ART_GTEST_$(1)_DEX_DEPS),$(ART_TEST_HOST_GTEST_$(file)_DEX)) \
+ $(HOST_OUT_EXECUTABLES)/timeout_dumper
ART_TEST_HOST_GTEST_DEPENDENCIES += $$(gtest_deps)
@@ -528,7 +524,9 @@
$$(gtest_output): NAME := $$(gtest_rule)
ifeq (,$(SANITIZE_HOST))
$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
- $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && $$< --gtest_output=xml:$$@ && \
+ $(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && \
+ timeout --foreground -k 120s -s SIGRTMIN+2 2400s $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+ $$< --gtest_output=xml:$$@ && \
$$(call ART_TEST_PASSED,$$(NAME))) || $$(call ART_TEST_FAILED,$$(NAME))
else
# Note: envsetup currently exports ASAN_OPTIONS=detect_leaks=0 to suppress leak detection, as some
@@ -540,7 +538,9 @@
# under ASAN.
$$(gtest_output): $$(gtest_exe) $$(gtest_deps)
$(hide) ($$(call ART_TEST_SKIP,$$(NAME)) && set -o pipefail && \
- ASAN_OPTIONS=detect_leaks=1 $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
+ ASAN_OPTIONS=detect_leaks=1 timeout --foreground -k 120s -s SIGRTMIN+2 3600s \
+ $(HOST_OUT_EXECUTABLES)/timeout_dumper \
+ $$< --gtest_output=xml:$$@ 2>&1 | tee $$<.tmp.out >&2 && \
{ $$(call ART_TEST_PASSED,$$(NAME)) ; rm $$<.tmp.out ; }) || \
( grep -q AddressSanitizer $$<.tmp.out && export ANDROID_BUILD_TOP=`pwd` && \
{ echo "ABI: 'x86_64'" | cat - $$<.tmp.out | development/scripts/stack | tail -n 3000 ; } ; \
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index e2adac1..2ad1143 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -39,8 +39,6 @@
# Use dex2oat debug version for better error reporting
# $(1): compiler - optimizing, interpreter or interp-ac (interpreter-access-checks).
# $(2): 2ND_ or undefined, 2ND_ for 32-bit host builds.
-# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
-# run-test --no-image
define create-core-oat-host-rules
core_compile_options :=
core_image_name :=
@@ -80,13 +78,15 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(HOST_CORE_DEX_LOCATIONS) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(HOST_CORE_IMG_DEX_LOCATIONS) $$(core_dex2oat_dependency)
@echo "host dex2oat: $$@"
@mkdir -p $$(dir $$@)
$$(hide) ANDROID_LOG_TAGS="*:e" $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
- --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(HOST_CORE_DEX_FILES)) \
- $$(addprefix --dex-location=,$$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+ --image-classes=$$(PRELOADED_CLASSES) \
+ $$(addprefix --dex-file=,$$(HOST_CORE_IMG_DEX_FILES)) \
+ $$(addprefix --dex-location=,$$(HOST_CORE_IMG_DEX_LOCATIONS)) \
+ --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_HOST_BASE_ADDRESS) --instruction-set=$$($(2)ART_HOST_ARCH) \
$$(LOCAL_$(2)DEX2OAT_HOST_INSTRUCTION_SET_FEATURES_OPTION) \
@@ -169,13 +169,15 @@
$$(core_image_name): PRIVATE_CORE_COMPILE_OPTIONS := $$(core_compile_options)
$$(core_image_name): PRIVATE_CORE_IMG_NAME := $$(core_image_name)
$$(core_image_name): PRIVATE_CORE_OAT_NAME := $$(core_oat_name)
-$$(core_image_name): $$(TARGET_CORE_DEX_FILES) $$(core_dex2oat_dependency)
+$$(core_image_name): $$(TARGET_CORE_IMG_DEX_FILES) $$(core_dex2oat_dependency)
@echo "target dex2oat: $$@"
@mkdir -p $$(dir $$@)
$$(hide) $$(DEX2OAT) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) \
--runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
- --image-classes=$$(PRELOADED_CLASSES) $$(addprefix --dex-file=,$$(TARGET_CORE_DEX_FILES)) \
- $$(addprefix --dex-location=,$$(TARGET_CORE_DEX_LOCATIONS)) --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
+ --image-classes=$$(PRELOADED_CLASSES) \
+ $$(addprefix --dex-file=,$$(TARGET_CORE_IMG_DEX_FILES)) \
+ $$(addprefix --dex-location=,$$(TARGET_CORE_IMG_DEX_LOCATIONS)) \
+ --oat-file=$$(PRIVATE_CORE_OAT_NAME) \
--oat-location=$$(PRIVATE_CORE_OAT_NAME) --image=$$(PRIVATE_CORE_IMG_NAME) \
--base=$$(LIBART_IMG_TARGET_BASE_ADDRESS) --instruction-set=$$($(2)TARGET_ARCH) \
--instruction-set-variant=$$($(2)DEX2OAT_TARGET_CPU_VARIANT) \
diff --git a/build/apex/Android.bp b/build/apex/Android.bp
index f2e12f6..0ec0a15 100644
--- a/build/apex/Android.bp
+++ b/build/apex/Android.bp
@@ -19,6 +19,11 @@
"libopenjdkjvmti",
"libadbconnection",
]
+bionic_native_shared_libs = [
+ "libc",
+ "libm",
+ "libdl",
+]
// - Fake library that avoids namespace issues and gives some warnings for nosy apps.
art_runtime_fake_native_shared_libs = [
// FIXME: Does not work as-is, because `libart_fake` is defined in libart_fake/Android.mk,
@@ -102,7 +107,8 @@
compile_multilib: "both",
manifest: "manifest.json",
native_shared_libs: art_runtime_base_native_shared_libs
- + art_runtime_fake_native_shared_libs,
+ + art_runtime_fake_native_shared_libs
+ + bionic_native_shared_libs,
multilib: {
both: {
// TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
@@ -130,7 +136,8 @@
manifest: "manifest.json",
native_shared_libs: art_runtime_base_native_shared_libs
+ art_runtime_fake_native_shared_libs
- + art_runtime_debug_native_shared_libs,
+ + art_runtime_debug_native_shared_libs
+ + bionic_native_shared_libs,
multilib: {
both: {
// TODO: Add logic to create a `dalvikvm` symlink to `dalvikvm32` or `dalvikvm64`
diff --git a/build/apex/runtests.sh b/build/apex/runtests.sh
index c19c7bd..b5e8d8b 100755
--- a/build/apex/runtests.sh
+++ b/build/apex/runtests.sh
@@ -33,6 +33,7 @@
sudo apt-get install libguestfs-tools
"
+
[[ -n "$ANDROID_PRODUCT_OUT" ]] \
|| die "You need to source and lunch before you can use this script."
@@ -41,6 +42,7 @@
build_apex_p=true
list_image_files_p=false
+print_image_tree_p=false
function usage {
cat <<EOF
@@ -48,7 +50,8 @@
Build (optional) and run tests on Android Runtime APEX package (on host).
-s, --skip-build skip the build step
- -l, --list-files list the contents of the ext4 image
+ -l, --list-files list the contents of the ext4 image using `find`
+ -t, --print-tree list the contents of the ext4 image using `tree`
-h, --help display this help and exit
EOF
@@ -59,6 +62,7 @@
case "$1" in
(-s|--skip-build) build_apex_p=false;;
(-l|--list-files) list_image_files_p=true;;
+ (-t|--print-tree) print_image_tree_p=true;;
(-h|--help) usage;;
(*) die "Unknown option: '$1'
Try '$0 --help' for more information.";;
@@ -66,8 +70,42 @@
shift
done
-work_dir=$(mktemp -d)
-mount_point="$work_dir/image"
+if $print_image_tree_p; then
+ which tree >/dev/null || die "This script requires the 'tree' tool.
+On Debian-based systems, this can be installed with:
+
+ sudo apt-get install tree
+"
+fi
+
+
+# build_apex APEX_MODULE
+# ----------------------
+# Build APEX package APEX_MODULE.
+function build_apex {
+ if $build_apex_p; then
+ local apex_module=$1
+ say "Building package $apex_module" && make "$apex_module" || die "Cannot build $apex_module"
+ fi
+}
+
+# maybe_list_apex_contents MOUNT_POINT
+# ------------------------------------
+# If any listing/printing option was used, honor them and display the contents
+# of the APEX payload at MOUNT_POINT.
+function maybe_list_apex_contents {
+ local mount_point=$1
+
+ # List the contents of the mounted image using `find` (optional).
+ if $list_image_files_p; then
+ say "Listing image files" && find "$mount_point"
+ fi
+
+ # List the contents of the mounted image using `tree` (optional).
+ if $print_image_tree_p; then
+ say "Printing image tree" && ls -ld "$mount_point" && tree -aph --du "$mount_point"
+ fi
+}
function check_binary {
[[ -x "$mount_point/bin/$1" ]] || die "Cannot find binary '$1' in mounted image"
@@ -91,36 +129,22 @@
|| die "Cannot find library '$1' in mounted image"
}
-function build_apex {
- if $build_apex_p; then
- say "Building package $1" && make "$1" || die "Cannot build $1"
- fi
-}
-
-function check_contents {
-
+# Check contents of APEX payload located in `$mount_point`.
+function check_release_contents {
# Check that the mounted image contains a manifest.
[[ -f "$mount_point/apex_manifest.json" ]] || die "no manifest"
# Check that the mounted image contains ART base binaries.
check_multilib_binary dalvikvm
- # TODO: Does not work yet.
+ # TODO: Does not work yet (b/119942078).
: check_binary_symlink dalvikvm
check_binary dex2oat
check_binary dexoptanalyzer
check_binary profman
- # Check that the mounted image contains ART tools binaries.
- check_binary dexdiag
- check_binary dexdump
- check_binary dexlist
# oatdump is only in device apex's due to build rules
- # check_binary oatdump
-
- # Check that the mounted image contains ART debug binaries.
- check_binary dex2oatd
- check_binary dexoptanalyzerd
- check_binary profmand
+ # TODO: Check for it when it is also built for host.
+ : check_binary oatdump
# Check that the mounted image contains ART libraries.
check_library libart-compiler.so
@@ -135,20 +159,6 @@
check_library libdexfile.so
check_library libprofile.so
- # Check that the mounted image contains ART debug libraries.
- check_library libartd-compiler.so
- check_library libartd.so
- check_library libopenjdkd.so
- check_library libopenjdkjvmd.so
- check_library libopenjdkjvmtid.so
- check_library libadbconnectiond.so
- # TODO: Should we check for these libraries too, even if they are not explicitly
- # listed as dependencies in the Android Runtime APEX module rule?
- check_library libdexfiled.so
- check_library libartbased.so
- check_library libartd-dexlayout.so
- check_library libprofiled.so
-
# TODO: Should we check for other libraries, such as:
#
# libbacktrace.so
@@ -164,105 +174,210 @@
# ?
}
+# Check debug contents of APEX payload located in `$mount_point`.
+function check_debug_contents {
+ # Check that the mounted image contains ART tools binaries.
+ check_binary dexdiag
+ check_binary dexdump
+ check_binary dexlist
-# *****************************************
-# * Testing for com.android.runtime.debug *
-# *****************************************
+ # Check that the mounted image contains ART debug binaries.
+ check_binary dex2oatd
+ check_binary dexoptanalyzerd
+ check_binary profmand
-# Garbage collection.
-function finish_device_debug {
- # Don't fail early during cleanup.
- set +e
+ # Check that the mounted image contains ART debug libraries.
+ check_library libartd-compiler.so
+ check_library libartd.so
+ check_library libopenjdkd.so
+ check_library libopenjdkjvmd.so
+ check_library libopenjdkjvmtid.so
+ check_library libadbconnectiond.so
+ # TODO: Should we check for these libraries too, even if they are not explicitly
+ # listed as dependencies in the Android Runtime APEX module rule?
+ check_library libdexfiled.so
+ check_library libartbased.so
+ check_library libartd-dexlayout.so
+ check_library libprofiled.so
+}
+
+# Testing target (device) APEX packages.
+# ======================================
+
+# Clean-up.
+function cleanup_target {
guestunmount "$mount_point"
rm -rf "$work_dir"
}
-trap finish_device_debug EXIT
+# Garbage collection.
+function finish_target {
+ # Don't fail early during cleanup.
+ set +e
+ cleanup_target
+}
-# TODO: Also exercise the Release Runtime APEX (`com.android.runtime.release`).
+# setup_target_apex APEX_MODULE MOUNT_POINT
+# -----------------------------------------
+# Extract image from target APEX_MODULE and mount it in MOUNT_POINT.
+function setup_target_apex {
+ local apex_module=$1
+ local mount_point=$2
+ local system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
+ local apex_package="$system_apexdir/$apex_module.apex"
+
+ say "Extracting and mounting image"
+
+ # Extract the payload from the Android Runtime APEX.
+ local image_filename="apex_payload.img"
+ unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+ mkdir "$mount_point"
+ local image_file="$work_dir/$image_filename"
+
+ # Check filesystems in the image.
+ local image_filesystems="$work_dir/image_filesystems"
+ virt-filesystems -a "$image_file" >"$image_filesystems"
+ # We expect a single partition (/dev/sda) in the image.
+ local partition="/dev/sda"
+ echo "$partition" | cmp "$image_filesystems" -
+
+ # Mount the image from the Android Runtime APEX.
+ guestmount -a "$image_file" -m "$partition" "$mount_point"
+}
+
+# Testing release APEX package (com.android.runtime.release).
+# -----------------------------------------------------------
+
+apex_module="com.android.runtime.release"
+
+say "Processing APEX package $apex_module"
+
+work_dir=$(mktemp -d)
+mount_point="$work_dir/image"
+
+trap finish_target EXIT
+
+# Build the APEX package (optional).
+build_apex "$apex_module"
+
+# Set up APEX package.
+setup_target_apex "$apex_module" "$mount_point"
+
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
+
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+
+# Clean up.
+trap - EXIT
+cleanup_target
+
+say "$apex_module tests passed"
+echo
+
+# Testing debug APEX package (com.android.runtime.debug).
+# -------------------------------------------------------
+
apex_module="com.android.runtime.debug"
-# Build the Android Runtime APEX package (optional).
-build_apex $apex_module
+say "Processing APEX package $apex_module"
-system_apexdir="$ANDROID_PRODUCT_OUT/system/apex"
-apex_package="$system_apexdir/$apex_module.apex"
+work_dir=$(mktemp -d)
+mount_point="$work_dir/image"
-say "Extracting and mounting image"
+trap finish_target EXIT
-# Extract the payload from the Android Runtime APEX.
-image_filename="apex_payload.img"
-unzip -q "$apex_package" "$image_filename" -d "$work_dir"
-mkdir "$mount_point"
-image_file="$work_dir/$image_filename"
+# Build the APEX package (optional).
+build_apex "$apex_module"
-# Check filesystems in the image.
-image_filesystems="$work_dir/image_filesystems"
-virt-filesystems -a "$image_file" >"$image_filesystems"
-# We expect a single partition (/dev/sda) in the image.
-partition="/dev/sda"
-echo "$partition" | cmp "$image_filesystems" -
+# Set up APEX package.
+setup_target_apex "$apex_module" "$mount_point"
-# Mount the image from the Android Runtime APEX.
-guestmount -a "$image_file" -m "$partition" "$mount_point"
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
-# List the contents of the mounted image (optional).
-$list_image_files_p && say "Listing image files" && ls -ld "$mount_point" && tree -ap "$mount_point"
-
-say "Running tests"
-
-check_contents
-
-# Check for files pulled in from device-only oatdump.
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+check_debug_contents
+# Check for files pulled in from debug target-only oatdump.
check_binary oatdump
check_library libart-disassembler.so
-# Cleanup
+# Clean up.
trap - EXIT
-guestunmount "$mount_point"
-rm -rf "$work_dir"
+cleanup_target
-say "$apex_module Tests passed"
+say "$apex_module tests passed"
+echo
-# ****************************************
-# * Testing for com.android.runtime.host *
-# ****************************************
+
+# Testing host APEX package (com.android.runtime.host).
+# =====================================================
+
+# Clean-up.
+function cleanup_host {
+ rm -rf "$work_dir"
+}
# Garbage collection.
function finish_host {
# Don't fail early during cleanup.
set +e
- rm -rf "$work_dir"
+ cleanup_host
}
+# setup_host_apex APEX_MODULE MOUNT_POINT
+# ---------------------------------------
+# Extract Zip file from host APEX_MODULE and extract it in MOUNT_POINT.
+function setup_host_apex {
+ local apex_module=$1
+ local mount_point=$2
+ local system_apexdir="$ANDROID_HOST_OUT/apex"
+ local apex_package="$system_apexdir/$apex_module.zipapex"
+
+ say "Extracting payload"
+
+ # Extract the payload from the Android Runtime APEX.
+ local image_filename="apex_payload.zip"
+ unzip -q "$apex_package" "$image_filename" -d "$work_dir"
+ mkdir "$mount_point"
+ local image_file="$work_dir/$image_filename"
+
+ # Unzipping the payload
+ unzip -q "$image_file" -d "$mount_point"
+}
+
+apex_module="com.android.runtime.host"
+
+say "Processing APEX package $apex_module"
+
work_dir=$(mktemp -d)
mount_point="$work_dir/zip"
trap finish_host EXIT
-apex_module="com.android.runtime.host"
+# Build the APEX package (optional).
+build_apex "$apex_module"
-# Build the Android Runtime APEX package (optional).
-build_apex $apex_module
+# Set up APEX package.
+setup_host_apex "$apex_module" "$mount_point"
-system_apexdir="$ANDROID_HOST_OUT/apex"
-apex_package="$system_apexdir/$apex_module.zipapex"
+# List the contents of the APEX image (optional).
+maybe_list_apex_contents "$mount_point"
-say "Extracting payload"
+# Run tests on APEX package.
+say "Checking APEX package $apex_module"
+check_release_contents
+check_debug_contents
-# Extract the payload from the Android Runtime APEX.
-image_filename="apex_payload.zip"
-unzip -q "$apex_package" "$image_filename" -d "$work_dir"
-mkdir "$mount_point"
-image_file="$work_dir/$image_filename"
+# Clean up.
+trap - EXIT
+cleanup_host
-# Unzipping the payload
-unzip -q "$image_file" -d "$mount_point"
+say "$apex_module tests passed"
-say "Running tests"
-check_contents
-
-say "$apex_module Tests passed"
-
-say "Tests passed"
+say "All Android Runtime APEX tests passed"
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index 82c04e7..952be44 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -206,7 +206,7 @@
};
load_value_ = []() -> TArg& {
assert(false && "Should not be appending values to ignored arguments");
- return *reinterpret_cast<TArg*>(0); // Blow up.
+ __builtin_trap(); // Blow up.
};
save_value_specified_ = true;
@@ -270,7 +270,7 @@
load_value_ = []() -> TArg& {
assert(false && "No load value function defined");
- return *reinterpret_cast<TArg*>(0); // Blow up.
+ __builtin_trap(); // Blow up.
};
}
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 18f7105..0039be0 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1742,6 +1742,9 @@
if (&cls->GetDexFile() == &accessor.GetDexFile()) {
ObjectLock<mirror::Class> lock(self, cls);
mirror::Class::SetStatus(cls, status, self);
+ if (status >= ClassStatus::kVerified) {
+ cls->SetVerificationAttempted();
+ }
}
} else {
DCHECK(self->IsExceptionPending());
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 27749a6..93575d7 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -126,11 +126,11 @@
}
extern "C" bool jit_compile_method(
- void* handle, ArtMethod* method, Thread* self, bool osr)
+ void* handle, ArtMethod* method, Thread* self, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
- return jit_compiler->CompileMethod(self, method, osr);
+ return jit_compiler->CompileMethod(self, method, baseline, osr);
}
extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
@@ -184,7 +184,7 @@
}
}
-bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool osr) {
+bool JitCompiler::CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr) {
SCOPED_TRACE << "JIT compiling " << method->PrettyMethod();
DCHECK(!method->IsProxyMethod());
@@ -201,7 +201,7 @@
TimingLogger::ScopedTiming t2("Compiling", &logger);
JitCodeCache* const code_cache = runtime->GetJit()->GetCodeCache();
success = compiler_driver_->GetCompiler()->JitCompile(
- self, code_cache, method, /* baseline= */ false, osr, jit_logger_.get());
+ self, code_cache, method, baseline, osr, jit_logger_.get());
}
// Trim maps to reduce memory usage.
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index d201611..29d2761 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@
virtual ~JitCompiler();
// Compilation entrypoint. Returns whether the compilation succeeded.
- bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
+ bool CompileMethod(Thread* self, ArtMethod* method, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
const CompilerOptions& GetCompilerOptions() const {
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 92b9543..bd4304c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -1300,15 +1300,15 @@
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj2));
- EXPECT_EQ(0x12345678ABCDEF88ll, val1);
- EXPECT_EQ(0x7FEDCBA987654321ll, val2);
+ EXPECT_EQ(0x12345678ABCDEF88LL, val1);
+ EXPECT_EQ(0x7FEDCBA987654321LL, val2);
return 42;
}
void JniCompilerTest::GetTextImpl() {
SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
CURRENT_JNI_WRAPPER(my_gettext));
- jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88ll, jobj_,
+ jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 0x12345678ABCDEF88LL, jobj_,
INT64_C(0x7FEDCBA987654321), jobj_);
EXPECT_EQ(result, 42);
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 5bd1122..50b13c8 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -243,7 +243,8 @@
// compilation.
#define UNREACHABLE_INTRINSIC(Arch, Name) \
void IntrinsicLocationsBuilder ## Arch::Visit ## Name(HInvoke* invoke) { \
- if (!codegen_->GetCompilerOptions().IsBaseline()) { \
+ if (Runtime::Current()->IsAotCompiler() && \
+ !codegen_->GetCompilerOptions().IsBaseline()) { \
LOG(FATAL) << "Unreachable: intrinsic " << invoke->GetIntrinsic() \
<< " should have been converted to HIR"; \
} \
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 1688ea7..0b17c9d 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -2927,7 +2927,7 @@
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::RequiresRegister());
- locations->SetOut(Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
// Lower the invoke of CRC32.update(int crc, int b).
@@ -2945,9 +2945,13 @@
// result = crc32_for_byte(crc, b)
// crc = ~result
// It is directly lowered to three instructions.
- __ Mvn(out, crc);
- __ Crc32b(out, out, val);
- __ Mvn(out, out);
+
+ UseScratchRegisterScope temps(masm);
+ Register tmp = temps.AcquireSameSizeAs(out);
+
+ __ Mvn(tmp, crc);
+ __ Crc32b(tmp, tmp, val);
+ __ Mvn(out, tmp);
}
// The threshold for sizes of arrays to use the library provided implementation
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index a5bba9b..0b2c0b6 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -601,8 +601,7 @@
Fatal(StringPrintf("dex2oat did not finish after %" PRId64 " seconds",
timeout_in_milliseconds_/1000));
} else if (rc != 0) {
- std::string message(StringPrintf("pthread_cond_timedwait failed: %s",
- strerror(errno)));
+ std::string message(StringPrintf("pthread_cond_timedwait failed: %s", strerror(rc)));
Fatal(message.c_str());
}
}
@@ -624,7 +623,6 @@
explicit Dex2Oat(TimingLogger* timings) :
compiler_kind_(Compiler::kOptimizing),
// Take the default set of instruction features from the build.
- boot_image_checksum_(0),
key_value_store_(nullptr),
verification_results_(nullptr),
runtime_(nullptr),
@@ -1437,17 +1435,22 @@
if (!IsBootImage()) {
// When compiling an app, create the runtime early to retrieve
- // the image location key needed for the oat header.
+ // the boot image checksums needed for the oat header.
if (!CreateRuntime(std::move(runtime_options))) {
return dex2oat::ReturnCode::kCreateRuntime;
}
if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) {
TimingLogger::ScopedTiming t3("Loading image checksum", timings_);
- std::vector<ImageSpace*> image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces();
- boot_image_checksum_ = image_spaces[0]->GetImageHeader().GetImageChecksum();
- } else {
- boot_image_checksum_ = 0u;
+ Runtime* runtime = Runtime::Current();
+ key_value_store_->Put(OatHeader::kBootClassPathKey,
+ android::base::Join(runtime->GetBootClassPathLocations(), ':'));
+ std::vector<ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces();
+ const std::vector<const DexFile*>& bcp_dex_files =
+ runtime->GetClassLinker()->GetBootClassPath();
+ key_value_store_->Put(
+ OatHeader::kBootClassPathChecksumsKey,
+ gc::space::ImageSpace::GetBootClassPathChecksums(image_spaces, bcp_dex_files));
}
// Open dex files for class path.
@@ -2015,7 +2018,7 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- if (!oat_writer->WriteHeader(elf_writer->GetStream(), boot_image_checksum_)) {
+ if (!oat_writer->WriteHeader(elf_writer->GetStream())) {
LOG(ERROR) << "Failed to write oat header to the ELF file " << oat_file->GetPath();
return false;
}
@@ -2646,7 +2649,6 @@
std::unique_ptr<CompilerOptions> compiler_options_;
Compiler::Kind compiler_kind_;
- uint32_t boot_image_checksum_;
std::unique_ptr<SafeMap<std::string, std::string> > key_value_store_;
std::unique_ptr<VerificationResults> verification_results_;
diff --git a/dex2oat/linker/image_test.h b/dex2oat/linker/image_test.h
index bd8cf5a..fa0a3d4 100644
--- a/dex2oat/linker/image_test.h
+++ b/dex2oat/linker/image_test.h
@@ -326,8 +326,7 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream(),
- /*boot_image_checksum=*/ 0u);
+ bool header_ok = oat_writer->WriteHeader(elf_writer->GetStream());
ASSERT_TRUE(header_ok);
writer->UpdateOatFileHeader(i, oat_writer->GetOatHeader());
diff --git a/dex2oat/linker/image_writer.cc b/dex2oat/linker/image_writer.cc
index 61d105f..e4e4b13 100644
--- a/dex2oat/linker/image_writer.cc
+++ b/dex2oat/linker/image_writer.cc
@@ -32,6 +32,7 @@
#include "base/enums.h"
#include "base/globals.h"
#include "base/logging.h" // For VLOG.
+#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
#include "class_linker-inl.h"
#include "class_root.h"
@@ -153,6 +154,26 @@
: nullptr;
}
+bool ImageWriter::IsImageObject(ObjPtr<mirror::Object> obj) const {
+ // For boot image, we keep all objects remaining after the GC in PrepareImageAddressSpace().
+ if (compiler_options_.IsBootImage()) {
+ return true;
+ }
+ // Objects already in the boot image do not belong to the image being written.
+ if (IsInBootImage(obj.Ptr())) {
+ return false;
+ }
+ // DexCaches for the boot class path components that are not a part of the boot image
+ // cannot be garbage collected in PrepareImageAddressSpace() but we do not want to
+ // include them in the app image. So make sure we include only the app DexCaches.
+ if (obj->IsDexCache() &&
+ !ContainsElement(compiler_options_.GetDexFilesForOatFile(),
+ obj->AsDexCache()->GetDexFile())) {
+ return false;
+ }
+ return true;
+}
+
// Return true if an object is already in an image space.
bool ImageWriter::IsInBootImage(const void* obj) const {
gc::Heap* const heap = Runtime::Current()->GetHeap();
@@ -437,7 +458,7 @@
*/
heap->VisitObjects([this, &visitor](ObjPtr<mirror::Object> object)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!IsInBootImage(object.Ptr())) {
+ if (IsImageObject(object)) {
visitor.SetObject(object);
if (object->IsDexCache()) {
@@ -680,7 +701,7 @@
ObjPtr<mirror::ClassLoader> class_loader = GetAppClassLoader();
std::vector<ObjPtr<mirror::DexCache>> dex_caches = FindDexCaches(self);
for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
- if (IsInBootImage(dex_cache.Ptr())) {
+ if (!IsImageObject(dex_cache)) {
continue; // Boot image DexCache is not written to the app image.
}
PreloadDexCache(dex_cache, class_loader);
@@ -989,7 +1010,7 @@
for (const ClassLinker::DexCacheData& data : class_linker->GetDexCachesData()) {
ObjPtr<mirror::DexCache> dex_cache =
ObjPtr<mirror::DexCache>::DownCast(self->DecodeJObject(data.weak_root));
- if (dex_cache == nullptr || IsInBootImage(dex_cache.Ptr())) {
+ if (dex_cache == nullptr || !IsImageObject(dex_cache)) {
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
@@ -1758,7 +1779,8 @@
for (ObjPtr<mirror::DexCache> dex_cache : dex_caches) {
// Pass the class loader associated with the DexCache. This can either be
// the app's `class_loader` or `nullptr` if boot class loader.
- PruneDexCache(dex_cache, IsInBootImage(dex_cache.Ptr()) ? nullptr : GetAppClassLoader());
+ bool is_app_image_dex_cache = compiler_options_.IsAppImage() && IsImageObject(dex_cache);
+ PruneDexCache(dex_cache, is_app_image_dex_cache ? GetAppClassLoader() : nullptr);
}
// Drop the array class cache in the ClassLinker, as these are roots holding those classes live.
@@ -1856,7 +1878,7 @@
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
- if (!IsInBootImage(dex_cache.Ptr())) {
+ if (IsImageObject(dex_cache)) {
dex_cache_count += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
}
}
@@ -1875,7 +1897,7 @@
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
- if (!IsInBootImage(dex_cache.Ptr())) {
+ if (IsImageObject(dex_cache)) {
non_image_dex_caches += image_dex_files.find(dex_file) != image_dex_files.end() ? 1u : 0u;
}
}
@@ -1889,7 +1911,7 @@
continue;
}
const DexFile* dex_file = dex_cache->GetDexFile();
- if (!IsInBootImage(dex_cache.Ptr()) &&
+ if (IsImageObject(dex_cache) &&
image_dex_files.find(dex_file) != image_dex_files.end()) {
dex_caches->Set<false>(i, dex_cache.Ptr());
++i;
@@ -1942,7 +1964,7 @@
mirror::Object* ImageWriter::TryAssignBinSlot(WorkStack& work_stack,
mirror::Object* obj,
size_t oat_index) {
- if (obj == nullptr || IsInBootImage(obj)) {
+ if (obj == nullptr || !IsImageObject(obj)) {
// Object is null or already in the image, there is no work to do.
return obj;
}
@@ -2373,7 +2395,7 @@
{
auto ensure_bin_slots_assigned = [&](mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(obj)) {
+ if (IsImageObject(obj)) {
CHECK(IsImageBinSlotAssigned(obj)) << mirror::Object::PrettyTypeOf(obj) << " " << obj;
}
};
@@ -2444,7 +2466,7 @@
{
auto unbin_objects_into_offset = [&](mirror::Object* obj)
REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!IsInBootImage(obj)) {
+ if (IsImageObject(obj)) {
UnbinObjectsIntoOffset(obj);
}
};
@@ -2909,7 +2931,7 @@
}
void ImageWriter::CopyAndFixupObject(Object* obj) {
- if (IsInBootImage(obj)) {
+ if (!IsImageObject(obj)) {
return;
}
size_t offset = GetImageOffset(obj);
diff --git a/dex2oat/linker/image_writer.h b/dex2oat/linker/image_writer.h
index 33bacf8..b680265 100644
--- a/dex2oat/linker/image_writer.h
+++ b/dex2oat/linker/image_writer.h
@@ -674,7 +674,12 @@
template <typename T>
T* NativeCopyLocation(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
- // Return true of obj is inside of the boot image space. This may only return true if we are
+ // Return true if `obj` belongs to the image we're writing.
+ // For a boot image, this is true for all objects.
+ // For an app image, boot image objects and boot class path dex caches are excluded.
+ bool IsImageObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);
+
+ // Return true if `obj` is inside of the boot image space. This may only return true if we are
// compiling an app image.
bool IsInBootImage(const void* obj) const;
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index d045698..e2a9ac2 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -2808,11 +2808,9 @@
return true;
}
-bool OatWriter::WriteHeader(OutputStream* out, uint32_t boot_image_checksum) {
+bool OatWriter::WriteHeader(OutputStream* out) {
CHECK(write_state_ == WriteState::kWriteHeader);
- oat_header_->SetBootImageChecksum(boot_image_checksum);
-
// Update checksum with header data.
DCHECK_EQ(oat_header_->GetChecksum(), 0u); // For checksum calculation.
const uint8_t* header_begin = reinterpret_cast<const uint8_t*>(oat_header_.get());
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index 9cd2fd0..cc0e83a 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -198,7 +198,7 @@
// Check the size of the written oat file.
bool CheckOatSize(OutputStream* out, size_t file_offset, size_t relative_offset);
// Write the oat header. This finalizes the oat file.
- bool WriteHeader(OutputStream* out, uint32_t boot_image_checksum);
+ bool WriteHeader(OutputStream* out);
// Returns whether the oat file has an associated image.
bool HasImage() const {
diff --git a/dex2oat/linker/oat_writer_test.cc b/dex2oat/linker/oat_writer_test.cc
index 5de1540..ecf9db8 100644
--- a/dex2oat/linker/oat_writer_test.cc
+++ b/dex2oat/linker/oat_writer_test.cc
@@ -240,7 +240,7 @@
elf_writer->EndDataBimgRelRo(data_bimg_rel_ro);
}
- if (!oat_writer.WriteHeader(elf_writer->GetStream(), /*boot_image_checksum=*/ 42u)) {
+ if (!oat_writer.WriteHeader(elf_writer->GetStream())) {
return false;
}
@@ -396,6 +396,7 @@
ScratchFile tmp_base, tmp_oat(tmp_base, ".oat"), tmp_vdex(tmp_base, ".vdex");
SafeMap<std::string, std::string> key_value_store;
+ key_value_store.Put(OatHeader::kBootClassPathChecksumsKey, "testkey");
bool success = WriteElf(tmp_vdex.GetFile(),
tmp_oat.GetFile(),
class_linker->GetBootClassPath(),
@@ -418,7 +419,8 @@
const OatHeader& oat_header = oat_file->GetOatHeader();
ASSERT_TRUE(oat_header.IsValid());
ASSERT_EQ(class_linker->GetBootClassPath().size(), oat_header.GetDexFileCount()); // core
- ASSERT_EQ(42u, oat_header.GetBootImageChecksum());
+ ASSERT_TRUE(oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey) != nullptr);
+ ASSERT_STREQ("testkey", oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey));
ASSERT_TRUE(java_lang_dex_file_ != nullptr);
const DexFile& dex_file = *java_lang_dex_file_;
@@ -464,7 +466,7 @@
TEST_F(OatTest, OatHeaderSizeCheck) {
// If this test is failing and you have to update these constants,
// it is time to update OatHeader::kOatVersion
- EXPECT_EQ(68U, sizeof(OatHeader));
+ EXPECT_EQ(64U, sizeof(OatHeader));
EXPECT_EQ(4U, sizeof(OatMethodOffsets));
EXPECT_EQ(8U, sizeof(OatQuickMethodHeader));
EXPECT_EQ(166 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)),
diff --git a/dexdump/Android.bp b/dexdump/Android.bp
index d15bbda..434cb35 100644
--- a/dexdump/Android.bp
+++ b/dexdump/Android.bp
@@ -49,6 +49,9 @@
darwin: {
enabled: false,
},
+ windows: {
+ enabled: true,
+ },
},
}
diff --git a/libartbase/Android.bp b/libartbase/Android.bp
index 58d12a1..0fcd6a5 100644
--- a/libartbase/Android.bp
+++ b/libartbase/Android.bp
@@ -35,7 +35,6 @@
"base/memory_region.cc",
"base/mem_map.cc",
// "base/mem_map_fuchsia.cc", put in target when fuchsia supported by soong
- "base/mem_map_unix.cc",
"base/os_linux.cc",
"base/runtime_debug.cc",
"base/safe_copy.cc",
@@ -50,20 +49,38 @@
],
target: {
android: {
+ srcs: [
+ "base/mem_map_unix.cc",
+ ],
static_libs: [
// ZipArchive support, the order matters here to get all symbols.
"libziparchive",
"libz",
],
+ shared_libs: [
+ "liblog",
+ // For ashmem.
+ "libcutils",
+ // For common macros.
+ "libbase",
+ ],
// Exclude the version script from Darwin host since it's not
// supported by the linker there. That means ASan checks on Darwin
// might trigger ODR violations.
version_script: "libartbase.map",
},
- host: {
+ not_windows: {
+ srcs: [
+ "base/mem_map_unix.cc",
+ ],
shared_libs: [
"libziparchive",
"libz",
+ "liblog",
+ // For ashmem.
+ "libcutils",
+ // For common macros.
+ "libbase",
],
},
linux_glibc: {
@@ -71,17 +88,20 @@
},
windows: {
version_script: "libartbase.map",
+ static_libs: [
+ "libziparchive",
+ "libz",
+ "liblog",
+ // For ashmem.
+ "libcutils",
+ // For common macros.
+ "libbase",
+ ],
+ cflags: ["-Wno-thread-safety"],
},
},
generated_sources: ["art_libartbase_operator_srcs"],
cflags: ["-DBUILDING_LIBART=1"],
- shared_libs: [
- "liblog",
- // For ashmem.
- "libcutils",
- // For common macros.
- "libbase",
- ],
// Utilities used by various ART libs and tools are linked in statically
// here to avoid shared lib dependencies outside the ART APEX. No target
@@ -147,6 +167,14 @@
"libziparchive",
],
export_shared_lib_headers: ["libbase"],
+ target: {
+ windows: {
+ enabled: true,
+ shared: {
+ enabled: false,
+ },
+ },
+ },
}
art_cc_library {
@@ -160,6 +188,14 @@
"libziparchive",
],
export_shared_lib_headers: ["libbase"],
+ target: {
+ windows: {
+ enabled: true,
+ shared: {
+ enabled: false,
+ },
+ },
+ },
}
art_cc_library {
@@ -177,9 +213,6 @@
header_libs: [
"libnativehelper_header_only",
],
- include_dirs: [
- "external/icu/icu4c/source/common",
- ],
}
art_cc_test {
diff --git a/libartbase/base/arena_allocator.cc b/libartbase/base/arena_allocator.cc
index df3deba..0e7f6cc 100644
--- a/libartbase/base/arena_allocator.cc
+++ b/libartbase/base/arena_allocator.cc
@@ -16,7 +16,6 @@
#include "arena_allocator-inl.h"
-#include <sys/mman.h>
#include <algorithm>
#include <cstddef>
@@ -25,6 +24,8 @@
#include <android-base/logging.h>
+#include "mman.h"
+
namespace art {
constexpr size_t kMemoryToolRedZoneBytes = 8;
diff --git a/libartbase/base/common_art_test.cc b/libartbase/base/common_art_test.cc
index 278203d..53afea2 100644
--- a/libartbase/base/common_art_test.cc
+++ b/libartbase/base/common_art_test.cc
@@ -26,7 +26,6 @@
#include "android-base/stringprintf.h"
#include "android-base/strings.h"
#include "android-base/unique_fd.h"
-#include <unicode/uvernum.h>
#include "art_field-inl.h"
#include "base/file_utils.h"
@@ -329,15 +328,19 @@
}
std::vector<std::string> CommonArtTestImpl::GetLibCoreDexFileNames() {
- // Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+ // Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
// because that's what we use for compiling the core.art image.
+ // It may contain additional modules from TEST_CORE_JARS.
static const char* const kLibcoreModules[] = {
+ // CORE_IMG_JARS modules.
"core-oj",
"core-libart",
"core-simple",
- "conscrypt",
"okhttp",
"bouncycastle",
+ "apache-xml",
+ // Additional modules.
+ "conscrypt",
};
std::vector<std::string> result;
diff --git a/libartbase/base/file_utils.cc b/libartbase/base/file_utils.cc
index f8d6016..9490798 100644
--- a/libartbase/base/file_utils.cc
+++ b/libartbase/base/file_utils.cc
@@ -19,11 +19,13 @@
#include <inttypes.h>
#include <sys/stat.h>
#include <sys/types.h>
+#ifndef _WIN32
#include <sys/wait.h>
+#endif
#include <unistd.h>
// We need dladdr.
-#ifndef __APPLE__
+#if !defined(__APPLE__) && !defined(_WIN32)
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#define DEFINED_GNU_SOURCE
@@ -84,6 +86,10 @@
}
std::string GetAndroidRootSafe(std::string* error_msg) {
+#ifdef _WIN32
+ *error_msg = "GetAndroidRootSafe unsupported for Windows.";
+ return "";
+#else
// Prefer ANDROID_ROOT if it's set.
const char* android_dir = getenv("ANDROID_ROOT");
if (android_dir != nullptr) {
@@ -118,6 +124,7 @@
return "";
}
return "/system";
+#endif
}
std::string GetAndroidRoot() {
@@ -179,6 +186,15 @@
void GetDalvikCache(const char* subdir, const bool create_if_absent, std::string* dalvik_cache,
bool* have_android_data, bool* dalvik_cache_exists, bool* is_global_cache) {
+#ifdef _WIN32
+ UNUSED(subdir);
+ UNUSED(create_if_absent);
+ UNUSED(dalvik_cache);
+ UNUSED(have_android_data);
+ UNUSED(dalvik_cache_exists);
+ UNUSED(is_global_cache);
+ LOG(FATAL) << "GetDalvikCache unsupported on Windows.";
+#else
CHECK(subdir != nullptr);
std::string error_msg;
const char* android_data = GetAndroidDataSafe(&error_msg);
@@ -199,6 +215,7 @@
*dalvik_cache_exists = ((mkdir(dalvik_cache_root.c_str(), 0700) == 0 || errno == EEXIST) &&
(mkdir(dalvik_cache->c_str(), 0700) == 0 || errno == EEXIST));
}
+#endif
}
std::string GetDalvikCache(const char* subdir) {
@@ -262,9 +279,15 @@
}
bool LocationIsOnSystem(const char* path) {
+#ifdef _WIN32
+ UNUSED(path);
+ LOG(FATAL) << "LocationIsOnSystem is unsupported on Windows.";
+ return false;
+#else
UniqueCPtr<const char[]> full_path(realpath(path, nullptr));
return full_path != nullptr &&
android::base::StartsWith(full_path.get(), GetAndroidRoot().c_str());
+#endif
}
bool LocationIsOnSystemFramework(const char* full_path) {
diff --git a/libartbase/base/malloc_arena_pool.cc b/libartbase/base/malloc_arena_pool.cc
index 02e29f1..4de34b5 100644
--- a/libartbase/base/malloc_arena_pool.cc
+++ b/libartbase/base/malloc_arena_pool.cc
@@ -16,7 +16,6 @@
#include "malloc_arena_pool.h"
-#include <sys/mman.h>
#include <algorithm>
#include <cstddef>
@@ -25,6 +24,7 @@
#include <android-base/logging.h>
#include "arena_allocator-inl.h"
+#include "mman.h"
namespace art {
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 532ca28..2833750 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -18,8 +18,7 @@
#include <inttypes.h>
#include <stdlib.h>
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
-#if !defined(ANDROID_OS) && !defined(__Fuchsia__)
+#if !defined(ANDROID_OS) && !defined(__Fuchsia__) && !defined(_WIN32)
#include <sys/resource.h>
#endif
@@ -39,6 +38,7 @@
#include "globals.h"
#include "logging.h" // For VLOG_IS_ON.
#include "memory_tool.h"
+#include "mman.h" // For the PROT_* and MAP_* constants.
#include "utils.h"
#ifndef MAP_ANONYMOUS
@@ -811,19 +811,30 @@
if (!kMadviseZeroes) {
memset(base_begin_, 0, base_size_);
}
+#ifdef _WIN32
+ // It is benign not to madvise away the pages here.
+ PLOG(WARNING) << "MemMap::MadviseDontNeedAndZero does not madvise on Windows.";
+#else
int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
if (result == -1) {
PLOG(WARNING) << "madvise failed";
}
+#endif
}
}
bool MemMap::Sync() {
+#ifdef _WIN32
+ // TODO: add FlushViewOfFile support.
+ PLOG(ERROR) << "MemMap::Sync unsupported on Windows.";
+ return false;
+#else
// Historical note: To avoid Valgrind errors, we temporarily lifted the lower-end noaccess
// protection before passing it to msync() when `redzone_size_` was non-null, as Valgrind
// only accepts page-aligned base address, and excludes the higher-end noaccess protection
// from the msync range. b/27552451.
return msync(BaseBegin(), BaseSize(), MS_SYNC) == 0;
+#endif
}
bool MemMap::Protect(int prot) {
@@ -832,10 +843,12 @@
return true;
}
+#ifndef _WIN32
if (mprotect(base_begin_, base_size_, prot) == 0) {
prot_ = prot;
return true;
}
+#endif
PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
<< prot << ") failed";
@@ -1206,7 +1219,11 @@
DCHECK_LE(page_begin, page_end);
DCHECK_LE(page_end, mem_end);
std::fill(mem_begin, page_begin, 0);
+#ifdef _WIN32
+ LOG(WARNING) << "ZeroAndReleasePages does not madvise on Windows.";
+#else
CHECK_NE(madvise(page_begin, page_end - page_begin, MADV_DONTNEED), -1) << "madvise failed";
+#endif
std::fill(page_end, mem_end, 0);
}
}
diff --git a/libartbase/base/mem_map_fuchsia.cc b/libartbase/base/mem_map_fuchsia.cc
index d1c92ce..6b0e06c 100644
--- a/libartbase/base/mem_map_fuchsia.cc
+++ b/libartbase/base/mem_map_fuchsia.cc
@@ -15,8 +15,8 @@
*/
#include "mem_map.h"
-#include <sys/mman.h>
#include "logging.h"
+#include "mman.h"
#include <zircon/process.h>
#include <zircon/syscalls.h>
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index 074d4c2..bf39fd1 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -16,8 +16,6 @@
#include "mem_map.h"
-#include <sys/mman.h>
-
#include <memory>
#include <random>
@@ -25,6 +23,7 @@
#include "common_runtime_test.h" // For TEST_DISABLED_FOR_MIPS
#include "logging.h"
#include "memory_tool.h"
+#include "mman.h"
#include "unix_file/fd_file.h"
namespace art {
diff --git a/libartbase/base/mem_map_unix.cc b/libartbase/base/mem_map_unix.cc
index 601b049..ac854df 100644
--- a/libartbase/base/mem_map_unix.cc
+++ b/libartbase/base/mem_map_unix.cc
@@ -16,7 +16,7 @@
#include "mem_map.h"
-#include <sys/mman.h>
+#include "mman.h"
namespace art {
diff --git a/libartbase/base/membarrier.cc b/libartbase/base/membarrier.cc
index 4c86b6b..abb36bc 100644
--- a/libartbase/base/membarrier.cc
+++ b/libartbase/base/membarrier.cc
@@ -18,8 +18,10 @@
#include <errno.h>
+#if !defined(_WIN32)
#include <sys/syscall.h>
#include <unistd.h>
+#endif
#include "macros.h"
#if defined(__BIONIC__)
diff --git a/libartbase/base/memfd.cc b/libartbase/base/memfd.cc
index 7c20401..780be32 100644
--- a/libartbase/base/memfd.cc
+++ b/libartbase/base/memfd.cc
@@ -18,9 +18,11 @@
#include <errno.h>
#include <stdio.h>
+#if !defined(_WIN32)
#include <sys/syscall.h>
#include <sys/utsname.h>
#include <unistd.h>
+#endif
#include "macros.h"
diff --git a/libartbase/base/mman.h b/libartbase/base/mman.h
new file mode 100644
index 0000000..bd63f65
--- /dev/null
+++ b/libartbase/base/mman.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBARTBASE_BASE_MMAN_H_
+#define ART_LIBARTBASE_BASE_MMAN_H_
+
+#ifdef _WIN32
+
+// There is no sys/mman.h in mingw.
+// As these are just placeholders for the APIs, all values are stubbed out.
+
+#define PROT_READ 0 // 0x1
+#define PROT_WRITE 0 // 0x2
+#define PROT_EXEC 0 // 0x4
+#define PROT_NONE 0 // 0x0
+
+#define MAP_SHARED 0 // 0x01
+#define MAP_PRIVATE 0 // 0x02
+
+#define MAP_FAILED nullptr // ((void*) -1)
+#define MAP_FIXED 0 // 0x10
+#define MAP_ANONYMOUS 0 // 0x20
+
+#else
+
+#include <sys/mman.h>
+
+#endif
+
+
+#endif // ART_LIBARTBASE_BASE_MMAN_H_
diff --git a/libartbase/base/os_linux.cc b/libartbase/base/os_linux.cc
index f8b31cf..a00779e 100644
--- a/libartbase/base/os_linux.cc
+++ b/libartbase/base/os_linux.cc
@@ -50,7 +50,12 @@
}
File* OS::CreateEmptyFileWriteOnly(const char* name) {
- return art::CreateEmptyFile(name, O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC);
+#ifdef _WIN32
+ int flags = O_WRONLY | O_TRUNC;
+#else
+ int flags = O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC;
+#endif
+ return art::CreateEmptyFile(name, flags);
}
File* OS::OpenFileWithFlags(const char* name, int flags, bool auto_flush) {
diff --git a/libartbase/base/safe_copy.cc b/libartbase/base/safe_copy.cc
index b46b921..ad75aa7 100644
--- a/libartbase/base/safe_copy.cc
+++ b/libartbase/base/safe_copy.cc
@@ -16,8 +16,10 @@
#include "safe_copy.h"
+#ifdef __linux__
#include <sys/uio.h>
#include <sys/user.h>
+#endif
#include <unistd.h>
#include <algorithm>
diff --git a/libartbase/base/safe_copy_test.cc b/libartbase/base/safe_copy_test.cc
index c23651f..9f7d409 100644
--- a/libartbase/base/safe_copy_test.cc
+++ b/libartbase/base/safe_copy_test.cc
@@ -18,12 +18,12 @@
#include <errno.h>
#include <string.h>
-#include <sys/mman.h>
#include <sys/user.h>
#include "android-base/logging.h"
#include "globals.h"
#include "gtest/gtest.h"
+#include "mman.h"
namespace art {
diff --git a/libartbase/base/scoped_flock.cc b/libartbase/base/scoped_flock.cc
index 2f16fb2..b16a45a 100644
--- a/libartbase/base/scoped_flock.cc
+++ b/libartbase/base/scoped_flock.cc
@@ -35,6 +35,14 @@
/* static */ ScopedFlock LockedFile::Open(const char* filename, int flags, bool block,
std::string* error_msg) {
+#ifdef _WIN32
+ // TODO: implement file locking for Windows.
+ UNUSED(filename);
+ UNUSED(flags);
+ UNUSED(block);
+ *error_msg = "flock is unsupported on Windows";
+ return nullptr;
+#else
while (true) {
// NOTE: We don't check usage here because the ScopedFlock should *never* be
// responsible for flushing its underlying FD. Its only purpose should be
@@ -89,10 +97,19 @@
return ScopedFlock(new LockedFile(std::move((*file.get()))));
}
+#endif
}
ScopedFlock LockedFile::DupOf(const int fd, const std::string& path,
const bool read_only_mode, std::string* error_msg) {
+#ifdef _WIN32
+ // TODO: implement file locking for Windows.
+ UNUSED(fd);
+ UNUSED(path);
+ UNUSED(read_only_mode);
+ *error_msg = "flock is unsupported on Windows.";
+ return nullptr;
+#else
// NOTE: We don't check usage here because the ScopedFlock should *never* be
// responsible for flushing its underlying FD. Its only purpose should be
// to acquire a lock, and the unlock / close in the corresponding
@@ -112,9 +129,11 @@
}
return locked_file;
+#endif
}
void LockedFile::ReleaseLock() {
+#ifndef _WIN32
if (this->Fd() != -1) {
int flock_result = TEMP_FAILURE_RETRY(flock(this->Fd(), LOCK_UN));
if (flock_result != 0) {
@@ -126,6 +145,7 @@
PLOG(WARNING) << "Unable to unlock file " << this->GetPath();
}
}
+#endif
}
} // namespace art
diff --git a/libartbase/base/socket_peer_is_trusted.cc b/libartbase/base/socket_peer_is_trusted.cc
index 440054e..3996d90 100644
--- a/libartbase/base/socket_peer_is_trusted.cc
+++ b/libartbase/base/socket_peer_is_trusted.cc
@@ -16,8 +16,10 @@
#include "socket_peer_is_trusted.h"
+#if !defined(_WIN32)
#include <pwd.h>
#include <sys/socket.h>
+#endif
#include <android-base/logging.h>
diff --git a/libartbase/base/time_utils.cc b/libartbase/base/time_utils.cc
index 89a1109..aa6c987 100644
--- a/libartbase/base/time_utils.cc
+++ b/libartbase/base/time_utils.cc
@@ -14,12 +14,14 @@
* limitations under the License.
*/
+#include "time_utils.h"
+
#include <inttypes.h>
+#include <stdio.h>
+
#include <limits>
#include <sstream>
-#include "time_utils.h"
-
#include "android-base/stringprintf.h"
#include "logging.h"
@@ -30,6 +32,20 @@
namespace art {
+namespace {
+
+#if !defined(__linux__)
+int GetTimeOfDay(struct timeval* tv, struct timezone* tz) {
+#ifdef _WIN32
+ return mingw_gettimeofday(tv, tz);
+#else
+ return gettimeofday(tv, tz);
+#endif
+}
+#endif
+
+} // namespace
+
using android::base::StringPrintf;
std::string PrettyDuration(uint64_t nano_duration, size_t max_fraction_digits) {
@@ -117,7 +133,12 @@
std::string GetIsoDate() {
time_t now = time(nullptr);
tm tmbuf;
+#ifdef _WIN32
+ localtime_s(&tmbuf, &now);
+ tm* ptm = &tmbuf;
+#else
tm* ptm = localtime_r(&now, &tmbuf);
+#endif
return StringPrintf("%04d-%02d-%02d %02d:%02d:%02d",
ptm->tm_year + 1900, ptm->tm_mon+1, ptm->tm_mday,
ptm->tm_hour, ptm->tm_min, ptm->tm_sec);
@@ -128,9 +149,9 @@
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_nsec / UINT64_C(1000000);
-#else // __APPLE__
+#else
timeval now;
- gettimeofday(&now, nullptr);
+ GetTimeOfDay(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000) + now.tv_usec / UINT64_C(1000);
#endif
}
@@ -140,9 +161,9 @@
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
-#else // __APPLE__
+#else
timeval now;
- gettimeofday(&now, nullptr);
+ GetTimeOfDay(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_usec;
#endif
}
@@ -152,9 +173,9 @@
timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
-#else // __APPLE__
+#else
timeval now;
- gettimeofday(&now, nullptr);
+ GetTimeOfDay(&now, nullptr);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_usec * UINT64_C(1000);
#endif
}
@@ -164,7 +185,7 @@
timespec now;
clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
-#else // __APPLE__
+#else
UNIMPLEMENTED(WARNING);
return -1;
#endif
@@ -176,8 +197,13 @@
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now);
return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000000) + now.tv_nsec;
#else
- UNIMPLEMENTED(WARNING);
- return -1;
+ // We cannot use clock_gettime() here. Return the process wall clock time
+ // (using art::NanoTime, which relies on gettimeofday()) as approximation of
+ // the process CPU time instead.
+ //
+ // Note: clock_gettime() is available from macOS 10.12 (Darwin 16), but we try
+ // to keep things simple here.
+ return NanoTime();
#endif
}
@@ -190,12 +216,12 @@
void InitTimeSpec(bool absolute, int clock, int64_t ms, int32_t ns, timespec* ts) {
if (absolute) {
-#if !defined(__APPLE__)
+#if defined(__linux__)
clock_gettime(clock, ts);
#else
UNUSED(clock);
timeval tv;
- gettimeofday(&tv, nullptr);
+ GetTimeOfDay(&tv, nullptr);
ts->tv_sec = tv.tv_sec;
ts->tv_nsec = tv.tv_usec * 1000;
#endif
diff --git a/libartbase/base/time_utils.h b/libartbase/base/time_utils.h
index 431d3e1..15805f3 100644
--- a/libartbase/base/time_utils.h
+++ b/libartbase/base/time_utils.h
@@ -18,6 +18,7 @@
#define ART_LIBARTBASE_BASE_TIME_UTILS_H_
#include <stdint.h>
+#include <stdio.h> // Needed for correct _WIN32 build.
#include <time.h>
#include <string>
diff --git a/libartbase/base/unix_file/fd_file.cc b/libartbase/base/unix_file/fd_file.cc
index 76894c6..8831b9c 100644
--- a/libartbase/base/unix_file/fd_file.cc
+++ b/libartbase/base/unix_file/fd_file.cc
@@ -25,8 +25,13 @@
#include <android/fdsan.h>
#endif
+#if defined(_WIN32)
+#include <windows.h>
+#endif
+
#include <limits>
+#include <android-base/file.h>
#include <android-base/logging.h>
// Includes needed for FdFile::Copy().
@@ -40,6 +45,96 @@
namespace unix_file {
+#if defined(_WIN32)
+// RAII wrapper for an event object to allow asynchronous I/O to correctly signal completion.
+class ScopedEvent {
+ public:
+ ScopedEvent() {
+ handle_ = CreateEventA(/*lpEventAttributes*/ nullptr,
+ /*bManualReset*/ true,
+ /*bInitialState*/ false,
+ /*lpName*/ nullptr);
+ }
+
+ ~ScopedEvent() { CloseHandle(handle_); }
+
+ HANDLE handle() { return handle_; }
+
+ private:
+ HANDLE handle_;
+ DISALLOW_COPY_AND_ASSIGN(ScopedEvent);
+};
+
+// Windows implementation of pread/pwrite. Note that these DO move the file descriptor's read/write
+// position, but do so atomically.
+static ssize_t pread(int fd, void* data, size_t byte_count, off64_t offset) {
+ ScopedEvent event;
+ if (event.handle() == INVALID_HANDLE_VALUE) {
+ PLOG(ERROR) << "Could not create event handle.";
+ errno = EIO;
+ return static_cast<ssize_t>(-1);
+ }
+
+ auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+ DWORD bytes_read = 0;
+ OVERLAPPED overlapped = {};
+ overlapped.Offset = static_cast<DWORD>(offset);
+ overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+ overlapped.hEvent = event.handle();
+ if (!ReadFile(handle, data, static_cast<DWORD>(byte_count), &bytes_read, &overlapped)) {
+ // If the read failed with other than ERROR_IO_PENDING, return an error.
+    // ERROR_IO_PENDING signals the read was begun asynchronously.
+ // Block until the asynchronous operation has finished or fails, and return
+ // result accordingly.
+ if (::GetLastError() != ERROR_IO_PENDING ||
+ !::GetOverlappedResult(handle, &overlapped, &bytes_read, TRUE)) {
+ // In case someone tries to read errno (since this is masquerading as a POSIX call).
+ errno = EIO;
+ return static_cast<ssize_t>(-1);
+ }
+ }
+ return static_cast<ssize_t>(bytes_read);
+}
+
+static ssize_t pwrite(int fd, const void* buf, size_t count, off64_t offset) {
+ ScopedEvent event;
+ if (event.handle() == INVALID_HANDLE_VALUE) {
+ PLOG(ERROR) << "Could not create event handle.";
+ errno = EIO;
+ return static_cast<ssize_t>(-1);
+ }
+
+ auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+ DWORD bytes_written = 0;
+ OVERLAPPED overlapped = {};
+ overlapped.Offset = static_cast<DWORD>(offset);
+ overlapped.OffsetHigh = static_cast<DWORD>(offset >> 32);
+ overlapped.hEvent = event.handle();
+ if (!::WriteFile(handle, buf, count, &bytes_written, &overlapped)) {
+ // If the write failed with other than ERROR_IO_PENDING, return an error.
+ // ERROR_IO_PENDING signals the write was begun asynchronously.
+ // Block until the asynchronous operation has finished or fails, and return
+ // result accordingly.
+ if (::GetLastError() != ERROR_IO_PENDING ||
+ !::GetOverlappedResult(handle, &overlapped, &bytes_written, TRUE)) {
+ // In case someone tries to read errno (since this is masquerading as a POSIX call).
+ errno = EIO;
+ return static_cast<ssize_t>(-1);
+ }
+ }
+ return static_cast<ssize_t>(bytes_written);
+}
+
+static int fsync(int fd) {
+ auto handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd));
+ if (handle != INVALID_HANDLE_VALUE && ::FlushFileBuffers(handle)) {
+ return 0;
+ }
+ errno = EINVAL;
+ return -1;
+}
+#endif
+
#if defined(__BIONIC__)
static uint64_t GetFdFileOwnerTag(FdFile* fd_file) {
return android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_ART_FDFILE,
diff --git a/libartbase/base/utils.cc b/libartbase/base/utils.cc
index 0f172fd..58d8575 100644
--- a/libartbase/base/utils.cc
+++ b/libartbase/base/utils.cc
@@ -19,9 +19,7 @@
#include <inttypes.h>
#include <pthread.h>
#include <sys/stat.h>
-#include <sys/syscall.h>
#include <sys/types.h>
-#include <sys/wait.h>
#include <unistd.h>
#include <fstream>
@@ -47,6 +45,16 @@
#if defined(__linux__)
#include <linux/unistd.h>
+#include <sys/syscall.h>
+#endif
+
+#if defined(_WIN32)
+#include <windows.h>
+// This include needs to be here due to our coding conventions. Unfortunately
+// it drags in the definition of the dreaded ERROR macro.
+#ifdef ERROR
+#undef ERROR
+#endif
#endif
namespace art {
@@ -61,6 +69,8 @@
return owner;
#elif defined(__BIONIC__)
return gettid();
+#elif defined(_WIN32)
+ return static_cast<pid_t>(::GetCurrentThreadId());
#else
return syscall(__NR_gettid);
#endif
@@ -68,12 +78,17 @@
std::string GetThreadName(pid_t tid) {
std::string result;
+#ifdef _WIN32
+ UNUSED(tid);
+ result = "<unknown>";
+#else
// TODO: make this less Linux-specific.
if (ReadFileToString(StringPrintf("/proc/self/task/%d/comm", tid), &result)) {
result.resize(result.size() - 1); // Lose the trailing '\n'.
} else {
result = "<unknown>";
}
+#endif
return result;
}
@@ -137,7 +152,7 @@
} else {
s = thread_name + len - 15;
}
-#if defined(__linux__)
+#if defined(__linux__) || defined(_WIN32)
// pthread_setname_np fails rather than truncating long strings.
char buf[16]; // MAX_TASK_COMM_LEN=16 is hard-coded in the kernel.
strncpy(buf, s, sizeof(buf)-1);
@@ -153,6 +168,11 @@
void GetTaskStats(pid_t tid, char* state, int* utime, int* stime, int* task_cpu) {
*utime = *stime = *task_cpu = 0;
+#ifdef _WIN32
+ // TODO: implement this.
+ UNUSED(tid);
+ *state = 'S';
+#else
std::string stats;
// TODO: make this less Linux-specific.
if (!ReadFileToString(StringPrintf("/proc/self/task/%d/stat", tid), &stats)) {
@@ -167,6 +187,7 @@
*utime = strtoull(fields[11].c_str(), nullptr, 10);
*stime = strtoull(fields[12].c_str(), nullptr, 10);
*task_cpu = strtoull(fields[36].c_str(), nullptr, 10);
+#endif
}
static void ParseStringAfterChar(const std::string& s,
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index a7f4b28..5056edc 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -18,7 +18,6 @@
#include <fcntl.h>
#include <stdio.h>
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
@@ -27,6 +26,7 @@
#include "android-base/stringprintf.h"
#include "ziparchive/zip_archive.h"
+#include "base/mman.h"
#include "bit_utils.h"
#include "unix_file/fd_file.h"
@@ -203,6 +203,11 @@
}
static void SetCloseOnExec(int fd) {
+#ifdef _WIN32
+ // Exec is not supported on Windows.
+ UNUSED(fd);
+ PLOG(ERROR) << "SetCloseOnExec is not supported on Windows.";
+#else
// This dance is more portable than Linux's O_CLOEXEC open(2) flag.
int flags = fcntl(fd, F_GETFD);
if (flags == -1) {
@@ -214,6 +219,7 @@
PLOG(WARNING) << "fcntl(" << fd << ", F_SETFD, " << flags << ") failed";
return;
}
+#endif
}
ZipArchive* ZipArchive::Open(const char* filename, std::string* error_msg) {
diff --git a/libdexfile/Android.bp b/libdexfile/Android.bp
index 7f25f02..a4f7e25 100644
--- a/libdexfile/Android.bp
+++ b/libdexfile/Android.bp
@@ -43,30 +43,58 @@
"libziparchive",
"libz",
],
+ shared_libs: [
+ // For MemMap.
+ "libartbase",
+ "liblog",
+ // For atrace.
+ "libcutils",
+ // For common macros.
+ "libbase",
+ ],
+ export_shared_lib_headers: [
+ "libartbase",
+ "libbase",
+ ],
},
- host: {
+ not_windows: {
shared_libs: [
"libziparchive",
"libz",
+ // For MemMap.
+ "libartbase",
+ "liblog",
+ // For atrace.
+ "libcutils",
+ // For common macros.
+ "libbase",
],
+ export_shared_lib_headers: [
+ "libartbase",
+ "libbase",
+ ],
+ },
+ windows: {
+ static_libs: [
+ "libziparchive",
+ "libz",
+ // For MemMap.
+ "libartbase",
+ "liblog",
+ // For atrace.
+ "libcutils",
+ // For common macros.
+ "libbase",
+ ],
+ export_static_lib_headers: [
+ "libartbase",
+ "libbase",
+ ],
+ cflags: ["-Wno-thread-safety"],
},
},
generated_sources: ["dexfile_operator_srcs"],
- shared_libs: [
- // For MemMap.
- "libartbase",
- "liblog",
- // For atrace.
- "libcutils",
- // For common macros.
- "libbase",
- "libz",
- ],
export_include_dirs: ["."],
- export_shared_lib_headers: [
- "libartbase",
- "libbase",
- ],
}
cc_defaults {
@@ -121,6 +149,14 @@
strip: {
keep_symbols: true,
},
+ target: {
+ windows: {
+ enabled: true,
+ shared: {
+ enabled: false,
+ },
+ },
+ },
}
art_cc_library {
@@ -129,6 +165,79 @@
"art_debug_defaults",
"libdexfile_defaults",
],
+ target: {
+ windows: {
+ enabled: true,
+ shared: {
+ enabled: false,
+ },
+ },
+ },
+}
+
+cc_library_headers {
+ name: "libdexfile_external_headers",
+ host_supported: true,
+ header_libs: ["libbase_headers"],
+ export_header_lib_headers: ["libbase_headers"],
+ export_include_dirs: ["external/include"],
+
+ target: {
+ windows: {
+ enabled: true,
+ },
+ },
+}
+
+cc_library {
+ name: "libdexfile_external",
+ host_supported: true,
+ srcs: [
+ "external/dex_file_ext.cc",
+ ],
+ header_libs: ["libdexfile_external_headers"],
+ shared_libs: [
+ "libbase",
+ "libdexfile",
+ ],
+
+ // TODO(b/120670568): Enable this when linking bug is fixed.
+ // stubs: {
+ // symbol_file: "external/libdexfile_external.map.txt",
+ // versions: ["1"],
+ // },
+
+ // Hide symbols using version scripts for targets that support it, i.e. all
+ // but Darwin.
+ // TODO(b/120670568): Clean this up when stubs above is enabled.
+ target: {
+ android: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ linux_bionic: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ linux_glibc: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ windows: {
+ version_script: "external/libdexfile_external.map.txt",
+ },
+ },
+}
+
+// Support library with a C++ API for accessing the libdexfile API for external
+// (non-ART) users. They should link to their own instance of this (either
+// statically or through linker namespaces).
+cc_library {
+ name: "libdexfile_support",
+ host_supported: true,
+ srcs: [
+ "external/dex_file_supp.cc",
+ ],
+ header_libs: ["libdexfile_external_headers"],
+ shared_libs: ["libdexfile_external"],
+ export_header_lib_headers: ["libdexfile_external_headers"],
}
art_cc_test {
diff --git a/libdexfile/dex/art_dex_file_loader.cc b/libdexfile/dex/art_dex_file_loader.cc
index 20a519b..57e838f 100644
--- a/libdexfile/dex/art_dex_file_loader.cc
+++ b/libdexfile/dex/art_dex_file_loader.cc
@@ -16,7 +16,6 @@
#include "art_dex_file_loader.h"
-#include <sys/mman.h> // For the PROT_* and MAP_* constants.
#include <sys/stat.h>
#include "android-base/stringprintf.h"
@@ -24,6 +23,7 @@
#include "base/file_magic.h"
#include "base/file_utils.h"
#include "base/mem_map.h"
+#include "base/mman.h" // For the PROT_* and MAP_* constants.
#include "base/stl_util.h"
#include "base/systrace.h"
#include "base/unix_file/fd_file.h"
@@ -156,14 +156,16 @@
return false;
}
-std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const {
+std::unique_ptr<const DexFile> ArtDexFileLoader::Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container) const {
ScopedTrace trace(std::string("Open dex file from RAM ") + location);
return OpenCommon(base,
size,
@@ -175,7 +177,7 @@
verify,
verify_checksum,
error_msg,
- /*container=*/ nullptr,
+ std::move(container),
/*verify_result=*/ nullptr);
}
diff --git a/libdexfile/dex/art_dex_file_loader.h b/libdexfile/dex/art_dex_file_loader.h
index 40d4673..d41eac5 100644
--- a/libdexfile/dex/art_dex_file_loader.h
+++ b/libdexfile/dex/art_dex_file_loader.h
@@ -54,14 +54,16 @@
bool* only_contains_uncompressed_dex = nullptr) const override;
// Opens .dex file, backed by existing memory
- std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const override;
+ std::unique_ptr<const DexFile> Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container = nullptr) const override;
// Opens .dex file that has been memory-mapped by the caller.
std::unique_ptr<const DexFile> Open(const std::string& location,
diff --git a/libdexfile/dex/dex_file_layout.cc b/libdexfile/dex/dex_file_layout.cc
index 75a3111..929025a 100644
--- a/libdexfile/dex/dex_file_layout.cc
+++ b/libdexfile/dex/dex_file_layout.cc
@@ -16,9 +16,9 @@
#include "dex_file_layout.h"
-#include <sys/mman.h>
#include "base/bit_utils.h"
+#include "base/mman.h"
#include "dex_file.h"
namespace art {
@@ -26,6 +26,12 @@
int DexLayoutSection::MadviseLargestPageAlignedRegion(const uint8_t* begin,
const uint8_t* end,
int advice) {
+#ifdef _WIN32
+ UNUSED(begin);
+ UNUSED(end);
+ UNUSED(advice);
+ PLOG(WARNING) << "madvise is unsupported on Windows.";
+#else
DCHECK_LE(begin, end);
begin = AlignUp(begin, kPageSize);
end = AlignDown(end, kPageSize);
@@ -37,6 +43,7 @@
}
return result;
}
+#endif
return 0;
}
@@ -50,6 +57,11 @@
}
void DexLayoutSections::Madvise(const DexFile* dex_file, MadviseState state) const {
+#ifdef _WIN32
+ UNUSED(dex_file);
+ UNUSED(state);
+ PLOG(WARNING) << "madvise is unsupported on Windows.";
+#else
// The dex file is already defaulted to random access everywhere.
for (const DexLayoutSection& section : sections_) {
switch (state) {
@@ -79,6 +91,7 @@
}
}
}
+#endif
}
std::ostream& operator<<(std::ostream& os, const DexLayoutSection& section) {
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 3667c8c..a719d41 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -187,12 +187,18 @@
std::string base_location = GetBaseLocation(dex_location);
const char* suffix = dex_location + base_location.size();
DCHECK(suffix[0] == 0 || suffix[0] == kMultiDexSeparator);
+#ifdef _WIN32
+ // Warning: No symbolic link processing here.
+ PLOG(WARNING) << "realpath is unsupported on Windows.";
+#else
// Warning: Bionic implementation of realpath() allocates > 12KB on the stack.
// Do not run this code on a small stack, e.g. in signal handler.
UniqueCPtr<const char[]> path(realpath(base_location.c_str(), nullptr));
if (path != nullptr && path.get() != base_location) {
return std::string(path.get()) + suffix;
- } else if (suffix[0] == 0) {
+ }
+#endif
+ if (suffix[0] == 0) {
return base_location;
} else {
return dex_location;
@@ -212,14 +218,16 @@
return false;
}
-std::unique_ptr<const DexFile> DexFileLoader::Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const {
+std::unique_ptr<const DexFile> DexFileLoader::Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container) const {
return OpenCommon(base,
size,
/*data_base=*/ nullptr,
@@ -230,7 +238,7 @@
verify,
verify_checksum,
error_msg,
- /*container=*/ nullptr,
+ std::move(container),
/*verify_result=*/ nullptr);
}
diff --git a/libdexfile/dex/dex_file_loader.h b/libdexfile/dex/dex_file_loader.h
index 8fc836e..49e177f 100644
--- a/libdexfile/dex/dex_file_loader.h
+++ b/libdexfile/dex/dex_file_loader.h
@@ -121,14 +121,16 @@
bool* zip_file_only_contains_uncompress_dex = nullptr) const;
// Opens .dex file, backed by existing memory
- virtual std::unique_ptr<const DexFile> Open(const uint8_t* base,
- size_t size,
- const std::string& location,
- uint32_t location_checksum,
- const OatDexFile* oat_dex_file,
- bool verify,
- bool verify_checksum,
- std::string* error_msg) const;
+ virtual std::unique_ptr<const DexFile> Open(
+ const uint8_t* base,
+ size_t size,
+ const std::string& location,
+ uint32_t location_checksum,
+ const OatDexFile* oat_dex_file,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg,
+ std::unique_ptr<DexFileContainer> container = nullptr) const;
// Open a dex file with a separate data section.
virtual std::unique_ptr<const DexFile> OpenWithDataSection(
diff --git a/libdexfile/external/dex_file_ext.cc b/libdexfile/external/dex_file_ext.cc
new file mode 100644
index 0000000..5c353b5
--- /dev/null
+++ b/libdexfile/external/dex_file_ext.cc
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <cerrno>
+#include <cstring>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <android-base/logging.h>
+#include <android-base/macros.h>
+#include <android-base/mapped_file.h>
+#include <android-base/stringprintf.h>
+
+#include <dex/class_accessor-inl.h>
+#include <dex/code_item_accessors-inl.h>
+#include <dex/dex_file-inl.h>
+#include <dex/dex_file_loader.h>
+
+#include "art_api/ext_dex_file.h"
+
+extern "C" class ExtDexFileString {
+ public:
+ const std::string str_;
+};
+
+namespace art {
+namespace {
+
+const ExtDexFileString empty_string{""};
+
+struct MethodCacheEntry {
+ int32_t offset; // Offset relative to the start of the dex file header.
+ int32_t len;
+ int32_t index; // Method index.
+};
+
+class MappedFileContainer : public DexFileContainer {
+ public:
+ explicit MappedFileContainer(std::unique_ptr<android::base::MappedFile>&& map)
+ : map_(std::move(map)) {}
+ ~MappedFileContainer() override {}
+ int GetPermissions() override { return 0; }
+ bool IsReadOnly() override { return true; }
+ bool EnableWrite() override { return false; }
+ bool DisableWrite() override { return false; }
+
+ private:
+ std::unique_ptr<android::base::MappedFile> map_;
+ DISALLOW_COPY_AND_ASSIGN(MappedFileContainer);
+};
+
+} // namespace
+} // namespace art
+
+extern "C" {
+
+const ExtDexFileString* ExtDexFileMakeString(const char* str) {
+ if (str[0] == '\0') {
+ return &art::empty_string;
+ }
+ return new ExtDexFileString{str};
+}
+
+const char* ExtDexFileGetString(const ExtDexFileString* ext_string, /*out*/ size_t* size) {
+ DCHECK(ext_string != nullptr);
+ *size = ext_string->str_.size();
+ return ext_string->str_.data();
+}
+
+void ExtDexFileFreeString(const ExtDexFileString* ext_string) {
+ DCHECK(ext_string != nullptr);
+ if (ext_string != &art::empty_string) {
+ delete (ext_string);
+ }
+}
+
+// Wraps DexFile to add the caching needed by the external interface. This is
+// what gets passed over as ExtDexFile*.
+class ExtDexFile {
+  // Method cache for GetMethodInfoForOffset. This is populated as we iterate
+  // sequentially through the class defs. The method name is not cached here;
+  // it is recomputed by GetMethodInfoForOffset for each entry it returns.
+ std::map<int32_t, art::MethodCacheEntry> method_cache_;
+
+ // Index of first class def for which method_cache_ isn't complete.
+ uint32_t class_def_index_ = 0;
+
+ public:
+ std::unique_ptr<const art::DexFile> dex_file_;
+ explicit ExtDexFile(std::unique_ptr<const art::DexFile>&& dex_file)
+ : dex_file_(std::move(dex_file)) {}
+
+ art::MethodCacheEntry* GetMethodCacheEntryForOffset(int64_t dex_offset) {
+ // First look in the method cache.
+ auto it = method_cache_.upper_bound(dex_offset);
+ if (it != method_cache_.end() && dex_offset >= it->second.offset) {
+ return &it->second;
+ }
+
+ for (; class_def_index_ < dex_file_->NumClassDefs(); class_def_index_++) {
+ art::ClassAccessor accessor(*dex_file_, class_def_index_);
+
+ for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
+ art::CodeItemInstructionAccessor code = method.GetInstructions();
+ if (!code.HasCodeItem()) {
+ continue;
+ }
+
+ int32_t offset = reinterpret_cast<const uint8_t*>(code.Insns()) - dex_file_->Begin();
+ int32_t len = code.InsnsSizeInBytes();
+ int32_t index = method.GetIndex();
+ auto res = method_cache_.emplace(offset + len, art::MethodCacheEntry{offset, len, index});
+ if (offset <= dex_offset && dex_offset < offset + len) {
+ return &res.first->second;
+ }
+ }
+ }
+
+ return nullptr;
+ }
+};
+
+int ExtDexFileOpenFromMemory(const void* addr,
+ /*inout*/ size_t* size,
+ const char* location,
+ /*out*/ const ExtDexFileString** ext_error_msg,
+ /*out*/ ExtDexFile** ext_dex_file) {
+ if (*size < sizeof(art::DexFile::Header)) {
+ *size = sizeof(art::DexFile::Header);
+ *ext_error_msg = nullptr;
+ return false;
+ }
+
+ const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(addr);
+ uint32_t file_size = header->file_size_;
+ if (art::CompactDexFile::IsMagicValid(header->magic_)) {
+ // Compact dex files store the data section separately so that it can be shared.
+ // Therefore we need to extend the read memory range to include it.
+ // TODO: This might be wasteful as we might read data in between as well.
+ // In practice, this should be fine, as such sharing only happens on disk.
+ uint32_t computed_file_size;
+ if (__builtin_add_overflow(header->data_off_, header->data_size_, &computed_file_size)) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Corrupt CompactDexFile header in '%s'", location)};
+ return false;
+ }
+ if (computed_file_size > file_size) {
+ file_size = computed_file_size;
+ }
+ } else if (!art::StandardDexFile::IsMagicValid(header->magic_)) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Unrecognized dex file header in '%s'", location)};
+ return false;
+ }
+
+ if (*size < file_size) {
+ *size = file_size;
+ *ext_error_msg = nullptr;
+ return false;
+ }
+
+ std::string loc_str(location);
+ art::DexFileLoader loader;
+ std::string error_msg;
+ std::unique_ptr<const art::DexFile> dex_file = loader.Open(static_cast<const uint8_t*>(addr),
+ *size,
+ loc_str,
+ header->checksum_,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
+ &error_msg);
+ if (dex_file == nullptr) {
+ *ext_error_msg = new ExtDexFileString{std::move(error_msg)};
+ return false;
+ }
+
+ *ext_dex_file = new ExtDexFile(std::move(dex_file));
+ return true;
+}
+
+int ExtDexFileOpenFromFd(int fd,
+ off_t offset,
+ const char* location,
+ /*out*/ const ExtDexFileString** ext_error_msg,
+ /*out*/ ExtDexFile** ext_dex_file) {
+ size_t length;
+ {
+ struct stat sbuf;
+ std::memset(&sbuf, 0, sizeof(sbuf));
+ if (fstat(fd, &sbuf) == -1) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("fstat '%s' failed: %s", location, std::strerror(errno))};
+ return false;
+ }
+ if (S_ISDIR(sbuf.st_mode)) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Attempt to mmap directory '%s'", location)};
+ return false;
+ }
+ length = sbuf.st_size;
+ }
+
+ if (length < offset + sizeof(art::DexFile::Header)) {
+ *ext_error_msg = new ExtDexFileString{android::base::StringPrintf(
+ "Offset %" PRId64 " too large for '%s' of size %zu", int64_t{offset}, location, length)};
+ return false;
+ }
+
+ // Cannot use MemMap in libartbase here, because it pulls in dlopen which we
+ // can't have when being compiled statically.
+ std::unique_ptr<android::base::MappedFile> map =
+ android::base::MappedFile::FromFd(fd, offset, length, PROT_READ);
+ if (map == nullptr) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("mmap '%s' failed: %s", location, std::strerror(errno))};
+ return false;
+ }
+
+ const art::DexFile::Header* header = reinterpret_cast<const art::DexFile::Header*>(map->data());
+ uint32_t file_size;
+ if (__builtin_add_overflow(offset, header->file_size_, &file_size)) {
+ *ext_error_msg =
+ new ExtDexFileString{android::base::StringPrintf("Corrupt header in '%s'", location)};
+ return false;
+ }
+ if (length < file_size) {
+ *ext_error_msg = new ExtDexFileString{
+ android::base::StringPrintf("Dex file '%s' too short: expected %" PRIu32 ", got %" PRIu64,
+ location,
+ file_size,
+ uint64_t{length})};
+ return false;
+ }
+
+ void* addr = map->data();
+ size_t size = map->size();
+ auto container = std::make_unique<art::MappedFileContainer>(std::move(map));
+
+ std::string loc_str(location);
+ std::string error_msg;
+ art::DexFileLoader loader;
+ std::unique_ptr<const art::DexFile> dex_file = loader.Open(reinterpret_cast<const uint8_t*>(addr),
+ size,
+ loc_str,
+ header->checksum_,
+ /*oat_dex_file=*/nullptr,
+ /*verify=*/false,
+ /*verify_checksum=*/false,
+ &error_msg,
+ std::move(container));
+ if (dex_file == nullptr) {
+ *ext_error_msg = new ExtDexFileString{std::move(error_msg)};
+ return false;
+ }
+ *ext_dex_file = new ExtDexFile(std::move(dex_file));
+ return true;
+}
+
+int ExtDexFileGetMethodInfoForOffset(ExtDexFile* ext_dex_file,
+ int64_t dex_offset,
+ /*out*/ ExtDexFileMethodInfo* method_info) {
+ if (!ext_dex_file->dex_file_->IsInDataSection(ext_dex_file->dex_file_->Begin() + dex_offset)) {
+ return false; // The DEX offset is not within the bytecode of this dex file.
+ }
+
+ if (ext_dex_file->dex_file_->IsCompactDexFile()) {
+ // The data section of compact dex files might be shared.
+ // Check the subrange unique to this compact dex.
+ const art::CompactDexFile::Header& cdex_header =
+ ext_dex_file->dex_file_->AsCompactDexFile()->GetHeader();
+ uint32_t begin = cdex_header.data_off_ + cdex_header.OwnedDataBegin();
+ uint32_t end = cdex_header.data_off_ + cdex_header.OwnedDataEnd();
+ if (dex_offset < begin || dex_offset >= end) {
+ return false; // The DEX offset is not within the bytecode of this dex file.
+ }
+ }
+
+ art::MethodCacheEntry* entry = ext_dex_file->GetMethodCacheEntryForOffset(dex_offset);
+ if (entry != nullptr) {
+ method_info->offset = entry->offset;
+ method_info->len = entry->len;
+ method_info->name =
+ new ExtDexFileString{ext_dex_file->dex_file_->PrettyMethod(entry->index, false)};
+ return true;
+ }
+
+ return false;
+}
+
+void ExtDexFileGetAllMethodInfos(ExtDexFile* ext_dex_file,
+ int with_signature,
+ ExtDexFileMethodInfoCallback* method_info_cb,
+ void* user_data) {
+ for (art::ClassAccessor accessor : ext_dex_file->dex_file_->GetClasses()) {
+ for (const art::ClassAccessor::Method& method : accessor.GetMethods()) {
+ art::CodeItemInstructionAccessor code = method.GetInstructions();
+ if (!code.HasCodeItem()) {
+ continue;
+ }
+
+ ExtDexFileMethodInfo method_info;
+ method_info.offset = static_cast<int32_t>(reinterpret_cast<const uint8_t*>(code.Insns()) -
+ ext_dex_file->dex_file_->Begin());
+ method_info.len = code.InsnsSizeInBytes();
+ method_info.name = new ExtDexFileString{
+ ext_dex_file->dex_file_->PrettyMethod(method.GetIndex(), with_signature)};
+ method_info_cb(&method_info, user_data);
+ }
+ }
+}
+
+void ExtDexFileFree(ExtDexFile* ext_dex_file) { delete (ext_dex_file); }
+
+} // extern "C"
diff --git a/libdexfile/external/dex_file_supp.cc b/libdexfile/external/dex_file_supp.cc
new file mode 100644
index 0000000..6514c8a
--- /dev/null
+++ b/libdexfile/external/dex_file_supp.cc
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "art_api/ext_dex_file.h"
+
+namespace art_api {
+namespace dex {
+
+DexFile::~DexFile() { ExtDexFileFree(ext_dex_file_); }
+
+MethodInfo DexFile::AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info) {
+ return {ext_method_info.offset, ext_method_info.len, DexString(ext_method_info.name)};
+}
+
+void DexFile::AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* ctx) {
+ auto vect = static_cast<MethodInfoVector*>(ctx);
+ vect->emplace_back(AbsorbMethodInfo(*ext_method_info));
+}
+
+} // namespace dex
+} // namespace art_api
diff --git a/libdexfile/external/include/art_api/ext_dex_file.h b/libdexfile/external/include/art_api/ext_dex_file.h
new file mode 100644
index 0000000..5f64ab1
--- /dev/null
+++ b/libdexfile/external/include/art_api/ext_dex_file.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_
+#define ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_
+
+// Dex file external API
+
+#include <sys/types.h>
+
+#include <cstring>
+#include <memory>
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include <android-base/macros.h>
+
+extern "C" {
+
+// This is the stable C ABI that backs art_api::dex below. Structs and functions
+// may only be added here.
+// TODO(b/120978655): Move this to a separate pure C header.
+//
+// Clients should use the C++ wrappers in art_api::dex instead.
+
+// Opaque wrapper for an std::string allocated in libdexfile which must be freed
+// using ExtDexFileFreeString.
+class ExtDexFileString;
+
+// Returns an ExtDexFileString initialized to the given string.
+const ExtDexFileString* ExtDexFileMakeString(const char* str);
+
+// Returns a pointer to the underlying null-terminated character array and its
+// size for the given ExtDexFileString.
+const char* ExtDexFileGetString(const ExtDexFileString* ext_string, /*out*/ size_t* size);
+
+// Frees an ExtDexFileString.
+void ExtDexFileFreeString(const ExtDexFileString* ext_string);
+
+struct ExtDexFileMethodInfo {
+ int32_t offset;
+ int32_t len;
+ const ExtDexFileString* name;
+};
+
+class ExtDexFile;
+
+// See art_api::dex::DexFile::OpenFromMemory. Returns true on success.
+int ExtDexFileOpenFromMemory(const void* addr,
+ /*inout*/ size_t* size,
+ const char* location,
+ /*out*/ const ExtDexFileString** error_msg,
+ /*out*/ ExtDexFile** ext_dex_file);
+
+// See art_api::dex::DexFile::OpenFromFd. Returns true on success.
+int ExtDexFileOpenFromFd(int fd,
+ off_t offset,
+ const char* location,
+ /*out*/ const ExtDexFileString** error_msg,
+ /*out*/ ExtDexFile** ext_dex_file);
+
+// See art_api::dex::DexFile::GetMethodInfoForOffset. Returns true on success.
+int ExtDexFileGetMethodInfoForOffset(ExtDexFile* ext_dex_file,
+ int64_t dex_offset,
+ /*out*/ ExtDexFileMethodInfo* method_info);
+
+typedef void ExtDexFileMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info,
+ void* user_data);
+
+// See art_api::dex::DexFile::GetAllMethodInfos.
+void ExtDexFileGetAllMethodInfos(ExtDexFile* ext_dex_file,
+ int with_signature,
+ ExtDexFileMethodInfoCallback* method_info_cb,
+ void* user_data);
+
+// Frees an ExtDexFile.
+void ExtDexFileFree(ExtDexFile* ext_dex_file);
+
+} // extern "C"
+
+namespace art_api {
+namespace dex {
+
+// Minimal std::string look-alike for a string returned from libdexfile.
+class DexString final {
+ public:
+ DexString(DexString&& dex_str) { ReplaceExtString(std::move(dex_str)); }
+ explicit DexString(const char* str = "") : ext_string_(ExtDexFileMakeString(str)) {}
+ ~DexString() { ExtDexFileFreeString(ext_string_); }
+
+ DexString& operator=(DexString&& dex_str) {
+ ReplaceExtString(std::move(dex_str));
+ return *this;
+ }
+
+ const char* data() const {
+ size_t ignored;
+ return ExtDexFileGetString(ext_string_, &ignored);
+ }
+ const char* c_str() const { return data(); }
+
+ size_t size() const {
+ size_t len;
+ (void)ExtDexFileGetString(ext_string_, &len);
+ return len;
+ }
+ size_t length() const { return size(); }
+
+ operator std::string_view() const {
+ size_t len;
+ const char* chars = ExtDexFileGetString(ext_string_, &len);
+ return std::string_view(chars, len);
+ }
+
+ private:
+ friend class DexFile;
+ friend bool operator==(const DexString&, const DexString&);
+ explicit DexString(const ExtDexFileString* ext_string) : ext_string_(ext_string) {}
+ const ExtDexFileString* ext_string_; // Owned instance. Never nullptr.
+
+ void ReplaceExtString(DexString&& dex_str) {
+ ext_string_ = dex_str.ext_string_;
+ dex_str.ext_string_ = ExtDexFileMakeString("");
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(DexString);
+};
+
+inline bool operator==(const DexString& s1, const DexString& s2) {
+ size_t l1, l2;
+ const char* str1 = ExtDexFileGetString(s1.ext_string_, &l1);
+ const char* str2 = ExtDexFileGetString(s2.ext_string_, &l2);
+ // Use memcmp to avoid assumption about absence of null characters in the strings.
+ return l1 == l2 && !std::memcmp(str1, str2, l1);
+}
+
+struct MethodInfo {
+ int32_t offset; // Code offset relative to the start of the dex file header
+ int32_t len; // Code length
+ DexString name;
+};
+
+inline bool operator==(const MethodInfo& s1, const MethodInfo& s2) {
+ return s1.offset == s2.offset && s1.len == s2.len && s1.name == s2.name;
+}
+
+// External stable API to access ordinary dex files and CompactDex. This wraps
+// the stable C ABI and handles instance ownership. Thread-compatible but not
+// thread-safe.
+class DexFile {
+ public:
+ DexFile(DexFile&& dex_file) {
+ ext_dex_file_ = dex_file.ext_dex_file_;
+ dex_file.ext_dex_file_ = nullptr;
+ }
+ virtual ~DexFile();
+
+  // Interprets a chunk of memory as a dex file. If *size is too small,
+  // returns nullptr, sets *size to a new size to try again with, and sets
+  // *error_msg to "". That might happen repeatedly. Also returns nullptr
+  // on error, in which case *error_msg is set to a nonempty string.
+ //
+ // location is a string that describes the dex file, and is preferably its
+ // path. It is mostly used to make error messages better, and may be "".
+ //
+ // The caller must retain the memory.
+ static std::unique_ptr<DexFile> OpenFromMemory(const void* addr,
+ size_t* size,
+ const std::string& location,
+ /*out*/ std::string* error_msg) {
+ ExtDexFile* ext_dex_file;
+ const ExtDexFileString* ext_error_msg = nullptr;
+ if (ExtDexFileOpenFromMemory(addr, size, location.c_str(), &ext_error_msg, &ext_dex_file)) {
+ return std::unique_ptr<DexFile>(new DexFile(ext_dex_file));
+ }
+ *error_msg = (ext_error_msg == nullptr) ? "" : std::string(DexString(ext_error_msg));
+ return nullptr;
+ }
+
+ // mmaps the given file offset in the open fd and reads a dexfile from there.
+ // Returns nullptr on error in which case *error_msg is set.
+ //
+ // location is a string that describes the dex file, and is preferably its
+ // path. It is mostly used to make error messages better, and may be "".
+ static std::unique_ptr<DexFile> OpenFromFd(int fd,
+ off_t offset,
+ const std::string& location,
+ /*out*/ std::string* error_msg) {
+ ExtDexFile* ext_dex_file;
+ const ExtDexFileString* ext_error_msg = nullptr;
+ if (ExtDexFileOpenFromFd(fd, offset, location.c_str(), &ext_error_msg, &ext_dex_file)) {
+ return std::unique_ptr<DexFile>(new DexFile(ext_dex_file));
+ }
+ *error_msg = std::string(DexString(ext_error_msg));
+ return nullptr;
+ }
+
+ // Given an offset relative to the start of the dex file header, if there is a
+ // method whose instruction range includes that offset then returns info about
+ // it, otherwise returns a struct with offset == 0.
+ MethodInfo GetMethodInfoForOffset(int64_t dex_offset) {
+ ExtDexFileMethodInfo ext_method_info;
+ if (ExtDexFileGetMethodInfoForOffset(ext_dex_file_, dex_offset, &ext_method_info)) {
+ return AbsorbMethodInfo(ext_method_info);
+ }
+ return {/*offset=*/0, /*len=*/0, /*name=*/DexString()};
+ }
+
+ // Returns info structs about all methods in the dex file. MethodInfo.name
+ // receives the full function signature if with_signature is set, otherwise it
+ // gets the class and method name only.
+ std::vector<MethodInfo> GetAllMethodInfos(bool with_signature = true) {
+ MethodInfoVector res;
+ ExtDexFileGetAllMethodInfos(
+ ext_dex_file_, with_signature, AddMethodInfoCallback, static_cast<void*>(&res));
+ return res;
+ }
+
+ private:
+ explicit DexFile(ExtDexFile* ext_dex_file) : ext_dex_file_(ext_dex_file) {}
+ ExtDexFile* ext_dex_file_; // Owned instance. nullptr only in moved-from zombies.
+
+ typedef std::vector<MethodInfo> MethodInfoVector;
+
+ static MethodInfo AbsorbMethodInfo(const ExtDexFileMethodInfo& ext_method_info);
+ static void AddMethodInfoCallback(const ExtDexFileMethodInfo* ext_method_info, void* user_data);
+
+ DISALLOW_COPY_AND_ASSIGN(DexFile);
+};
+
+} // namespace dex
+} // namespace art_api
+
+#endif // ART_LIBDEXFILE_EXTERNAL_INCLUDE_ART_API_EXT_DEX_FILE_H_
diff --git a/libdexfile/external/libdexfile_external.map.txt b/libdexfile/external/libdexfile_external.map.txt
new file mode 100644
index 0000000..450b633
--- /dev/null
+++ b/libdexfile/external/libdexfile_external.map.txt
@@ -0,0 +1,13 @@
+LIBDEXFILE_EXTERNAL_1 {
+ global:
+ ExtDexFileFree;
+ ExtDexFileFreeString;
+ ExtDexFileGetAllMethodInfos;
+ ExtDexFileGetMethodInfoForOffset;
+ ExtDexFileGetString;
+ ExtDexFileMakeString;
+ ExtDexFileOpenFromFd;
+ ExtDexFileOpenFromMemory;
+ local:
+ *;
+};
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 1c74a92..d2a5bb8 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -473,9 +473,6 @@
GetQuickToInterpreterBridgeOffset);
#undef DUMP_OAT_HEADER_OFFSET
- os << "BOOT IMAGE CHECKSUM:\n";
- os << StringPrintf("0x%08x\n\n", oat_header.GetBootImageChecksum());
-
// Print the key-value store.
{
os << "KEY VALUE STORE:\n";
diff --git a/runtime/Android.bp b/runtime/Android.bp
index b03ef60..71c5b74 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -345,6 +345,11 @@
static_libs: [
"libz", // For adler32.
],
+ cflags: [
+ // ART is allowed to link to libicuuc directly
+ // since they are in the same module
+ "-DANDROID_LINK_SHARED_ICU4C",
+ ],
},
android_arm: {
ldflags: JIT_DEBUG_REGISTER_CODE_LDFLAGS,
@@ -377,12 +382,12 @@
export_generated_headers: ["cpp-define-generator-asm-support"],
include_dirs: [
"art/sigchainlib",
- "external/icu/icu4c/source/common",
"external/zlib",
],
header_libs: [
"art_cmdlineparser_headers",
"cpp-define-generator-definitions",
+ "libicuuc_headers",
"libnativehelper_header_only",
"jni_platform_headers",
],
@@ -420,6 +425,8 @@
"libbacktrace",
"libbase",
"libcutils",
+ "libdexfile_external", // libunwindstack dependency
+ "libdexfile_support", // libunwindstack dependency
"liblog",
"libnativebridge",
"libnativeloader",
@@ -553,9 +560,6 @@
header_libs: [
"libnativehelper_header_only",
],
- include_dirs: [
- "external/icu/icu4c/source/common",
- ],
}
art_cc_test {
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d33541c..3b92e2c 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -212,6 +212,22 @@
self->AssertPendingException();
}
+// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
+// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
+// before.
+template <bool kNeedsVerified = false>
+static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (kNeedsVerified) {
+ // To not fail access-flags access checks, push a minimal state.
+ mirror::Class::SetStatus(klass, ClassStatus::kVerified, Thread::Current());
+ }
+ if (!klass->WasVerificationAttempted()) {
+ klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
+ klass->SetVerificationAttempted();
+ }
+}
+
void ClassLinker::ThrowEarlierClassFailure(ObjPtr<mirror::Class> c, bool wrap_in_no_class_def) {
// The class failed to initialize on a previous attempt, so we want to throw
// a NoClassDefFoundError (v2 2.17.5). The exception to this rule is if we
@@ -1037,20 +1053,15 @@
runtime->SetSentinel(heap->AllocNonMovableObject<true>(
self, java_lang_Object, java_lang_Object->GetObjectSize(), VoidFunctor()));
- const std::vector<std::string>& boot_class_path = runtime->GetBootClassPath();
- if (boot_class_path.size() != spaces.size()) {
- *error_msg = StringPrintf("Boot class path has %zu components but there are %zu image spaces.",
- boot_class_path.size(),
- spaces.size());
- return false;
- }
+ const std::vector<std::string>& boot_class_path_locations = runtime->GetBootClassPathLocations();
+ CHECK_LE(spaces.size(), boot_class_path_locations.size());
for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
// Boot class loader, use a null handle.
std::vector<std::unique_ptr<const DexFile>> dex_files;
if (!AddImageSpace(spaces[i],
ScopedNullHandle<mirror::ClassLoader>(),
/*dex_elements=*/ nullptr,
- /*dex_location=*/ boot_class_path[i].c_str(),
+ /*dex_location=*/ boot_class_path_locations[i].c_str(),
/*out*/&dex_files,
error_msg)) {
return false;
@@ -1069,6 +1080,15 @@
return true;
}
+void ClassLinker::AddExtraBootDexFiles(
+ Thread* self,
+ std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files) {
+ for (std::unique_ptr<const DexFile>& dex_file : additional_dex_files) {
+ AppendToBootClassPath(self, *dex_file);
+ boot_dex_files_.push_back(std::move(dex_file));
+ }
+}
+
bool ClassLinker::IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
ObjPtr<mirror::ClassLoader> class_loader) {
return class_loader == nullptr ||
@@ -3946,6 +3966,7 @@
h_class->SetAccessFlags(kAccPublic | kAccFinal | kAccAbstract);
h_class->SetPrimitiveType(type);
h_class->SetIfTable(GetClassRoot<mirror::Object>(this)->GetIfTable());
+ EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(h_class, image_pointer_size_);
mirror::Class::SetStatus(h_class, ClassStatus::kInitialized, self);
const char* descriptor = Primitive::Descriptor(type);
ObjPtr<mirror::Class> existing = InsertClass(descriptor,
@@ -4093,6 +4114,7 @@
new_class->PopulateEmbeddedVTable(image_pointer_size_);
ImTable* object_imt = java_lang_Object->GetImt(image_pointer_size_);
new_class->SetImt(object_imt, image_pointer_size_);
+ EnsureSkipAccessChecksMethods</* kNeedsVerified= */ true>(new_class, image_pointer_size_);
mirror::Class::SetStatus(new_class, ClassStatus::kInitialized, self);
// don't need to set new_class->SetObjectSize(..)
// because Object::SizeOf delegates to Array::SizeOf
@@ -4123,6 +4145,8 @@
// and remove "interface".
access_flags |= kAccAbstract | kAccFinal;
access_flags &= ~kAccInterface;
+ // Arrays are access-checks-clean and preverified.
+ access_flags |= kAccVerificationAttempted;
new_class->SetAccessFlags(access_flags);
@@ -4357,17 +4381,6 @@
return false;
}
-// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
-// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
-// before.
-static void EnsureSkipAccessChecksMethods(Handle<mirror::Class> klass, PointerSize pointer_size)
- REQUIRES_SHARED(Locks::mutator_lock_) {
- if (!klass->WasVerificationAttempted()) {
- klass->SetSkipAccessChecksFlagOnAllMethods(pointer_size);
- klass->SetVerificationAttempted();
- }
-}
-
verifier::FailureKind ClassLinker::VerifyClass(
Thread* self, Handle<mirror::Class> klass, verifier::HardFailLogMode log_level) {
{
@@ -4844,6 +4857,7 @@
{
// Lock on klass is released. Lock new class object.
ObjectLock<mirror::Class> initialization_lock(self, klass);
+ EnsureSkipAccessChecksMethods(klass, image_pointer_size_);
mirror::Class::SetStatus(klass, ClassStatus::kInitialized, self);
}
@@ -5594,8 +5608,7 @@
DCHECK(c != nullptr);
if (c->IsInitialized()) {
- EnsureSkipAccessChecksMethods(c, image_pointer_size_);
- self->AssertNoPendingException();
+ DCHECK(c->WasVerificationAttempted()) << c->PrettyClassAndClassLoader();
return true;
}
// SubtypeCheckInfo::Initialized must happen-before any new-instance for that type.
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index d3eab7c..d0a7c9b 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -127,6 +127,12 @@
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::dex_lock_);
+ // Add boot class path dex files that were not included in the boot image.
+ // ClassLinker takes ownership of these dex files.
+ void AddExtraBootDexFiles(Thread* self,
+ std::vector<std::unique_ptr<const DexFile>>&& additional_dex_files)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// Add an image space to the class linker, may fix up classloader fields and dex cache fields.
// The dex files that were newly opened for the space are placed in the out argument
// out_dex_files. Returns true if the operation succeeded.
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index fe45b9e..061c788 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -114,7 +114,8 @@
EXPECT_EQ(0, primitive->GetIfTableCount());
EXPECT_TRUE(primitive->GetIfTable() != nullptr);
EXPECT_EQ(primitive->GetIfTable()->Count(), 0u);
- EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract, primitive->GetAccessFlags());
+ EXPECT_EQ(kAccPublic | kAccFinal | kAccAbstract | kAccVerificationAttempted,
+ primitive->GetAccessFlags());
}
void AssertObjectClass(ObjPtr<mirror::Class> JavaLangObject)
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index a101976..a20baa0 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -24,7 +24,6 @@
#include "nativehelper/scoped_local_ref.h"
#include "android-base/stringprintf.h"
-#include <unicode/uvernum.h>
#include "art_field-inl.h"
#include "base/file_utils.h"
diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc
index b46c933..7f697d1 100644
--- a/runtime/dexopt_test.cc
+++ b/runtime/dexopt_test.cc
@@ -116,19 +116,19 @@
ASSERT_TRUE(odex_file.get() != nullptr) << error_msg;
EXPECT_EQ(filter, odex_file->GetCompilerFilter());
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(image_location.c_str(),
- kRuntimeISA,
- &error_msg));
- ASSERT_TRUE(image_header != nullptr) << error_msg;
+ std::string boot_image_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+ Runtime::Current()->GetBootClassPath(), image_location, kRuntimeISA, &error_msg);
+ ASSERT_FALSE(boot_image_checksums.empty()) << error_msg;
+
const OatHeader& oat_header = odex_file->GetOatHeader();
- uint32_t boot_image_checksum = image_header->GetImageChecksum();
if (CompilerFilter::DependsOnImageChecksum(filter)) {
+ const char* checksums = oat_header.GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+ ASSERT_TRUE(checksums != nullptr);
if (with_alternate_image) {
- EXPECT_NE(boot_image_checksum, oat_header.GetBootImageChecksum());
+ EXPECT_NE(boot_image_checksums, checksums);
} else {
- EXPECT_EQ(boot_image_checksum, oat_header.GetBootImageChecksum());
+ EXPECT_EQ(boot_image_checksums, checksums);
}
}
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index e508d5f..11ad8a8 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -38,6 +38,7 @@
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
+#pragma GCC diagnostic ignored "-Wexpansion-to-defined"
#include "../../../external/dlmalloc/malloc.c"
// Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
// of libbase, so undefine it now.
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 3160422..1014c0e 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -37,14 +37,15 @@
mirror::Object* ref,
accounting::ContinuousSpaceBitmap* bitmap) {
if (kEnableGenerationalConcurrentCopyingCollection
- && young_gen_
&& !done_scanning_.load(std::memory_order_acquire)) {
- // Everything in the unevac space should be marked for generational CC except for large objects.
- DCHECK(region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref)) << ref << " "
+ // Everything in the unevac space should be marked for young generation CC,
+ // except for large objects.
+ DCHECK(!young_gen_ || region_space_bitmap_->Test(ref) || region_space_->IsLargeObject(ref))
+ << ref << " "
<< ref->GetClass<kVerifyNone, kWithoutReadBarrier>()->PrettyClass();
- // Since the mark bitmap is still filled in from last GC, we can not use that or else the
- // mutator may see references to the from space. Instead, use the baker pointer itself as
- // the mark bit.
+ // Since the mark bitmap is still filled in from last GC (or from marking phase of 2-phase CC),
+ // we can not use that or else the mutator may see references to the from space. Instead, use
+ // the baker pointer itself as the mark bit.
if (ref->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState())) {
// TODO: We don't actually need to scan this object later, we just need to clear the gray
// bit.
@@ -244,7 +245,7 @@
DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
- } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+ } else if (!kEnableGenerationalConcurrentCopyingCollection
|| done_scanning_.load(std::memory_order_acquire)) {
// If the card table scanning is not finished yet, then only read-barrier
// state should be checked. Checking the mark bitmap is unreliable as there
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 7736568..3b57b07 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -95,6 +95,8 @@
weak_ref_access_enabled_(true),
copied_live_bytes_ratio_sum_(0.f),
gc_count_(0),
+ region_space_inter_region_bitmap_(nullptr),
+ non_moving_space_inter_region_bitmap_(nullptr),
reclaimed_bytes_ratio_sum_(0.f),
young_gen_(young_gen),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
@@ -188,6 +190,11 @@
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
InitializePhase();
+ // In case of forced evacuation, all regions are evacuated and hence no
+ // need to compute live_bytes.
+ if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_ && !force_evacuate_all_) {
+ MarkingPhase();
+ }
}
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
// Switch to read barrier mark entrypoints before we gray the objects. This is required in case
@@ -201,7 +208,7 @@
FlipThreadRoots();
{
ReaderMutexLock mu(self, *Locks::mutator_lock_);
- MarkingPhase();
+ CopyingPhase();
}
// Verify no from space refs. This causes a pause.
if (kEnableNoFromSpaceRefsVerification) {
@@ -280,6 +287,29 @@
gc_barrier_->Increment(self, barrier_count);
}
+void ConcurrentCopying::CreateInterRegionRefBitmaps() {
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+ DCHECK(region_space_inter_region_bitmap_ == nullptr);
+ DCHECK(non_moving_space_inter_region_bitmap_ == nullptr);
+ DCHECK(region_space_ != nullptr);
+ DCHECK(heap_->non_moving_space_ != nullptr);
+ // Region-space
+ region_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+ "region-space inter region ref bitmap",
+ reinterpret_cast<uint8_t*>(region_space_->Begin()),
+ region_space_->Limit() - region_space_->Begin()));
+ CHECK(region_space_inter_region_bitmap_ != nullptr)
+ << "Couldn't allocate region-space inter region ref bitmap";
+
+ // non-moving-space
+ non_moving_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+ "non-moving-space inter region ref bitmap",
+ reinterpret_cast<uint8_t*>(heap_->non_moving_space_->Begin()),
+ heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin()));
+ CHECK(non_moving_space_inter_region_bitmap_ != nullptr)
+ << "Couldn't allocate non-moving-space inter region ref bitmap";
+}
+
void ConcurrentCopying::BindBitmaps() {
Thread* self = Thread::Current();
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
@@ -292,6 +322,7 @@
} else {
CHECK(!space->IsZygoteSpace());
CHECK(!space->IsImageSpace());
+ CHECK(space == region_space_ || space == heap_->non_moving_space_);
if (kEnableGenerationalConcurrentCopyingCollection) {
if (space == region_space_) {
region_space_bitmap_ = region_space_->GetMarkBitmap();
@@ -299,12 +330,22 @@
DCHECK_EQ(space->GetGcRetentionPolicy(), space::kGcRetentionPolicyAlwaysCollect);
space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
}
- // Age all of the cards for the region space so that we know which evac regions to scan.
- Runtime::Current()->GetHeap()->GetCardTable()->ModifyCardsAtomic(
- space->Begin(),
- space->End(),
- AgeCardVisitor(),
- VoidFunctor());
+ if (young_gen_) {
+ // Age all of the cards for the region space so that we know which evac regions to scan.
+ heap_->GetCardTable()->ModifyCardsAtomic(space->Begin(),
+ space->End(),
+ AgeCardVisitor(),
+ VoidFunctor());
+ } else {
+ // In a full-heap GC cycle, the card-table corresponding to region-space and
+ // non-moving space can be cleared, because this cycle only needs to
+ // capture writes during the marking phase of this cycle to catch
+ // objects that skipped marking due to heap mutation. Furthermore,
+ // if the next GC is a young-gen cycle, then it only needs writes to
+ // be captured after the thread-flip of this GC cycle, as that is when
+ // the young-gen for the next GC cycle starts getting populated.
+ heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
+ }
} else {
if (space == region_space_) {
// It is OK to clear the bitmap with mutators running since the only place it is read is
@@ -381,6 +422,7 @@
if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
region_space_bitmap_->Clear();
}
+ mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, std::memory_order_relaxed);
// Mark all of the zygote large objects without graying them.
MarkZygoteLargeObjects();
}
@@ -471,7 +513,7 @@
TimingLogger::ScopedTiming split("(Paused)FlipCallback", cc->GetTimings());
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
- if (kVerifyNoMissingCardMarks) {
+ if (kVerifyNoMissingCardMarks && cc->young_gen_) {
cc->VerifyNoMissingCardMarks();
}
CHECK_EQ(thread, self);
@@ -485,9 +527,11 @@
}
{
TimingLogger::ScopedTiming split2("(Paused)SetFromSpace", cc->GetTimings());
- // Only change live bytes for full CC.
+ // Only change live bytes for 1-phase full heap CC.
cc->region_space_->SetFromSpace(
- cc->rb_table_, evac_mode, /*clear_live_bytes=*/ !cc->young_gen_);
+ cc->rb_table_,
+ evac_mode,
+ /*clear_live_bytes=*/ !kEnableGenerationalConcurrentCopyingCollection);
}
cc->SwapStacks();
if (ConcurrentCopying::kEnableFromSpaceAccountingCheck) {
@@ -496,9 +540,7 @@
cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated();
}
cc->is_marking_ = true;
- cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal,
- std::memory_order_relaxed);
- if (kIsDebugBuild && !cc->young_gen_) {
+ if (kIsDebugBuild && !kEnableGenerationalConcurrentCopyingCollection) {
cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared();
}
if (UNLIKELY(Runtime::Current()->IsActiveTransaction())) {
@@ -684,7 +726,7 @@
// Switch threads that from from-space to to-space refs. Forward/mark the thread roots.
void ConcurrentCopying::FlipThreadRoots() {
TimingLogger::ScopedTiming split("FlipThreadRoots", GetTimings());
- if (kVerboseMode) {
+ if (kVerboseMode || heap_->dump_region_info_before_gc_) {
LOG(INFO) << "time=" << region_space_->Time();
region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
}
@@ -860,13 +902,484 @@
ConcurrentCopying* const collector_;
};
-// Concurrently mark roots that are guarded by read barriers and process the mark stack.
+template <bool kAtomicTestAndSet>
+class ConcurrentCopying::CaptureRootsForMarkingVisitor : public RootVisitor {
+ public:
+ explicit CaptureRootsForMarkingVisitor(ConcurrentCopying* cc, Thread* self)
+ : collector_(cc), self_(self) {}
+
+ void VisitRoots(mirror::Object*** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::Object** root = roots[i];
+ mirror::Object* ref = *root;
+ if (ref != nullptr && !collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+ collector_->PushOntoMarkStack(self_, ref);
+ }
+ }
+ }
+
+ void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
+ size_t count,
+ const RootInfo& info ATTRIBUTE_UNUSED) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ for (size_t i = 0; i < count; ++i) {
+ mirror::CompressedReference<mirror::Object>* const root = roots[i];
+ if (!root->IsNull()) {
+ mirror::Object* ref = root->AsMirrorPtr();
+ if (!collector_->TestAndSetMarkBitForRef<kAtomicTestAndSet>(ref)) {
+ collector_->PushOntoMarkStack(self_, ref);
+ }
+ }
+ }
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+ Thread* const self_;
+};
+
+class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
+ public:
+ RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
+ bool disable_weak_ref_access)
+ : concurrent_copying_(concurrent_copying),
+ disable_weak_ref_access_(disable_weak_ref_access) {
+ }
+
+ void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
+ // Note: self is not necessarily equal to thread since thread may be suspended.
+ Thread* const self = Thread::Current();
+ CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
+ << thread->GetState() << " thread " << thread << " self " << self;
+ // Revoke thread local mark stacks.
+ accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
+ if (tl_mark_stack != nullptr) {
+ MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
+ concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
+ thread->SetThreadLocalMarkStack(nullptr);
+ }
+ // Disable weak ref access.
+ if (disable_weak_ref_access_) {
+ thread->SetWeakRefAccessEnabled(false);
+ }
+ // If thread is a running mutator, then act on behalf of the garbage collector.
+ // See the code in ThreadList::RunCheckpoint.
+ concurrent_copying_->GetBarrier().Pass(self);
+ }
+
+ protected:
+ ConcurrentCopying* const concurrent_copying_;
+
+ private:
+ const bool disable_weak_ref_access_;
+};
+
+class ConcurrentCopying::CaptureThreadRootsForMarkingAndCheckpoint :
+ public RevokeThreadLocalMarkStackCheckpoint {
+ public:
+ explicit CaptureThreadRootsForMarkingAndCheckpoint(ConcurrentCopying* cc) :
+ RevokeThreadLocalMarkStackCheckpoint(cc, /* disable_weak_ref_access */ false) {}
+
+ void Run(Thread* thread) override
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ Thread* const self = Thread::Current();
+ ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ // We can use the non-CAS VisitRoots functions below because we update thread-local GC roots
+ // only.
+ CaptureRootsForMarkingVisitor</*kAtomicTestAndSet*/ true> visitor(concurrent_copying_, self);
+ thread->VisitRoots(&visitor, kVisitRootFlagAllRoots);
+ // Barrier handling is done in the base class' Run() below.
+ RevokeThreadLocalMarkStackCheckpoint::Run(thread);
+ }
+};
+
+void ConcurrentCopying::CaptureThreadRootsForMarking() {
+ TimingLogger::ScopedTiming split("CaptureThreadRootsForMarking", GetTimings());
+ if (kVerboseMode) {
+ LOG(INFO) << "time=" << region_space_->Time();
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+ }
+ Thread* const self = Thread::Current();
+ CaptureThreadRootsForMarkingAndCheckpoint check_point(this);
+ ThreadList* thread_list = Runtime::Current()->GetThreadList();
+ gc_barrier_->Init(self, 0);
+ size_t barrier_count = thread_list->RunCheckpoint(&check_point, /* callback */ nullptr);
+ // If there are no threads to wait for, which implies that all the checkpoint functions are finished,
+ // then no need to release the mutator lock.
+ if (barrier_count == 0) {
+ return;
+ }
+ Locks::mutator_lock_->SharedUnlock(self);
+ {
+ ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
+ gc_barrier_->Increment(self, barrier_count);
+ }
+ Locks::mutator_lock_->SharedLock(self);
+ if (kVerboseMode) {
+ LOG(INFO) << "time=" << region_space_->Time();
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+ LOG(INFO) << "GC end of CaptureThreadRootsForMarking";
+ }
+}
+
+// Used to scan ref fields of an object.
+template <bool kHandleInterRegionRefs>
+class ConcurrentCopying::ComputeLiveBytesAndMarkRefFieldsVisitor {
+ public:
+ explicit ComputeLiveBytesAndMarkRefFieldsVisitor(ConcurrentCopying* collector,
+ size_t obj_region_idx)
+ : collector_(collector),
+ obj_region_idx_(obj_region_idx),
+ contains_inter_region_idx_(false) {}
+
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
+ DCHECK_EQ(collector_->RegionSpace()->RegionIdxForRef(obj), obj_region_idx_);
+ DCHECK(kHandleInterRegionRefs || collector_->immune_spaces_.ContainsObject(obj));
+ CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset));
+ }
+
+ void operator()(ObjPtr<mirror::Class> klass, ObjPtr<mirror::Reference> ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
+ DCHECK(klass->IsTypeOfReferenceClass());
+ // If the referent is not null, then we must re-visit the object during
+ // copying phase to enqueue it for delayed processing and setting
+ // read-barrier state to gray to ensure that call to GetReferent() triggers
+ // the read-barrier. We use same data structure that is used to remember
+ // objects with inter-region refs for this purpose too.
+ if (kHandleInterRegionRefs
+ && !contains_inter_region_idx_
+ && ref->AsReference()->GetReferent<kWithoutReadBarrier>() != nullptr) {
+ contains_inter_region_idx_ = true;
+ }
+ }
+
+ void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (!root->IsNull()) {
+ VisitRoot(root);
+ }
+ }
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ ALWAYS_INLINE
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ CheckReference(root->AsMirrorPtr());
+ }
+
+ bool ContainsInterRegionRefs() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
+ return contains_inter_region_idx_;
+ }
+
+ private:
+ void CheckReference(mirror::Object* ref) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ if (ref == nullptr) {
+ // Nothing to do.
+ return;
+ }
+ if (!collector_->TestAndSetMarkBitForRef(ref)) {
+ collector_->PushOntoLocalMarkStack(ref);
+ }
+ if (kHandleInterRegionRefs && !contains_inter_region_idx_) {
+ size_t ref_region_idx = collector_->RegionSpace()->RegionIdxForRef(ref);
+ // If a region-space object refers to an outside object, we will have a
+ // mismatch of region idx, but the object need not be re-visited in
+ // copying phase.
+ if (ref_region_idx != static_cast<size_t>(-1) && obj_region_idx_ != ref_region_idx) {
+ contains_inter_region_idx_ = true;
+ }
+ }
+ }
+
+ ConcurrentCopying* const collector_;
+ const size_t obj_region_idx_;
+ mutable bool contains_inter_region_idx_;
+};
+
+void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
+ DCHECK(ref != nullptr);
+ DCHECK(!immune_spaces_.ContainsObject(ref));
+ DCHECK(TestMarkBitmapForRef(ref));
+ size_t obj_region_idx = static_cast<size_t>(-1);
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ obj_region_idx = region_space_->RegionIdxForRefUnchecked(ref);
+ // Add live bytes to the corresponding region
+ if (!region_space_->IsRegionNewlyAllocated(obj_region_idx)) {
+ // Newly Allocated regions are always chosen for evacuation. So no need
+ // to update live_bytes_.
+ size_t obj_size = ref->SizeOf<kDefaultVerifyFlags>();
+ size_t alloc_size = RoundUp(obj_size, space::RegionSpace::kAlignment);
+ region_space_->AddLiveBytes(ref, alloc_size);
+ }
+ }
+ ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true>
+ visitor(this, obj_region_idx);
+ ref->VisitReferences</*kVisitNativeRoots=*/ true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor, visitor);
+ // Mark the corresponding card dirty if the object contains any
+ // inter-region reference.
+ if (visitor.ContainsInterRegionRefs()) {
+ if (obj_region_idx == static_cast<size_t>(-1)) {
+ // If an inter-region ref has been found in a non-region-space, then it
+ // must be non-moving-space. This is because this function cannot be
+ // called on a immune-space object, and a large-object-space object has
+ // only class object reference, which is either in some immune-space, or
+ // in non-moving-space.
+ DCHECK(heap_->non_moving_space_->HasAddress(ref));
+ non_moving_space_inter_region_bitmap_->Set(ref);
+ } else {
+ region_space_inter_region_bitmap_->Set(ref);
+ }
+ }
+}
+
+template <bool kAtomic>
+bool ConcurrentCopying::TestAndSetMarkBitForRef(mirror::Object* ref) {
+ accounting::ContinuousSpaceBitmap* bitmap = nullptr;
+ accounting::LargeObjectBitmap* los_bitmap = nullptr;
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ bitmap = region_space_bitmap_;
+ } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+ bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
+ } else if (immune_spaces_.ContainsObject(ref)) {
+ // References to immune space objects are always live.
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+ return true;
+ } else {
+ // Should be a large object. Must be page aligned and the LOS must exist.
+ if (kIsDebugBuild
+ && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // It must be heap corruption. Remove memory protection and dump data.
+ region_space_->Unprotect();
+ heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+ MemberOffset(0),
+ ref,
+ /* fatal */ true);
+ }
+ los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
+ }
+ if (kAtomic) {
+ return (bitmap != nullptr) ? bitmap->AtomicTestAndSet(ref) : los_bitmap->AtomicTestAndSet(ref);
+ } else {
+ return (bitmap != nullptr) ? bitmap->Set(ref) : los_bitmap->Set(ref);
+ }
+}
+
+bool ConcurrentCopying::TestMarkBitmapForRef(mirror::Object* ref) {
+ if (LIKELY(region_space_->HasAddress(ref))) {
+ return region_space_bitmap_->Test(ref);
+ } else if (heap_->GetNonMovingSpace()->HasAddress(ref)) {
+ return heap_->GetNonMovingSpace()->GetMarkBitmap()->Test(ref);
+ } else if (immune_spaces_.ContainsObject(ref)) {
+ // References to immune space objects are always live.
+ DCHECK(heap_mark_bitmap_->GetContinuousSpaceBitmap(ref)->Test(ref));
+ return true;
+ } else {
+ // Should be a large object. Must be page aligned and the LOS must exist.
+ if (kIsDebugBuild
+ && (!IsAligned<kPageSize>(ref) || heap_->GetLargeObjectsSpace() == nullptr)) {
+ // It must be heap corruption. Remove memory protection and dump data.
+ region_space_->Unprotect();
+ heap_->GetVerification()->LogHeapCorruption(/* obj */ nullptr,
+ MemberOffset(0),
+ ref,
+ /* fatal */ true);
+ }
+ return heap_->GetLargeObjectsSpace()->GetMarkBitmap()->Test(ref);
+ }
+}
+
+void ConcurrentCopying::PushOntoLocalMarkStack(mirror::Object* ref) {
+ if (kIsDebugBuild) {
+ Thread *self = Thread::Current();
+ DCHECK_EQ(thread_running_gc_, self);
+ DCHECK(self->GetThreadLocalMarkStack() == nullptr);
+ }
+ DCHECK_EQ(mark_stack_mode_.load(std::memory_order_relaxed), kMarkStackModeThreadLocal);
+ gc_mark_stack_->PushBack(ref);
+}
+
+void ConcurrentCopying::ProcessMarkStackForMarkingAndComputeLiveBytes() {
+ // Process thread-local mark stack containing thread roots
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false,
+ /* checkpoint_callback */ nullptr,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ AddLiveBytesAndScanRef(ref);
+ });
+
+ while (!gc_mark_stack_->IsEmpty()) {
+ mirror::Object* ref = gc_mark_stack_->PopBack();
+ AddLiveBytesAndScanRef(ref);
+ }
+}
+
+class ConcurrentCopying::ImmuneSpaceCaptureRefsVisitor {
+ public:
+ explicit ImmuneSpaceCaptureRefsVisitor(ConcurrentCopying* cc) : collector_(cc) {}
+
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
+ ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ false>
+ visitor(collector_, /*obj_region_idx*/ static_cast<size_t>(-1));
+ obj->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ visitor, visitor);
+ }
+
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
+ reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
+ }
+
+ private:
+ ConcurrentCopying* const collector_;
+};
+
+/* Invariants for two-phase CC
+ * ===========================
+ * A) Definitions
+ * ---------------
+ * 1) Black: marked in bitmap, rb_state is non-gray, and not in mark stack
+ * 2) Black-clean: marked in bitmap, and corresponding card is clean/aged
+ * 3) Black-dirty: marked in bitmap, and corresponding card is dirty
+ * 4) Gray: marked in bitmap, and exists in mark stack
+ * 5) Gray-dirty: marked in bitmap, rb_state is gray, corresponding card is
+ * dirty, and exists in mark stack
+ * 6) White: unmarked in bitmap, rb_state is non-gray, and not in mark stack
+ *
+ * B) Before marking phase
+ * -----------------------
+ * 1) All objects are white
+ * 2) Cards are either clean or aged (cannot be asserted without a STW pause)
+ * 3) Mark bitmap is cleared
+ * 4) Mark stack is empty
+ *
+ * C) During marking phase
+ * ------------------------
+ * 1) If a black object holds an inter-region or white reference, then its
+ * corresponding card is dirty. In other words, it changes from being
+ * black-clean to black-dirty
+ * 2) No black-clean object points to a white object
+ *
+ * D) After marking phase
+ * -----------------------
+ * 1) There are no gray objects
+ * 2) All newly allocated objects are in from space
+ * 3) No white object can be reachable, directly or otherwise, from a
+ * black-clean object
+ *
+ * E) During copying phase
+ * ------------------------
+ * 1) Mutators cannot observe white and black-dirty objects
+ * 2) New allocations are in to-space (newly allocated regions are part of to-space)
+ * 3) An object in mark stack must have its rb_state = Gray
+ *
+ * F) During card table scan
+ * --------------------------
+ * 1) Referents corresponding to root references are gray or in to-space
+ * 2) Every path from an object that is read or written by a mutator during
+ * this period to a dirty black object goes through some gray object.
+ * Mutators preserve this by graying black objects as needed during this
+ * period. Ensures that a mutator never encounters a black dirty object.
+ *
+ * G) After card table scan
+ * ------------------------
+ * 1) There are no black-dirty objects
+ * 2) Referents corresponding to root references are gray, black-clean or in
+ * to-space
+ *
+ * H) After copying phase
+ * -----------------------
+ * 1) Mark stack is empty
+ * 2) No references into evacuated from-space
+ * 3) No reference to an object which is unmarked and is also not in newly
+ * allocated region. In other words, no reference to white objects.
+*/
+
void ConcurrentCopying::MarkingPhase() {
TimingLogger::ScopedTiming split("MarkingPhase", GetTimings());
if (kVerboseMode) {
LOG(INFO) << "GC MarkingPhase";
}
+ accounting::CardTable* const card_table = heap_->GetCardTable();
+ Thread* const self = Thread::Current();
+ // Clear live_bytes_ of every non-free region, except the ones that are newly
+ // allocated.
+ region_space_->SetAllRegionLiveBytesZero();
+ if (kIsDebugBuild) {
+ region_space_->AssertAllRegionLiveBytesZeroOrCleared();
+ }
+ // Scan immune spaces
+ {
+ TimingLogger::ScopedTiming split2("ScanImmuneSpaces", GetTimings());
+ for (auto& space : immune_spaces_.GetSpaces()) {
+ DCHECK(space->IsImageSpace() || space->IsZygoteSpace());
+ accounting::ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
+ accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+ ImmuneSpaceCaptureRefsVisitor visitor(this);
+ if (table != nullptr) {
+ table->VisitObjects(ImmuneSpaceCaptureRefsVisitor::Callback, &visitor);
+ } else {
+ WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ card_table->Scan<false>(
+ live_bitmap,
+ space->Begin(),
+ space->Limit(),
+ visitor,
+ accounting::CardTable::kCardDirty - 1);
+ }
+ }
+ }
+ // Scan runtime roots
+ {
+ TimingLogger::ScopedTiming split2("VisitConcurrentRoots", GetTimings());
+ CaptureRootsForMarkingVisitor visitor(this, self);
+ Runtime::Current()->VisitConcurrentRoots(&visitor, kVisitRootFlagAllRoots);
+ }
+ {
+ // TODO: don't visit the transaction roots if it's not active.
+ TimingLogger::ScopedTiming split2("VisitNonThreadRoots", GetTimings());
+ CaptureRootsForMarkingVisitor visitor(this, self);
+ Runtime::Current()->VisitNonThreadRoots(&visitor);
+ }
+ // Capture thread roots
+ CaptureThreadRootsForMarking();
+ // Process mark stack
+ ProcessMarkStackForMarkingAndComputeLiveBytes();
+
+ if (kVerboseMode) {
+ LOG(INFO) << "GC end of MarkingPhase";
+ }
+}
+
+template <bool kNoUnEvac>
+void ConcurrentCopying::ScanDirtyObject(mirror::Object* obj) {
+ Scan<kNoUnEvac>(obj);
+ // Set the read-barrier state of a reference-type object to gray if its
+ // referent is not marked yet. This is to ensure that if GetReferent() is
+ // called, it triggers the read-barrier to process the referent before use.
+ if (UNLIKELY((obj->GetClass<kVerifyNone, kWithoutReadBarrier>()->IsTypeOfReferenceClass()))) {
+ mirror::Object* referent =
+ obj->AsReference<kVerifyNone, kWithoutReadBarrier>()->GetReferent<kWithoutReadBarrier>();
+ if (referent != nullptr && !IsInToSpace(referent)) {
+ obj->AtomicSetReadBarrierState(ReadBarrier::NonGrayState(), ReadBarrier::GrayState());
+ }
+ }
+}
+
+// Concurrently mark roots that are guarded by read barriers and process the mark stack.
+void ConcurrentCopying::CopyingPhase() {
+ TimingLogger::ScopedTiming split("CopyingPhase", GetTimings());
+ if (kVerboseMode) {
+ LOG(INFO) << "GC CopyingPhase";
+ }
Thread* self = Thread::Current();
+ accounting::CardTable* const card_table = heap_->GetCardTable();
if (kIsDebugBuild) {
MutexLock mu(self, *Locks::thread_list_lock_);
CHECK(weak_ref_access_enabled_);
@@ -879,7 +1392,7 @@
if (kUseBakerReadBarrier) {
gc_grays_immune_objects_ = false;
}
- if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ if (kEnableGenerationalConcurrentCopyingCollection) {
if (kVerboseMode) {
LOG(INFO) << "GC ScanCardsForSpace";
}
@@ -897,39 +1410,76 @@
continue;
}
// Scan all of the objects on dirty cards in unevac from space, and non moving space. These
- // are from previous GCs and may reference things in the from space.
+ // are from previous GCs (or from marking phase of 2-phase full GC) and may reference things
+ // in the from space.
//
// Note that we do not need to process the large-object space (the only discontinuous space)
// as it contains only large string objects and large primitive array objects, that have no
// reference to other objects, except their class. There is no need to scan these large
// objects, as the String class and the primitive array classes are expected to never move
- // during a minor (young-generation) collection:
+ // during a collection:
// - In the case where we run with a boot image, these classes are part of the image space,
// which is an immune space.
// - In the case where we run without a boot image, these classes are allocated in the
// non-moving space (see art::ClassLinker::InitWithoutImage).
- Runtime::Current()->GetHeap()->GetCardTable()->Scan<false>(
+ card_table->Scan<false>(
space->GetMarkBitmap(),
space->Begin(),
space->End(),
[this, space](mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES_SHARED(Locks::mutator_lock_) {
- // Don't push or gray unevac refs.
- if (kIsDebugBuild && space == region_space_) {
- // We may get unevac large objects.
- if (!region_space_->IsInUnevacFromSpace(obj)) {
- CHECK(region_space_bitmap_->Test(obj));
- region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
- LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+ // TODO: This code may be refactored to avoid scanning the object while
+ // done_scanning_ is false by setting rb_state to gray, and pushing the
+ // object on mark stack. However, it will also require clearing the
+ // corresponding mark-bit and, for region space objects,
+ // decrementing the object's size from the corresponding region's
+ // live_bytes.
+ if (young_gen_) {
+ // Don't push or gray unevac refs.
+ if (kIsDebugBuild && space == region_space_) {
+ // We may get unevac large objects.
+ if (!region_space_->IsInUnevacFromSpace(obj)) {
+ CHECK(region_space_bitmap_->Test(obj));
+ region_space_->DumpRegionForObject(LOG_STREAM(FATAL_WITHOUT_ABORT), obj);
+ LOG(FATAL) << "Scanning " << obj << " not in unevac space";
+ }
}
+ ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+ } else if (space != region_space_) {
+ DCHECK(space == heap_->non_moving_space_);
+ // We need to process un-evac references as they may be unprocessed
+ // if they skipped the marking phase due to heap mutation.
+ ScanDirtyObject</*kNoUnEvac*/ false>(obj);
+ non_moving_space_inter_region_bitmap_->Clear(obj);
+ } else if (region_space_->IsInUnevacFromSpace(obj)) {
+ ScanDirtyObject</*kNoUnEvac*/ false>(obj);
+ region_space_inter_region_bitmap_->Clear(obj);
}
- Scan<true>(obj);
},
- accounting::CardTable::kCardDirty - 1);
+ accounting::CardTable::kCardAged);
+
+ if (!young_gen_) {
+ auto visitor = [this](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
+ // We don't need to process un-evac references as any unprocessed
+ // ones will be taken care of in the card-table scan above.
+ ScanDirtyObject</*kNoUnEvac*/ true>(obj);
+ };
+ if (space == region_space_) {
+ region_space_->ScanUnevacFromSpace(region_space_inter_region_bitmap_.get(), visitor);
+ } else {
+ DCHECK(space == heap_->non_moving_space_);
+ non_moving_space_inter_region_bitmap_->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->End()),
+ visitor);
+ }
+ }
}
// Done scanning unevac space.
done_scanning_.store(true, std::memory_order_release);
+ // NOTE: inter-region-ref bitmaps can be cleared here to release memory, if needed.
+ // Currently we do it in ReclaimPhase().
if (kVerboseMode) {
LOG(INFO) << "GC end of ScanCardsForSpace";
}
@@ -947,10 +1497,13 @@
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects && table != nullptr) {
table->VisitObjects(ImmuneSpaceScanObjVisitor::Callback, &visitor);
} else {
- // TODO: Scan only the aged cards.
- live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->Limit()),
- visitor);
+ WriterMutexLock rmu(Thread::Current(), *Locks::heap_bitmap_lock_);
+ card_table->Scan<false>(
+ live_bitmap,
+ space->Begin(),
+ space->Limit(),
+ visitor,
+ accounting::CardTable::kCardDirty - 1);
}
}
}
@@ -1059,7 +1612,7 @@
CHECK(weak_ref_access_enabled_);
}
if (kVerboseMode) {
- LOG(INFO) << "GC end of MarkingPhase";
+ LOG(INFO) << "GC end of CopyingPhase";
}
}
@@ -1419,40 +1972,6 @@
ConcurrentCopying* const collector_;
};
-class ConcurrentCopying::RevokeThreadLocalMarkStackCheckpoint : public Closure {
- public:
- RevokeThreadLocalMarkStackCheckpoint(ConcurrentCopying* concurrent_copying,
- bool disable_weak_ref_access)
- : concurrent_copying_(concurrent_copying),
- disable_weak_ref_access_(disable_weak_ref_access) {
- }
-
- void Run(Thread* thread) override NO_THREAD_SAFETY_ANALYSIS {
- // Note: self is not necessarily equal to thread since thread may be suspended.
- Thread* self = Thread::Current();
- CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
- << thread->GetState() << " thread " << thread << " self " << self;
- // Revoke thread local mark stacks.
- accounting::AtomicStack<mirror::Object>* tl_mark_stack = thread->GetThreadLocalMarkStack();
- if (tl_mark_stack != nullptr) {
- MutexLock mu(self, concurrent_copying_->mark_stack_lock_);
- concurrent_copying_->revoked_mark_stacks_.push_back(tl_mark_stack);
- thread->SetThreadLocalMarkStack(nullptr);
- }
- // Disable weak ref access.
- if (disable_weak_ref_access_) {
- thread->SetWeakRefAccessEnabled(false);
- }
- // If thread is a running mutator, then act on behalf of the garbage collector.
- // See the code in ThreadList::RunCheckpoint.
- concurrent_copying_->GetBarrier().Pass(self);
- }
-
- private:
- ConcurrentCopying* const concurrent_copying_;
- const bool disable_weak_ref_access_;
-};
-
void ConcurrentCopying::RevokeThreadLocalMarkStacks(bool disable_weak_ref_access,
Closure* checkpoint_callback) {
Thread* self = Thread::Current();
@@ -1510,7 +2029,11 @@
if (mark_stack_mode == kMarkStackModeThreadLocal) {
// Process the thread-local mark stacks and the GC mark stack.
count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ false,
- /* checkpoint_callback= */ nullptr);
+ /* checkpoint_callback= */ nullptr,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProcessMarkStackRef(ref);
+ });
while (!gc_mark_stack_->IsEmpty()) {
mirror::Object* to_ref = gc_mark_stack_->PopBack();
ProcessMarkStackRef(to_ref);
@@ -1566,8 +2089,10 @@
return count == 0;
}
+template <typename Processor>
size_t ConcurrentCopying::ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
- Closure* checkpoint_callback) {
+ Closure* checkpoint_callback,
+ const Processor& processor) {
// Run a checkpoint to collect all thread local mark stacks and iterate over them all.
RevokeThreadLocalMarkStacks(disable_weak_ref_access, checkpoint_callback);
size_t count = 0;
@@ -1581,7 +2106,7 @@
for (accounting::AtomicStack<mirror::Object>* mark_stack : mark_stacks) {
for (StackReference<mirror::Object>* p = mark_stack->Begin(); p != mark_stack->End(); ++p) {
mirror::Object* to_ref = p->AsMirrorPtr();
- ProcessMarkStackRef(to_ref);
+ processor(to_ref);
++count;
}
{
@@ -1632,6 +2157,12 @@
perform_scan = true;
// Only add to the live bytes if the object was not already marked and we are not the young
// GC.
+ // Why add live bytes even after 2-phase GC?
+ // We need to ensure that if there is an unevac region with any live
+ // objects, then its live_bytes must be non-zero. Otherwise,
+ // ClearFromSpace() will clear the region. Considering that we may skip
+ // live objects during marking phase of 2-phase GC, we have to take care
+ // of such objects here.
add_to_live_bytes = true;
}
break;
@@ -1773,7 +2304,12 @@
DisableWeakRefAccessCallback dwrac(this);
// Process the thread local mark stacks one last time after switching to the shared mark stack
// mode and disable weak ref accesses.
- ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true, &dwrac);
+ ProcessThreadLocalMarkStacks(/* disable_weak_ref_access= */ true,
+ &dwrac,
+ [this] (mirror::Object* ref)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ ProcessMarkStackRef(ref);
+ });
if (kVerboseMode) {
LOG(INFO) << "Switched to shared mark stack mode and disabled weak ref access";
}
@@ -2039,7 +2575,7 @@
uint64_t cleared_objects;
{
TimingLogger::ScopedTiming split4("ClearFromSpace", GetTimings());
- region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects);
+ region_space_->ClearFromSpace(&cleared_bytes, &cleared_objects, /*clear_bitmap*/ !young_gen_);
// `cleared_bytes` and `cleared_objects` may be greater than the from space equivalents since
// RegionSpace::ClearFromSpace may clear empty unevac regions.
CHECK_GE(cleared_bytes, from_bytes);
@@ -2083,6 +2619,11 @@
CheckEmptyMarkStack();
+ if (heap_->dump_region_info_after_gc_) {
+ LOG(INFO) << "time=" << region_space_->Time();
+ region_space_->DumpNonFreeRegions(LOG_STREAM(INFO));
+ }
+
if (kVerboseMode) {
LOG(INFO) << "GC end of ReclaimPhase";
}
@@ -2348,7 +2889,7 @@
DCHECK(!immune_spaces_.ContainsObject(from_ref)) << "ref=" << from_ref;
if (kUseBakerReadBarrier && from_ref->GetReadBarrierStateAcquire() == ReadBarrier::GrayState()) {
return true;
- } else if (!(kEnableGenerationalConcurrentCopyingCollection && young_gen_)
+ } else if (!kEnableGenerationalConcurrentCopyingCollection
|| done_scanning_.load(std::memory_order_acquire)) {
// Read the comment in IsMarkedInUnevacFromSpace()
accounting::ContinuousSpaceBitmap* mark_bitmap = heap_->GetNonMovingSpace()->GetMarkBitmap();
@@ -2939,7 +3480,7 @@
los_bitmap = heap_->GetLargeObjectsSpace()->GetMarkBitmap();
DCHECK(los_bitmap->HasAddress(ref));
}
- if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ if (kEnableGenerationalConcurrentCopyingCollection) {
// The sticky-bit CC collector is only compatible with Baker-style read barriers.
DCHECK(kUseBakerReadBarrier);
// Not done scanning, use AtomicSetReadBarrierPointer.
@@ -3012,6 +3553,9 @@
TimingLogger::ScopedTiming split("ClearRegionSpaceCards", GetTimings());
// We do not currently use the region space cards at all, madvise them away to save ram.
heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
+ } else if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
+ region_space_inter_region_bitmap_->Clear();
+ non_moving_space_inter_region_bitmap_->Clear();
}
{
MutexLock mu(self, skipped_blocks_lock_);
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 237e070..a2d4837 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -25,7 +25,7 @@
#include "mirror/object_reference.h"
#include "offsets.h"
-#include <unordered_map>
+#include <memory>
#include <vector>
namespace art {
@@ -79,6 +79,8 @@
void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+ void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void FinishPhase() REQUIRES(!mark_stack_lock_,
@@ -96,6 +98,9 @@
return kCollectorTypeCC;
}
void RevokeAllThreadLocalBuffers() override;
+ // Creates inter-region ref bitmaps for region-space and non-moving-space.
+ // Gets called in Heap construction after the two spaces are created.
+ void CreateInterRegionRefBitmaps();
void SetRegionSpace(space::RegionSpace* region_space) {
DCHECK(region_space != nullptr);
region_space_ = region_space;
@@ -161,6 +166,13 @@
template <bool kNoUnEvac>
void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
+ // Scan the reference fields of object 'obj' in the dirty cards during
+ // card-table scan. In addition to visiting the references, it also sets the
+ // read-barrier state to gray for Reference-type objects to ensure that
+ // GetReferent() called on these objects calls the read-barrier on the referent.
+ template <bool kNoUnEvac>
+ void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
// Process a field.
template <bool kNoUnEvac>
void Process(mirror::Object* obj, MemberOffset offset)
@@ -198,7 +210,10 @@
void VerifyNoMissingCardMarks()
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
+ template <typename Processor>
+ size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
+ Closure* checkpoint_callback,
+ const Processor& processor)
REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -295,6 +310,15 @@
// Set the read barrier mark entrypoints to non-null.
void ActivateReadBarrierEntrypoints();
+ void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
+ void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ template <bool kAtomic = false>
+ bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
+ void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES(!mark_stack_lock_);
+
space::RegionSpace* region_space_; // The underlying region space.
std::unique_ptr<Barrier> gc_barrier_;
std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
@@ -368,6 +392,10 @@
// possible for minor GC if all allocated objects are in non-moving
// space.)
size_t gc_count_;
+ // Bit is set if the corresponding object has inter-region references that
+ // were found during the marking phase of two-phase full-heap GC cycle.
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> region_space_inter_region_bitmap_;
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> non_moving_space_inter_region_bitmap_;
// reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
float reclaimed_bytes_ratio_sum_;
@@ -375,7 +403,7 @@
// Generational "sticky", only trace through dirty objects in region space.
const bool young_gen_;
// If true, the GC thread is done scanning marked objects on dirty and aged
- // card (see ConcurrentCopying::MarkingPhase).
+ // card (see ConcurrentCopying::CopyingPhase).
Atomic<bool> done_scanning_;
// The skipped blocks are memory blocks/chucks that were copies of
@@ -441,6 +469,10 @@
class VerifyNoFromSpaceRefsFieldVisitor;
class VerifyNoFromSpaceRefsVisitor;
class VerifyNoMissingCardMarkVisitor;
+ class ImmuneSpaceCaptureRefsVisitor;
+ template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
+ class CaptureThreadRootsForMarkingAndCheckpoint;
+ template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 46ff7dc..2ef3d92 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -90,13 +90,14 @@
Thread* self = Thread::Current();
uint64_t start_time = NanoTime();
uint64_t thread_cpu_start_time = ThreadCpuNanoTime();
- GetHeap()->CalculateWeightedAllocatedBytes();
+ GetHeap()->CalculatePreGcWeightedAllocatedBytes();
Iteration* current_iteration = GetCurrentIteration();
current_iteration->Reset(gc_cause, clear_soft_references);
// Note transaction mode is single-threaded and there's no asynchronous GC and this flag doesn't
// change in the middle of a GC.
is_transaction_active_ = Runtime::Current()->IsActiveTransaction();
RunPhases(); // Run all the GC phases.
+ GetHeap()->CalculatePostGcWeightedAllocatedBytes();
// Add the current timings to the cumulative timings.
cumulative_timings_.AddLogger(*GetTimings());
// Update cumulative statistics with how many bytes the GC iteration freed.
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 9e1ba35..1c09b5c 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -214,7 +214,7 @@
if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
// New_num_bytes_allocated is zero if we didn't update num_bytes_allocated_.
// That's fine.
- CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
+ CheckConcurrentGCForJava(self, new_num_bytes_allocated, &obj);
}
VerifyObject(obj);
self->VerifyStack();
@@ -254,8 +254,8 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated) {
- if (allocator_type != kAllocatorTypeTLAB &&
- allocator_type != kAllocatorTypeRegionTLAB &&
+ if (allocator_type != kAllocatorTypeRegionTLAB &&
+ allocator_type != kAllocatorTypeTLAB &&
allocator_type != kAllocatorTypeRosAlloc &&
UNLIKELY(IsOutOfMemoryOnAllocation(allocator_type, alloc_size, kGrow))) {
return nullptr;
@@ -396,30 +396,46 @@
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
size_t alloc_size,
bool grow) {
- size_t new_footprint = num_bytes_allocated_.load(std::memory_order_relaxed) + alloc_size;
- if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
- if (UNLIKELY(new_footprint > growth_limit_)) {
+ size_t old_target = target_footprint_.load(std::memory_order_relaxed);
+ while (true) {
+ size_t old_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
+ size_t new_footprint = old_allocated + alloc_size;
+ // Tests against heap limits are inherently approximate, since multiple allocations may
+ // race, and this is not atomic with the allocation.
+ if (UNLIKELY(new_footprint <= old_target)) {
+ return false;
+ } else if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
}
- if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
- if (!grow) {
+ // We are between target_footprint_ and growth_limit_.
+ if (AllocatorMayHaveConcurrentGC(allocator_type) && IsGcConcurrent()) {
+ return false;
+ } else {
+ if (grow) {
+ if (target_footprint_.compare_exchange_weak(/*inout ref*/old_target, new_footprint,
+ std::memory_order_relaxed)) {
+ VlogHeapGrowth(old_target, new_footprint, alloc_size);
+ return false;
+ } // else try again.
+ } else {
return true;
}
- // TODO: Grow for allocation is racy, fix it.
- VlogHeapGrowth(max_allowed_footprint_, new_footprint, alloc_size);
- max_allowed_footprint_ = new_footprint;
}
}
- return false;
}
-// Request a GC if new_num_bytes_allocated is sufficiently large.
-// A call with new_num_bytes_allocated == 0 is a fast no-op.
-inline void Heap::CheckConcurrentGC(Thread* self,
+inline bool Heap::ShouldConcurrentGCForJava(size_t new_num_bytes_allocated) {
+ // For a Java allocation, we only check whether the number of Java allocated bytes exceeds a
+ // threshold. By not considering native allocation here, we (a) ensure that Java heap bounds are
+ // maintained, and (b) reduce the cost of the check here.
+ return new_num_bytes_allocated >= concurrent_start_bytes_;
+}
+
+inline void Heap::CheckConcurrentGCForJava(Thread* self,
size_t new_num_bytes_allocated,
ObjPtr<mirror::Object>* obj) {
- if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
- RequestConcurrentGCAndSaveObject(self, false, obj);
+ if (UNLIKELY(ShouldConcurrentGCForJava(new_num_bytes_allocated))) {
+ RequestConcurrentGCAndSaveObject(self, false /* force_full */, obj);
}
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index bfb1019..bf8aaae 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -17,6 +17,9 @@
#include "heap.h"
#include <limits>
+#if defined(__BIONIC__) || defined(__GLIBC__)
+#include <malloc.h> // For mallinfo()
+#endif
#include <memory>
#include <vector>
@@ -187,7 +190,7 @@
bool low_memory_mode,
size_t long_pause_log_threshold,
size_t long_gc_log_threshold,
- bool ignore_max_footprint,
+ bool ignore_target_footprint,
bool use_tlab,
bool verify_pre_gc_heap,
bool verify_pre_sweeping_heap,
@@ -198,7 +201,9 @@
bool gc_stress_mode,
bool measure_gc_performance,
bool use_homogeneous_space_compaction_for_oom,
- uint64_t min_interval_homogeneous_space_compaction_by_oom)
+ uint64_t min_interval_homogeneous_space_compaction_by_oom,
+ bool dump_region_info_before_gc,
+ bool dump_region_info_after_gc)
: non_moving_space_(nullptr),
rosalloc_space_(nullptr),
dlmalloc_space_(nullptr),
@@ -214,9 +219,11 @@
long_pause_log_threshold_(long_pause_log_threshold),
long_gc_log_threshold_(long_gc_log_threshold),
process_cpu_start_time_ns_(ProcessCpuNanoTime()),
- last_process_cpu_time_ns_(process_cpu_start_time_ns_),
- weighted_allocated_bytes_(0.0),
- ignore_max_footprint_(ignore_max_footprint),
+ pre_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
+ post_gc_last_process_cpu_time_ns_(process_cpu_start_time_ns_),
+ pre_gc_weighted_allocated_bytes_(0.0),
+ post_gc_weighted_allocated_bytes_(0.0),
+ ignore_target_footprint_(ignore_target_footprint),
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
zygote_space_(nullptr),
large_object_threshold_(large_object_threshold),
@@ -229,13 +236,14 @@
next_gc_type_(collector::kGcTypePartial),
capacity_(capacity),
growth_limit_(growth_limit),
- max_allowed_footprint_(initial_size),
+ target_footprint_(initial_size),
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
- new_native_bytes_allocated_(0),
+ native_bytes_registered_(0),
old_native_bytes_allocated_(0),
+ native_objects_notified_(0),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -294,7 +302,9 @@
backtrace_lock_(nullptr),
seen_backtrace_count_(0u),
unique_backtrace_count_(0u),
- gc_disabled_for_shutdown_(false) {
+ gc_disabled_for_shutdown_(false),
+ dump_region_info_before_gc_(dump_region_info_before_gc),
+ dump_region_info_after_gc_(dump_region_info_after_gc) {
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Heap() entering";
}
@@ -614,11 +624,11 @@
task_processor_.reset(new TaskProcessor());
reference_processor_.reset(new ReferenceProcessor());
pending_task_lock_ = new Mutex("Pending task lock");
- if (ignore_max_footprint_) {
+ if (ignore_target_footprint_) {
SetIdealFootprint(std::numeric_limits<size_t>::max());
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
- CHECK_NE(max_allowed_footprint_, 0U);
+ CHECK_NE(target_footprint_.load(std::memory_order_relaxed), 0U);
// Create our garbage collectors.
for (size_t i = 0; i < 2; ++i) {
const bool concurrent = i != 0;
@@ -656,6 +666,9 @@
concurrent_copying_collector_->SetRegionSpace(region_space_);
if (kEnableGenerationalConcurrentCopyingCollection) {
young_concurrent_copying_collector_->SetRegionSpace(region_space_);
+ // At this point, non-moving space should be created.
+ DCHECK(non_moving_space_ != nullptr);
+ concurrent_copying_collector_->CreateInterRegionRefBitmaps();
}
garbage_collectors_.push_back(concurrent_copying_collector_);
if (kEnableGenerationalConcurrentCopyingCollection) {
@@ -1070,12 +1083,25 @@
}
}
-void Heap::CalculateWeightedAllocatedBytes() {
- uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+double Heap::CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
+ uint64_t current_process_cpu_time) const {
uint64_t bytes_allocated = GetBytesAllocated();
- double weight = current_process_cpu_time - last_process_cpu_time_ns_;
- weighted_allocated_bytes_ += weight * bytes_allocated;
- last_process_cpu_time_ns_ = current_process_cpu_time;
+ double weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
+ return weight * bytes_allocated;
+}
+
+void Heap::CalculatePreGcWeightedAllocatedBytes() {
+ uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+ pre_gc_weighted_allocated_bytes_ +=
+ CalculateGcWeightedAllocatedBytes(pre_gc_last_process_cpu_time_ns_, current_process_cpu_time);
+ pre_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
+}
+
+void Heap::CalculatePostGcWeightedAllocatedBytes() {
+ uint64_t current_process_cpu_time = ProcessCpuNanoTime();
+ post_gc_weighted_allocated_bytes_ +=
+ CalculateGcWeightedAllocatedBytes(post_gc_last_process_cpu_time_ns_, current_process_cpu_time);
+ post_gc_last_process_cpu_time_ns_ = current_process_cpu_time;
}
uint64_t Heap::GetTotalGcCpuTime() {
@@ -1143,10 +1169,11 @@
rosalloc_space_->DumpStats(os);
}
- os << "Registered native bytes allocated: "
- << (old_native_bytes_allocated_.load(std::memory_order_relaxed) +
- new_native_bytes_allocated_.load(std::memory_order_relaxed))
- << "\n";
+ os << "Native bytes total: " << GetNativeBytes()
+ << " registered: " << native_bytes_registered_.load(std::memory_order_relaxed) << "\n";
+
+ os << "Total native bytes at last GC: "
+ << old_native_bytes_allocated_.load(std::memory_order_relaxed) << "\n";
BaseMutex::DumpAll(os);
}
@@ -1157,8 +1184,12 @@
}
process_cpu_start_time_ns_ = ProcessCpuNanoTime();
- last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
- weighted_allocated_bytes_ = 0u;
+
+ pre_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
+ pre_gc_weighted_allocated_bytes_ = 0u;
+
+ post_gc_last_process_cpu_time_ns_ = process_cpu_start_time_ns_;
+ post_gc_weighted_allocated_bytes_ = 0u;
total_bytes_freed_ever_ = 0;
total_objects_freed_ever_ = 0;
@@ -1318,7 +1349,8 @@
size_t total_bytes_free = GetFreeMemory();
oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
<< " free bytes and " << PrettySize(GetFreeMemoryUntilOOME()) << " until OOM,"
- << " max allowed footprint " << max_allowed_footprint_ << ", growth limit "
+ << " target footprint " << target_footprint_.load(std::memory_order_relaxed)
+ << ", growth limit "
<< growth_limit_;
// If the allocation failed due to fragmentation, print out the largest continuous allocation.
if (total_bytes_free >= byte_count) {
@@ -1853,7 +1885,7 @@
}
void Heap::SetTargetHeapUtilization(float target) {
- DCHECK_GT(target, 0.0f); // asserted in Java code
+ DCHECK_GT(target, 0.1f); // asserted in Java code
DCHECK_LT(target, 1.0f);
target_utilization_ = target;
}
@@ -2267,8 +2299,8 @@
}
if (IsGcConcurrent()) {
concurrent_start_bytes_ =
- std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
- kMinConcurrentRemainingBytes;
+ UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+ kMinConcurrentRemainingBytes);
} else {
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
}
@@ -2597,6 +2629,39 @@
ATRACE_INT("Heap size (KB)", heap_size / KB);
}
+size_t Heap::GetNativeBytes() {
+ size_t malloc_bytes;
+#if defined(__BIONIC__) || defined(__GLIBC__)
+ size_t mmapped_bytes;
+ struct mallinfo mi = mallinfo();
+ // In spite of the documentation, the jemalloc version of this call seems to do what we want,
+ // and it is thread-safe.
+ if (sizeof(size_t) > sizeof(mi.uordblks) && sizeof(size_t) > sizeof(mi.hblkhd)) {
+ // Shouldn't happen, but glibc declares uordblks as int.
+ // Avoiding sign extension gets us correct behavior for another 2 GB.
+ malloc_bytes = (unsigned int)mi.uordblks;
+ mmapped_bytes = (unsigned int)mi.hblkhd;
+ } else {
+ malloc_bytes = mi.uordblks;
+ mmapped_bytes = mi.hblkhd;
+ }
+ // From the spec, we clearly have mmapped_bytes <= malloc_bytes. Reality is sometimes
+ // dramatically different. (b/119580449) If so, fudge it.
+ if (mmapped_bytes > malloc_bytes) {
+ malloc_bytes = mmapped_bytes;
+ }
+#else
+ // We should hit this case only in contexts in which GC triggering is not critical. Effectively
+ // disable GC triggering based on malloc().
+ malloc_bytes = 1000;
+#endif
+ return malloc_bytes + native_bytes_registered_.load(std::memory_order_relaxed);
+ // An alternative would be to get RSS from /proc/self/statm. Empirically, that's no
+ // more expensive, and it would allow us to count memory allocated by means other than malloc.
+ // However it would change as pages are unmapped and remapped due to memory pressure, among
+ // other things. It seems risky to trigger GCs as a result of such changes.
+}
+
collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
GcCause gc_cause,
bool clear_soft_references) {
@@ -2647,16 +2712,7 @@
++runtime->GetStats()->gc_for_alloc_count;
++self->GetStats()->gc_for_alloc_count;
}
- const uint64_t bytes_allocated_before_gc = GetBytesAllocated();
-
- if (gc_type == NonStickyGcType()) {
- // Move all bytes from new_native_bytes_allocated_ to
- // old_native_bytes_allocated_ now that GC has been triggered, resetting
- // new_native_bytes_allocated_ to zero in the process.
- old_native_bytes_allocated_.fetch_add(
- new_native_bytes_allocated_.exchange(0, std::memory_order_relaxed),
- std::memory_order_relaxed);
- }
+ const size_t bytes_allocated_before_gc = GetBytesAllocated();
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -2683,7 +2739,7 @@
// active_concurrent_copying_collector_. So we should not concurrency here.
active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
young_concurrent_copying_collector_ : concurrent_copying_collector_;
- active_concurrent_copying_collector_->SetRegionSpace(region_space_);
+ DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_);
}
collector = active_concurrent_copying_collector_;
break;
@@ -2728,6 +2784,9 @@
FinishGC(self, gc_type);
// Inform DDMS that a GC completed.
Dbg::GcDidFinish();
+
+ old_native_bytes_allocated_.store(GetNativeBytes());
+
// Unload native libraries for class unloading. We do this after calling FinishGC to prevent
// deadlocks in case the JNI_OnUnload function does allocations.
{
@@ -3502,16 +3561,17 @@
}
size_t Heap::GetPercentFree() {
- return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / max_allowed_footprint_);
+ return static_cast<size_t>(100.0f * static_cast<float>(
+ GetFreeMemory()) / target_footprint_.load(std::memory_order_relaxed));
}
-void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
- if (max_allowed_footprint > GetMaxMemory()) {
- VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::SetIdealFootprint(size_t target_footprint) {
+ if (target_footprint > GetMaxMemory()) {
+ VLOG(gc) << "Clamp target GC heap from " << PrettySize(target_footprint) << " to "
<< PrettySize(GetMaxMemory());
- max_allowed_footprint = GetMaxMemory();
+ target_footprint = GetMaxMemory();
}
- max_allowed_footprint_ = max_allowed_footprint;
+ target_footprint_.store(target_footprint, std::memory_order_relaxed);
}
bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
@@ -3544,10 +3604,10 @@
}
void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
- uint64_t bytes_allocated_before_gc) {
+ size_t bytes_allocated_before_gc) {
// We know what our utilization is at this moment.
// This doesn't actually resize any memory. It just lets the heap grow more when necessary.
- const uint64_t bytes_allocated = GetBytesAllocated();
+ const size_t bytes_allocated = GetBytesAllocated();
// Trace the new heap size after the GC is finished.
TraceHeapSize(bytes_allocated);
uint64_t target_size;
@@ -3555,16 +3615,18 @@
// Use the multiplier to grow more for foreground.
const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
// foreground.
- const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
- const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
+ const size_t adjusted_min_free = static_cast<size_t>(min_free_ * multiplier);
+ const size_t adjusted_max_free = static_cast<size_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
// Grow the heap for non sticky GC.
- ssize_t delta = bytes_allocated / GetTargetHeapUtilization() - bytes_allocated;
- CHECK_GE(delta, 0) << "bytes_allocated=" << bytes_allocated
- << " target_utilization_=" << target_utilization_;
+ uint64_t delta = bytes_allocated * (1.0 / GetTargetHeapUtilization() - 1.0);
+ DCHECK_LE(delta, std::numeric_limits<size_t>::max()) << "bytes_allocated=" << bytes_allocated
+ << " target_utilization_=" << target_utilization_;
target_size = bytes_allocated + delta * multiplier;
- target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
- target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
+ target_size = std::min(target_size,
+ static_cast<uint64_t>(bytes_allocated + adjusted_max_free));
+ target_size = std::max(target_size,
+ static_cast<uint64_t>(bytes_allocated + adjusted_min_free));
next_gc_type_ = collector::kGcTypeSticky;
} else {
collector::GcType non_sticky_gc_type = NonStickyGcType();
@@ -3581,22 +3643,24 @@
// We also check that the bytes allocated aren't over the footprint limit in order to prevent a
// pathological case where dead objects which aren't reclaimed by sticky could get accumulated
// if the sticky GC throughput always remained >= the full/partial throughput.
+ size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
if (current_gc_iteration_.GetEstimatedThroughput() * kStickyGcThroughputAdjustment >=
non_sticky_collector->GetEstimatedMeanThroughput() &&
non_sticky_collector->NumberOfIterations() > 0 &&
- bytes_allocated <= max_allowed_footprint_) {
+ bytes_allocated <= target_footprint) {
next_gc_type_ = collector::kGcTypeSticky;
} else {
next_gc_type_ = non_sticky_gc_type;
}
// If we have freed enough memory, shrink the heap back down.
- if (bytes_allocated + adjusted_max_free < max_allowed_footprint_) {
+ if (bytes_allocated + adjusted_max_free < target_footprint) {
target_size = bytes_allocated + adjusted_max_free;
} else {
- target_size = std::max(bytes_allocated, static_cast<uint64_t>(max_allowed_footprint_));
+ target_size = std::max(bytes_allocated, target_footprint);
}
}
- if (!ignore_max_footprint_) {
+ CHECK_LE(target_size, std::numeric_limits<size_t>::max());
+ if (!ignore_target_footprint_) {
SetIdealFootprint(target_size);
if (IsGcConcurrent()) {
const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
@@ -3605,26 +3669,25 @@
// Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
// how many bytes were allocated during the GC we need to add freed_bytes back on.
CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
- const uint64_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
+ const size_t bytes_allocated_during_gc = bytes_allocated + freed_bytes -
bytes_allocated_before_gc;
// Calculate when to perform the next ConcurrentGC.
// Estimate how many remaining bytes we will have when we need to start the next GC.
size_t remaining_bytes = bytes_allocated_during_gc;
remaining_bytes = std::min(remaining_bytes, kMaxConcurrentRemainingBytes);
remaining_bytes = std::max(remaining_bytes, kMinConcurrentRemainingBytes);
- if (UNLIKELY(remaining_bytes > max_allowed_footprint_)) {
+ size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+ if (UNLIKELY(remaining_bytes > target_footprint)) {
// A never going to happen situation that from the estimated allocation rate we will exceed
// the applications entire footprint with the given estimated allocation rate. Schedule
// another GC nearly straight away.
- remaining_bytes = kMinConcurrentRemainingBytes;
+ remaining_bytes = std::min(kMinConcurrentRemainingBytes, target_footprint);
}
- DCHECK_LE(remaining_bytes, max_allowed_footprint_);
- DCHECK_LE(max_allowed_footprint_, GetMaxMemory());
+ DCHECK_LE(target_footprint_.load(std::memory_order_relaxed), GetMaxMemory());
// Start a concurrent GC when we get close to the estimated remaining bytes. When the
// allocation rate is very high, remaining_bytes could tell us that we should start a GC
// right away.
- concurrent_start_bytes_ = std::max(max_allowed_footprint_ - remaining_bytes,
- static_cast<size_t>(bytes_allocated));
+ concurrent_start_bytes_ = std::max(target_footprint - remaining_bytes, bytes_allocated);
}
}
}
@@ -3652,11 +3715,11 @@
}
void Heap::ClearGrowthLimit() {
- if (max_allowed_footprint_ == growth_limit_ && growth_limit_ < capacity_) {
- max_allowed_footprint_ = capacity_;
+ if (target_footprint_.load(std::memory_order_relaxed) == growth_limit_
+ && growth_limit_ < capacity_) {
+ target_footprint_.store(capacity_, std::memory_order_relaxed);
concurrent_start_bytes_ =
- std::max(max_allowed_footprint_, kMinConcurrentRemainingBytes) -
- kMinConcurrentRemainingBytes;
+ UnsignedDifference(capacity_, kMinConcurrentRemainingBytes);
}
growth_limit_ = capacity_;
ScopedObjectAccess soa(Thread::Current());
@@ -3896,40 +3959,101 @@
static_cast<jlong>(timeout));
}
-void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- size_t old_value = new_native_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
+// For GC triggering purposes, we count old (pre-last-GC) and new native allocations as
+// different fractions of Java allocations.
+// For now, we essentially do not count old native allocations at all, so that we can preserve the
+// existing behavior of not limiting native heap size. If we seriously considered it, we would
+// have to adjust collection thresholds when we encounter large amounts of old native memory,
+// and handle native out-of-memory situations.
- if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() &&
- !IsGCRequestPending()) {
- // Trigger another GC because there have been enough native bytes
- // allocated since the last GC.
+static constexpr size_t kOldNativeDiscountFactor = 65536; // Approximately infinite for now.
+static constexpr size_t kNewNativeDiscountFactor = 2;
+
+// If weighted java + native memory use exceeds our target by kStopForNativeFactor, and
+// newly allocated memory exceeds kHugeNativeAllocs, we wait for GC to complete to avoid
+// running out of memory.
+static constexpr float kStopForNativeFactor = 2.0;
+static constexpr size_t kHugeNativeAllocs = 200*1024*1024;
+
+// Return the ratio of the weighted native + java allocated bytes to its target value.
+// A return value > 1.0 means we should collect. Significantly larger values mean we're falling
+// behind.
+inline float Heap::NativeMemoryOverTarget(size_t current_native_bytes) {
+ // Collection check for native allocation. Does not enforce Java heap bounds.
+ // With adj_start_bytes defined below, effectively checks
+ // <java bytes allocd> + c1*<old native allocd> + c2*<new native allocd> >= adj_start_bytes,
+ // where c3 > 1, and currently c1 and c2 are 1 divided by the values defined above.
+ size_t old_native_bytes = old_native_bytes_allocated_.load(std::memory_order_relaxed);
+ if (old_native_bytes > current_native_bytes) {
+ // Net decrease; skip the check, but update old value.
+ // It's OK to lose an update if two stores race.
+ old_native_bytes_allocated_.store(current_native_bytes, std::memory_order_relaxed);
+ return 0.0;
+ } else {
+ size_t new_native_bytes = UnsignedDifference(current_native_bytes, old_native_bytes);
+ size_t weighted_native_bytes = new_native_bytes / kNewNativeDiscountFactor
+ + old_native_bytes / kOldNativeDiscountFactor;
+ size_t adj_start_bytes = concurrent_start_bytes_
+ + NativeAllocationGcWatermark() / kNewNativeDiscountFactor;
+ return static_cast<float>(GetBytesAllocated() + weighted_native_bytes)
+ / static_cast<float>(adj_start_bytes);
+ }
+}
+
+inline void Heap::CheckConcurrentGCForNative(Thread* self) {
+ size_t current_native_bytes = GetNativeBytes();
+ float gc_urgency = NativeMemoryOverTarget(current_native_bytes);
+ if (UNLIKELY(gc_urgency >= 1.0)) {
if (IsGcConcurrent()) {
- RequestConcurrentGC(ThreadForEnv(env), kGcCauseForNativeAlloc, /*force_full=*/true);
+ RequestConcurrentGC(self, kGcCauseForNativeAlloc, /*force_full=*/true);
+ if (gc_urgency > kStopForNativeFactor
+ && current_native_bytes > kHugeNativeAllocs) {
+ // We're in danger of running out of memory due to rampant native allocation.
+ if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
+ LOG(INFO) << "Stopping for native allocation, urgency: " << gc_urgency;
+ }
+ WaitForGcToComplete(kGcCauseForAlloc, self);
+ }
} else {
CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
}
}
}
-void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
- // Take the bytes freed out of new_native_bytes_allocated_ first. If
- // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
- // out of old_native_bytes_allocated_ to ensure all freed bytes are
- // accounted for.
- size_t allocated;
- size_t new_freed_bytes;
- do {
- allocated = new_native_bytes_allocated_.load(std::memory_order_relaxed);
- new_freed_bytes = std::min(allocated, bytes);
- } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated,
- allocated - new_freed_bytes));
- if (new_freed_bytes < bytes) {
- old_native_bytes_allocated_.fetch_sub(bytes - new_freed_bytes, std::memory_order_relaxed);
+// About kNotifyNativeInterval allocations have occurred. Check whether we should garbage collect.
+void Heap::NotifyNativeAllocations(JNIEnv* env) {
+ native_objects_notified_.fetch_add(kNotifyNativeInterval, std::memory_order_relaxed);
+ CheckConcurrentGCForNative(ThreadForEnv(env));
+}
+
+// Register a native allocation with an explicit size.
+// This should only be done for large allocations of non-malloc memory, which we wouldn't
+// otherwise see.
+void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
+ native_bytes_registered_.fetch_add(bytes, std::memory_order_relaxed);
+ uint32_t objects_notified =
+ native_objects_notified_.fetch_add(1, std::memory_order_relaxed);
+ if (objects_notified % kNotifyNativeInterval == kNotifyNativeInterval - 1
+ || bytes > kCheckImmediatelyThreshold) {
+ CheckConcurrentGCForNative(ThreadForEnv(env));
}
}
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+ size_t allocated;
+ size_t new_freed_bytes;
+ do {
+ allocated = native_bytes_registered_.load(std::memory_order_relaxed);
+ new_freed_bytes = std::min(allocated, bytes);
+ // We should not be registering more free than allocated bytes.
+ // But correctly keep going in non-debug builds.
+ DCHECK_EQ(new_freed_bytes, bytes);
+ } while (!native_bytes_registered_.CompareAndSetWeakRelaxed(allocated,
+ allocated - new_freed_bytes));
+}
+
size_t Heap::GetTotalMemory() const {
- return std::max(max_allowed_footprint_, GetBytesAllocated());
+ return std::max(target_footprint_.load(std::memory_order_relaxed), GetBytesAllocated());
}
void Heap::AddModUnionTable(accounting::ModUnionTable* mod_union_table) {
@@ -4231,8 +4355,8 @@
return verification_.get();
}
-void Heap::VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size) {
- VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint) << " to "
+void Heap::VlogHeapGrowth(size_t old_footprint, size_t new_footprint, size_t alloc_size) {
+ VLOG(heap) << "Growing heap from " << PrettySize(old_footprint) << " to "
<< PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
}
@@ -4243,20 +4367,21 @@
gc::Heap* heap = Runtime::Current()->GetHeap();
// Trigger a GC, if not already done. The first GC after fork, whenever it
// takes place, will adjust the thresholds to normal levels.
- if (heap->max_allowed_footprint_ == heap->growth_limit_) {
+ if (heap->target_footprint_.load(std::memory_order_relaxed) == heap->growth_limit_) {
heap->RequestConcurrentGC(self, kGcCauseBackground, false);
}
}
};
void Heap::PostForkChildAction(Thread* self) {
- // Temporarily increase max_allowed_footprint_ and concurrent_start_bytes_ to
+ // Temporarily increase target_footprint_ and concurrent_start_bytes_ to
// max values to avoid GC during app launch.
if (collector_type_ == kCollectorTypeCC && !IsLowMemoryMode()) {
- // Set max_allowed_footprint_ to the largest allowed value.
+ // Set target_footprint_ to the largest allowed value.
SetIdealFootprint(growth_limit_);
// Set concurrent_start_bytes_ to half of the heap size.
- concurrent_start_bytes_ = std::max(max_allowed_footprint_ / 2, GetBytesAllocated());
+ size_t target_footprint = target_footprint_.load(std::memory_order_relaxed);
+ concurrent_start_bytes_ = std::max(target_footprint / 2, GetBytesAllocated());
GetTaskProcessor()->AddTask(
self, new TriggerPostForkCCGcTask(NanoTime() + MsToNs(kPostForkMaxHeapDurationMS)));
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 57c7376..aa09cbe 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -126,7 +126,6 @@
class Heap {
public:
- // If true, measure the total allocation time.
static constexpr size_t kDefaultStartingSize = kPageSize;
static constexpr size_t kDefaultInitialSize = 2 * MB;
static constexpr size_t kDefaultMaximumSize = 256 * MB;
@@ -155,6 +154,16 @@
// Used so that we don't overflow the allocation time atomic integer.
static constexpr size_t kTimeAdjust = 1024;
+ // Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
+ // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
+ // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec.
+ static constexpr uint32_t kNotifyNativeInterval = 32;
+
+ // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
+ // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
+ // make it safe to allocate that many bytes between checks.
+ static constexpr size_t kCheckImmediatelyThreshold = 300000;
+
// How often we allow heap trimming to happen (nanoseconds).
static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
// How long we wait after a transition request to perform a collector transition (nanoseconds).
@@ -187,7 +196,7 @@
bool low_memory_mode,
size_t long_pause_threshold,
size_t long_gc_threshold,
- bool ignore_max_footprint,
+ bool ignore_target_footprint,
bool use_tlab,
bool verify_pre_gc_heap,
bool verify_pre_sweeping_heap,
@@ -198,7 +207,9 @@
bool gc_stress_mode,
bool measure_gc_performance,
bool use_homogeneous_space_compaction,
- uint64_t min_interval_homogeneous_space_compaction_by_oom);
+ uint64_t min_interval_homogeneous_space_compaction_by_oom,
+ bool dump_region_info_before_gc,
+ bool dump_region_info_after_gc);
~Heap();
@@ -269,10 +280,22 @@
void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Inform the garbage collector of a non-malloc allocated native memory that might become
+ // reclaimable in the future as a result of Java garbage collection.
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void RegisterNativeFree(JNIEnv* env, size_t bytes);
+ // Notify the garbage collector of malloc allocations that might be reclaimable
+ // as a result of Java garbage collection. Each such call represents approximately
+ // kNotifyNativeInterval such allocations.
+ void NotifyNativeAllocations(JNIEnv* env)
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
+
+ uint32_t GetNotifyNativeInterval() {
+ return kNotifyNativeInterval;
+ }
+
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);
@@ -397,11 +420,16 @@
REQUIRES(!Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
- double GetWeightedAllocatedBytes() const {
- return weighted_allocated_bytes_;
+ double GetPreGcWeightedAllocatedBytes() const {
+ return pre_gc_weighted_allocated_bytes_;
}
- void CalculateWeightedAllocatedBytes();
+ double GetPostGcWeightedAllocatedBytes() const {
+ return post_gc_weighted_allocated_bytes_;
+ }
+
+ void CalculatePreGcWeightedAllocatedBytes();
+ void CalculatePostGcWeightedAllocatedBytes();
uint64_t GetTotalGcCpuTime();
uint64_t GetProcessCpuStartTime() const {
@@ -531,21 +559,20 @@
// Returns approximately how much free memory we have until the next GC happens.
size_t GetFreeMemoryUntilGC() const {
- return max_allowed_footprint_ - GetBytesAllocated();
+ return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
+ GetBytesAllocated());
}
// Returns approximately how much free memory we have until the next OOME happens.
size_t GetFreeMemoryUntilOOME() const {
- return growth_limit_ - GetBytesAllocated();
+ return UnsignedDifference(growth_limit_, GetBytesAllocated());
}
// Returns how much free memory we have until we need to grow the heap to perform an allocation.
// Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
- size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_relaxed);
- size_t total_memory = GetTotalMemory();
- // Make sure we don't get a negative number.
- return total_memory - std::min(total_memory, byte_allocated);
+ return UnsignedDifference(GetTotalMemory(),
+ num_bytes_allocated_.load(std::memory_order_relaxed));
}
// Get the space that corresponds to an object's address. Current implementation searches all
@@ -858,6 +885,9 @@
REQUIRES(!*gc_complete_lock_);
void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
+ double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
+ uint64_t current_process_cpu_time) const;
+
// Create a mem map with a preferred base address.
static MemMap MapAnonymousPreferredAddress(const char* name,
uint8_t* request_begin,
@@ -869,12 +899,16 @@
return main_space_backup_ != nullptr;
}
+ static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
+ return x > y ? x - y : 0;
+ }
+
static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
return
+ allocator_type != kAllocatorTypeRegionTLAB &&
allocator_type != kAllocatorTypeBumpPointer &&
allocator_type != kAllocatorTypeTLAB &&
- allocator_type != kAllocatorTypeRegion &&
- allocator_type != kAllocatorTypeRegionTLAB;
+ allocator_type != kAllocatorTypeRegion;
}
static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
if (kUseReadBarrier) {
@@ -882,24 +916,30 @@
return true;
}
return
- allocator_type != kAllocatorTypeBumpPointer &&
- allocator_type != kAllocatorTypeTLAB;
+ allocator_type != kAllocatorTypeTLAB &&
+ allocator_type != kAllocatorTypeBumpPointer;
}
static bool IsMovingGc(CollectorType collector_type) {
return
+ collector_type == kCollectorTypeCC ||
collector_type == kCollectorTypeSS ||
collector_type == kCollectorTypeGSS ||
- collector_type == kCollectorTypeCC ||
collector_type == kCollectorTypeCCBackground ||
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
- size_t new_num_bytes_allocated,
- ObjPtr<mirror::Object>* obj)
+
+ // Checks whether we should garbage collect:
+ ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
+ float NativeMemoryOverTarget(size_t current_native_bytes);
+ ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
+ size_t new_num_bytes_allocated,
+ ObjPtr<mirror::Object>* obj)
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
+ void CheckConcurrentGCForNative(Thread* self)
+ REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
accounting::ObjectStack* GetMarkStack() {
return mark_stack_.get();
@@ -960,6 +1000,11 @@
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
REQUIRES_SHARED(Locks::mutator_lock_);
+ // Are we out of memory, and thus should force a GC or fail?
+ // For concurrent collectors, out of memory is defined by growth_limit_.
+ // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
+ // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
+ // to accommodate the allocation.
ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
size_t alloc_size,
bool grow);
@@ -1023,7 +1068,7 @@
// collection. bytes_allocated_before_gc is used to measure bytes / second for the period which
// the GC was run.
void GrowForUtilization(collector::GarbageCollector* collector_ran,
- uint64_t bytes_allocated_before_gc = 0);
+ size_t bytes_allocated_before_gc = 0);
size_t GetPercentFree();
@@ -1057,8 +1102,8 @@
// What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
// sweep GC, false for other GC types.
bool IsGcConcurrent() const ALWAYS_INLINE {
- return collector_type_ == kCollectorTypeCMS ||
- collector_type_ == kCollectorTypeCC ||
+ return collector_type_ == kCollectorTypeCC ||
+ collector_type_ == kCollectorTypeCMS ||
collector_type_ == kCollectorTypeCCBackground;
}
@@ -1087,15 +1132,19 @@
return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
}
- // How large new_native_bytes_allocated_ can grow before we trigger a new
- // GC.
+ // Return the amount of space we allow for native memory when deciding whether to
+ // collect. We collect when a weighted sum of Java memory plus native memory exceeds
+ // the similarly weighted sum of the Java heap size target and this value.
ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
- // Reuse max_free_ for the native allocation gc watermark, so that the
- // native heap is treated in the same way as the Java heap in the case
- // where the gc watermark update would exceed max_free_. Using max_free_
- // instead of the target utilization means the watermark doesn't depend on
- // the current number of registered native allocations.
- return max_free_;
+ // It probably makes most sense to use a constant multiple of target_footprint_.
+ // This is a good indication of the live data size, together with the
+ // intended space-time trade-off, as expressed by SetTargetHeapUtilization.
+ // For a fixed target utilization, the amount of GC effort per native
+ // allocated byte remains roughly constant as the Java heap size changes.
+ // But we previously triggered on max_free_ native allocation which is often much
+ // smaller. To avoid unexpected growth, we partially keep that limit in place for now.
+ // TODO: Consider HeapGrowthMultiplier(). Maybe.
+ return std::min(target_footprint_.load(std::memory_order_relaxed), 2 * max_free_);
}
ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
@@ -1105,6 +1154,11 @@
// Remove a vlog code from heap-inl.h which is transitively included in half the world.
static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
+ // Return our best approximation of the number of bytes of native memory that
+ // are currently in use, and could possibly be reclaimed as an indirect result
+ // of a garbage collection.
+ size_t GetNativeBytes();
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
@@ -1175,15 +1229,18 @@
// Starting time of the new process; meant to be used for measuring total process CPU time.
uint64_t process_cpu_start_time_ns_;
- // Last time GC started; meant to be used to measure the duration between two GCs.
- uint64_t last_process_cpu_time_ns_;
+ // Last time (before and after) GC started; meant to be used to measure the
+ // duration between two GCs.
+ uint64_t pre_gc_last_process_cpu_time_ns_;
+ uint64_t post_gc_last_process_cpu_time_ns_;
- // allocated_bytes * (current_process_cpu_time - last_process_cpu_time)
- double weighted_allocated_bytes_;
+ // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
+ double pre_gc_weighted_allocated_bytes_;
+ double post_gc_weighted_allocated_bytes_;
- // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
- // useful for benchmarking since it reduces time spent in GC to a low %.
- const bool ignore_max_footprint_;
+ // If we ignore the target footprint it lets the heap grow until it hits the heap capacity, this
+ // is useful for benchmarking since it reduces time spent in GC to a low %.
+ const bool ignore_target_footprint_;
// Lock which guards zygote space creation.
Mutex zygote_creation_lock_;
@@ -1232,14 +1289,18 @@
// The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
// programs it is "cleared" making it the same as capacity.
+ // Only weakly enforced for simultaneous allocations.
size_t growth_limit_;
- // When the number of bytes allocated exceeds the footprint TryAllocate returns null indicating
- // a GC should be triggered.
- size_t max_allowed_footprint_;
+ // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
+ // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
+ // concurrent GC case.
+ Atomic<size_t> target_footprint_;
// When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
// it completes ahead of an allocation failing.
+ // A multiple of this is also used to determine when to trigger a GC in response to native
+ // allocation.
size_t concurrent_start_bytes_;
// Since the heap was created, how many bytes have been freed.
@@ -1252,19 +1313,18 @@
// TLABS in their entirety, even if they have not yet been parceled out.
Atomic<size_t> num_bytes_allocated_;
- // Number of registered native bytes allocated since the last time GC was
- // triggered. Adjusted after each RegisterNativeAllocation and
- // RegisterNativeFree. Used to determine when to trigger GC for native
- // allocations.
- // See the REDESIGN section of go/understanding-register-native-allocation.
- Atomic<size_t> new_native_bytes_allocated_;
+ // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
+ // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
+ // not include bytes allocated through the system malloc, since those are implicitly included.
+ Atomic<size_t> native_bytes_registered_;
- // Number of registered native bytes allocated prior to the last time GC was
- // triggered, for debugging purposes. The current number of registered
- // native bytes is determined by taking the sum of
- // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+ // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
Atomic<size_t> old_native_bytes_allocated_;
+ // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
+ // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
+ Atomic<uint32_t> native_objects_notified_;
+
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
// rosalloc thread-local buffers. It is temporarily accumulated
@@ -1349,10 +1409,10 @@
// Minimum free guarantees that you always have at least min_free_ free bytes after growing for
// utilization, regardless of target utilization ratio.
- size_t min_free_;
+ const size_t min_free_;
// The ideal maximum free size, when we grow the heap for utilization.
- size_t max_free_;
+ const size_t max_free_;
// Target ideal heap utilization ratio.
double target_utilization_;
@@ -1448,6 +1508,11 @@
// allocating.
bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
+ // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
+ // emit region info before and after each GC cycle.
+ bool dump_region_info_before_gc_;
+ bool dump_region_info_after_gc_;
+
// Boot image spaces.
std::vector<space::ImageSpace*> boot_image_spaces_;
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 4c2074d..66db063 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -28,6 +28,7 @@
#include "art_field-inl.h"
#include "art_method-inl.h"
+#include "base/array_ref.h"
#include "base/bit_memory_region.h"
#include "base/callee_save_type.h"
#include "base/enums.h"
@@ -44,6 +45,7 @@
#include "dex/dex_file_loader.h"
#include "exec_utils.h"
#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/task_processor.h"
#include "image-inl.h"
#include "image_space_fs.h"
#include "intern_table-inl.h"
@@ -59,6 +61,7 @@
namespace gc {
namespace space {
+using android::base::StringAppendF;
using android::base::StringPrintf;
Atomic<uint32_t> ImageSpace::bitmap_index_(0);
@@ -654,6 +657,22 @@
const CodeVisitor code_visitor_;
};
+template <typename ReferenceVisitor>
+class ImageSpace::ClassTableVisitor final {
+ public:
+ explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
+ : reference_visitor_(reference_visitor) {}
+
+ void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
+ REQUIRES_SHARED(Locks::mutator_lock_) {
+ DCHECK(root->AsMirrorPtr() != nullptr);
+ root->Assign(reference_visitor_(root->AsMirrorPtr()));
+ }
+
+ private:
+ ReferenceVisitor reference_visitor_;
+};
+
// Helper class encapsulating loading, so we can access private ImageSpace members (this is a
// nested class), but not declare functions in the header.
class ImageSpace::Loader {
@@ -666,30 +685,39 @@
REQUIRES_SHARED(Locks::mutator_lock_) {
TimingLogger logger(__PRETTY_FUNCTION__, /*precise=*/ true, VLOG_IS_ON(image));
- const bool create_thread_pool = true;
std::unique_ptr<ThreadPool> thread_pool;
- if (create_thread_pool) {
- TimingLogger::ScopedTiming timing("CreateThreadPool", &logger);
- ScopedThreadStateChange stsc(Thread::Current(), kNative);
- constexpr size_t kStackSize = 64 * KB;
- constexpr size_t kMaxRuntimeWorkers = 4u;
- const size_t num_workers =
- std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
- thread_pool.reset(new ThreadPool("Runtime", num_workers, /*create_peers=*/false, kStackSize));
- thread_pool->StartWorkers(Thread::Current());
- }
-
std::unique_ptr<ImageSpace> space = Init(image_filename,
image_location,
oat_file,
&logger,
- thread_pool.get(),
+ &thread_pool,
image_reservation,
error_msg);
if (thread_pool != nullptr) {
- TimingLogger::ScopedTiming timing("CreateThreadPool", &logger);
- ScopedThreadStateChange stsc(Thread::Current(), kNative);
- thread_pool.reset();
+ // Delay the thread pool deletion to prevent the deletion slowing down the startup by causing
+ // preemption. TODO: Just do this in heap trim.
+ static constexpr uint64_t kThreadPoolDeleteDelay = MsToNs(5000);
+
+ class DeleteThreadPoolTask : public HeapTask {
+ public:
+ explicit DeleteThreadPoolTask(std::unique_ptr<ThreadPool>&& thread_pool)
+ : HeapTask(NanoTime() + kThreadPoolDeleteDelay), thread_pool_(std::move(thread_pool)) {}
+
+ void Run(Thread* self) override {
+ ScopedTrace trace("DestroyThreadPool");
+ ScopedThreadStateChange stsc(self, kNative);
+ thread_pool_.reset();
+ }
+
+ private:
+ std::unique_ptr<ThreadPool> thread_pool_;
+ };
+ gc::TaskProcessor* const processor = Runtime::Current()->GetHeap()->GetTaskProcessor();
+ // The thread pool is already done being used since Init has finished running. Deleting the
+ // thread pool is done async since it takes a non-trivial amount of time to do.
+ if (processor != nullptr) {
+ processor->AddTask(Thread::Current(), new DeleteThreadPoolTask(std::move(thread_pool)));
+ }
}
if (space != nullptr) {
uint32_t expected_reservation_size =
@@ -701,11 +729,22 @@
TimingLogger::ScopedTiming timing("RelocateImage", &logger);
ImageHeader* image_header = reinterpret_cast<ImageHeader*>(space->GetMemMap()->Begin());
- if (!RelocateInPlace(*image_header,
- space->GetMemMap()->Begin(),
- space->GetLiveBitmap(),
- oat_file,
- error_msg)) {
+ const PointerSize pointer_size = image_header->GetPointerSize();
+ bool result;
+ if (pointer_size == PointerSize::k64) {
+ result = RelocateInPlace<PointerSize::k64>(*image_header,
+ space->GetMemMap()->Begin(),
+ space->GetLiveBitmap(),
+ oat_file,
+ error_msg);
+ } else {
+ result = RelocateInPlace<PointerSize::k32>(*image_header,
+ space->GetMemMap()->Begin(),
+ space->GetLiveBitmap(),
+ oat_file,
+ error_msg);
+ }
+ if (!result) {
return nullptr;
}
Runtime* runtime = Runtime::Current();
@@ -740,7 +779,7 @@
const char* image_location,
const OatFile* oat_file,
TimingLogger* logger,
- ThreadPool* thread_pool,
+ std::unique_ptr<ThreadPool>* thread_pool,
/*inout*/MemMap* image_reservation,
/*out*/std::string* error_msg)
REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -817,6 +856,18 @@
return nullptr;
}
+ const size_t kMinBlocks = 2;
+ if (thread_pool != nullptr && image_header->GetBlockCount() >= kMinBlocks) {
+ TimingLogger::ScopedTiming timing("CreateThreadPool", logger);
+ ScopedThreadStateChange stsc(Thread::Current(), kNative);
+ constexpr size_t kStackSize = 64 * KB;
+ constexpr size_t kMaxRuntimeWorkers = 4u;
+ const size_t num_workers =
+ std::min(static_cast<size_t>(std::thread::hardware_concurrency()), kMaxRuntimeWorkers);
+ thread_pool->reset(new ThreadPool("Image", num_workers, /*create_peers=*/false, kStackSize));
+ thread_pool->get()->StartWorkers(Thread::Current());
+ }
+
// GetImageBegin is the preferred address to map the image. If we manage to map the
// image at the image begin, the amount of fixup work required is minimized.
// If it is pic we will retry with error_msg for the failure case. Pass a null error_msg to
@@ -829,7 +880,7 @@
*image_header,
file->Fd(),
logger,
- thread_pool,
+ thread_pool != nullptr ? thread_pool->get() : nullptr,
image_reservation,
error_msg);
if (!map.IsValid()) {
@@ -966,8 +1017,7 @@
const uint64_t start = NanoTime();
Thread* const self = Thread::Current();
- const size_t kMinBlocks = 2;
- const bool use_parallel = pool != nullptr && image_header.GetBlockCount() >= kMinBlocks;
+ const bool use_parallel = pool != nullptr;
for (const ImageHeader::Block& block : image_header.GetBlocks(temp_map.Begin())) {
auto function = [&](Thread*) {
const uint64_t start2 = NanoTime();
@@ -1089,11 +1139,8 @@
class FixupObjectVisitor : public FixupVisitor {
public:
template<typename... Args>
- explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited,
- const PointerSize pointer_size,
- Args... args)
+ explicit FixupObjectVisitor(gc::accounting::ContinuousSpaceBitmap* visited, Args... args)
: FixupVisitor(args...),
- pointer_size_(pointer_size),
visited_(visited) {}
// Fix up separately since we also need to fix up method entrypoints.
@@ -1105,39 +1152,14 @@
ALWAYS_INLINE void operator()(ObjPtr<mirror::Object> obj,
MemberOffset offset,
-
bool is_static ATTRIBUTE_UNUSED) const
NO_THREAD_SAFETY_ANALYSIS {
- // There could be overlap between ranges, we must avoid visiting the same reference twice.
- // Avoid the class field since we already fixed it up in FixupClassVisitor.
- if (offset.Uint32Value() != mirror::Object::ClassOffset().Uint32Value()) {
- // Space is not yet added to the heap, don't do a read barrier.
- mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
- offset);
- // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
- // image.
- obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
- }
- }
-
- // Visit a pointer array and forward corresponding native data. Ignores pointer arrays in the
- // boot image. Uses the bitmap to ensure the same array is not visited multiple times.
- template <typename Visitor>
- void UpdatePointerArrayContents(mirror::PointerArray* array, const Visitor& visitor) const
- NO_THREAD_SAFETY_ANALYSIS {
- DCHECK(array != nullptr);
- DCHECK(visitor.IsInAppImage(array));
- // The bit for the array contents is different than the bit for the array. Since we may have
- // already visited the array as a long / int array from walking the bitmap without knowing it
- // was a pointer array.
- static_assert(kObjectAlignment == 8u, "array bit may be in another object");
- mirror::Object* const contents_bit = reinterpret_cast<mirror::Object*>(
- reinterpret_cast<uintptr_t>(array) + kObjectAlignment);
- // If the bit is not set then the contents have not yet been updated.
- if (!visited_->Test(contents_bit)) {
- array->Fixup<kVerifyNone>(array, pointer_size_, visitor);
- visited_->Set(contents_bit);
- }
+ // Space is not yet added to the heap, don't do a read barrier.
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(
+ offset);
+ // Use SetFieldObjectWithoutWriteBarrier to avoid card marking since we are writing to the
+ // image.
+ obj->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(offset, ForwardObject(ref));
}
// java.lang.ref.Reference visitor.
@@ -1152,81 +1174,16 @@
void operator()(mirror::Object* obj) const
NO_THREAD_SAFETY_ANALYSIS {
- if (visited_->Test(obj)) {
- // Already visited.
- return;
- }
- visited_->Set(obj);
-
- // Handle class specially first since we need it to be updated to properly visit the rest of
- // the instance fields.
- {
- mirror::Class* klass = obj->GetClass<kVerifyNone, kWithoutReadBarrier>();
- DCHECK(klass != nullptr) << "Null class in image";
- // No AsClass since our fields aren't quite fixed up yet.
- mirror::Class* new_klass = down_cast<mirror::Class*>(ForwardObject(klass));
- if (klass != new_klass) {
- obj->SetClass<kVerifyNone>(new_klass);
- }
- if (new_klass != klass && IsInAppImage(new_klass)) {
- // Make sure the klass contents are fixed up since we depend on it to walk the fields.
- operator()(new_klass);
- }
- }
-
- if (obj->IsClass()) {
- mirror::Class* klass = obj->AsClass<kVerifyNone>();
- // Fixup super class before visiting instance fields which require
- // information from their super class to calculate offsets.
- mirror::Class* super_class =
- klass->GetSuperClass<kVerifyNone, kWithoutReadBarrier>().Ptr();
- if (super_class != nullptr) {
- mirror::Class* new_super_class = down_cast<mirror::Class*>(ForwardObject(super_class));
- if (new_super_class != super_class && IsInAppImage(new_super_class)) {
- // Recursively fix all dependencies.
- operator()(new_super_class);
- }
- }
- }
-
- obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
- *this,
- *this);
- // Note that this code relies on no circular dependencies.
- // We want to use our own class loader and not the one in the image.
- if (obj->IsClass<kVerifyNone>()) {
- mirror::Class* as_klass = obj->AsClass<kVerifyNone>();
- FixupObjectAdapter visitor(boot_image_, app_image_, app_oat_);
- as_klass->FixupNativePointers<kVerifyNone>(as_klass, pointer_size_, visitor);
- // Deal with the pointer arrays. Use the helper function since multiple classes can reference
- // the same arrays.
- mirror::PointerArray* const vtable = as_klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
- if (vtable != nullptr && IsInAppImage(vtable)) {
- operator()(vtable);
- UpdatePointerArrayContents(vtable, visitor);
- }
- mirror::IfTable* iftable = as_klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
- // Ensure iftable arrays are fixed up since we need GetMethodArray to return the valid
- // contents.
- if (IsInAppImage(iftable)) {
- operator()(iftable);
- for (int32_t i = 0, count = iftable->Count(); i < count; ++i) {
- if (iftable->GetMethodArrayCount<kVerifyNone, kWithoutReadBarrier>(i) > 0) {
- mirror::PointerArray* methods =
- iftable->GetMethodArray<kVerifyNone, kWithoutReadBarrier>(i);
- if (visitor.IsInAppImage(methods)) {
- operator()(methods);
- DCHECK(methods != nullptr);
- UpdatePointerArrayContents(methods, visitor);
- }
- }
- }
- }
+ if (!visited_->Set(obj)) {
+ // Not already visited.
+ obj->VisitReferences</*visit native roots*/false, kVerifyNone, kWithoutReadBarrier>(
+ *this,
+ *this);
+ CHECK(!obj->IsClass());
}
}
private:
- const PointerSize pointer_size_;
gc::accounting::ContinuousSpaceBitmap* const visited_;
};
@@ -1306,6 +1263,7 @@
// Relocate an image space mapped at target_base which possibly used to be at a different base
// address. In place means modifying a single ImageSpace in place rather than relocating from
// one ImageSpace to another.
+ template <PointerSize kPointerSize>
static bool RelocateInPlace(ImageHeader& image_header,
uint8_t* target_base,
accounting::ContinuousSpaceBitmap* bitmap,
@@ -1317,7 +1275,6 @@
uint32_t boot_image_end = 0;
uint32_t boot_oat_begin = 0;
uint32_t boot_oat_end = 0;
- const PointerSize pointer_size = image_header.GetPointerSize();
gc::Heap* const heap = Runtime::Current()->GetHeap();
heap->GetBootImagesSize(&boot_image_begin, &boot_image_end, &boot_oat_begin, &boot_oat_end);
if (boot_image_begin == boot_image_end) {
@@ -1359,11 +1316,8 @@
return true;
}
ScopedDebugDisallowReadBarriers sddrb(Thread::Current());
- // Need to update the image to be at the target base.
- const ImageSection& objects_section = image_header.GetObjectsSection();
- uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
- uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
FixupObjectAdapter fixup_adapter(boot_image, app_image, app_oat);
+ PatchObjectVisitor<kPointerSize, FixupObjectAdapter> patch_object_visitor(fixup_adapter);
if (fixup_image) {
// Two pass approach, fix up all classes first, then fix up non class-objects.
// The visited bitmap is used to ensure that pointer arrays are not forwarded twice.
@@ -1371,16 +1325,64 @@
gc::accounting::ContinuousSpaceBitmap::Create("Relocate bitmap",
target_base,
image_header.GetImageSize()));
- FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(),
- pointer_size,
- boot_image,
- app_image,
- app_oat);
- TimingLogger::ScopedTiming timing("Fixup classes", &logger);
- // Fixup objects may read fields in the boot image, use the mutator lock here for sanity. Though
- // its probably not required.
+ FixupObjectVisitor fixup_object_visitor(visited_bitmap.get(), boot_image, app_image, app_oat);
+ {
+ TimingLogger::ScopedTiming timing("Fixup classes", &logger);
+ const auto& class_table_section = image_header.GetClassTableSection();
+ if (class_table_section.Size() > 0u) {
+ ScopedObjectAccess soa(Thread::Current());
+ ClassTableVisitor class_table_visitor(fixup_adapter);
+ size_t read_count = 0u;
+ const uint8_t* data = target_base + class_table_section.Offset();
+ // We avoid making a copy of the data since we want modifications to be propagated to the
+ // memory map.
+ ClassTable::ClassSet temp_set(data, /*make_copy_of_data=*/ false, &read_count);
+ for (ClassTable::TableSlot& slot : temp_set) {
+ slot.VisitRoot(class_table_visitor);
+ mirror::Class* klass = slot.Read<kWithoutReadBarrier>();
+ if (!fixup_adapter.IsInAppImage(klass)) {
+ continue;
+ }
+ const bool already_marked = visited_bitmap->Set(klass);
+ CHECK(!already_marked) << "App image class already visited";
+ patch_object_visitor.VisitClass(klass);
+ // Then patch the non-embedded vtable and iftable.
+ mirror::PointerArray* vtable = klass->GetVTable<kVerifyNone, kWithoutReadBarrier>();
+ if (vtable != nullptr &&
+ fixup_object_visitor.IsInAppImage(vtable) &&
+ !visited_bitmap->Set(vtable)) {
+ patch_object_visitor.VisitPointerArray(vtable);
+ }
+ auto* iftable = klass->GetIfTable<kVerifyNone, kWithoutReadBarrier>();
+ if (iftable != nullptr && fixup_object_visitor.IsInAppImage(iftable)) {
+              // Avoid processing the fields of iftable since we will process them later anyway
+              // below.
+ int32_t ifcount = klass->GetIfTableCount<kVerifyNone>();
+ for (int32_t i = 0; i != ifcount; ++i) {
+ mirror::PointerArray* unpatched_ifarray =
+ iftable->GetMethodArrayOrNull<kVerifyNone, kWithoutReadBarrier>(i);
+ if (unpatched_ifarray != nullptr) {
+ // The iftable has not been patched, so we need to explicitly adjust the pointer.
+ mirror::PointerArray* ifarray = fixup_adapter(unpatched_ifarray);
+ if (fixup_object_visitor.IsInAppImage(ifarray) &&
+ !visited_bitmap->Set(ifarray)) {
+ patch_object_visitor.VisitPointerArray(ifarray);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Fixup objects may read fields in the boot image, use the mutator lock here for sanity.
+      // Though it's probably not required.
+ TimingLogger::ScopedTiming timing("Fixup cobjects", &logger);
ScopedObjectAccess soa(Thread::Current());
- timing.NewTiming("Fixup objects");
+ // Need to update the image to be at the target base.
+ const ImageSection& objects_section = image_header.GetObjectsSection();
+ uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset());
+ uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End());
bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor);
// Fixup image roots.
CHECK(app_image.InSource(reinterpret_cast<uintptr_t>(
@@ -1392,96 +1394,19 @@
AsObjectArray<mirror::DexCache, kVerifyNone>();
for (int32_t i = 0, count = dex_caches->GetLength(); i < count; ++i) {
mirror::DexCache* dex_cache = dex_caches->Get<kVerifyNone, kWithoutReadBarrier>(i);
- // Fix up dex cache pointers.
- mirror::StringDexCacheType* strings = dex_cache->GetStrings();
- if (strings != nullptr) {
- mirror::StringDexCacheType* new_strings = fixup_adapter.ForwardObject(strings);
- if (strings != new_strings) {
- dex_cache->SetStrings(new_strings);
- }
- dex_cache->FixupStrings<kWithoutReadBarrier>(new_strings, fixup_adapter);
- }
- mirror::TypeDexCacheType* types = dex_cache->GetResolvedTypes();
- if (types != nullptr) {
- mirror::TypeDexCacheType* new_types = fixup_adapter.ForwardObject(types);
- if (types != new_types) {
- dex_cache->SetResolvedTypes(new_types);
- }
- dex_cache->FixupResolvedTypes<kWithoutReadBarrier>(new_types, fixup_adapter);
- }
- mirror::MethodDexCacheType* methods = dex_cache->GetResolvedMethods();
- if (methods != nullptr) {
- mirror::MethodDexCacheType* new_methods = fixup_adapter.ForwardObject(methods);
- if (methods != new_methods) {
- dex_cache->SetResolvedMethods(new_methods);
- }
- for (size_t j = 0, num = dex_cache->NumResolvedMethods(); j != num; ++j) {
- auto pair = mirror::DexCache::GetNativePairPtrSize(new_methods, j, pointer_size);
- ArtMethod* orig = pair.object;
- ArtMethod* copy = fixup_adapter.ForwardObject(orig);
- if (orig != copy) {
- pair.object = copy;
- mirror::DexCache::SetNativePairPtrSize(new_methods, j, pair, pointer_size);
- }
- }
- }
- mirror::FieldDexCacheType* fields = dex_cache->GetResolvedFields();
- if (fields != nullptr) {
- mirror::FieldDexCacheType* new_fields = fixup_adapter.ForwardObject(fields);
- if (fields != new_fields) {
- dex_cache->SetResolvedFields(new_fields);
- }
- for (size_t j = 0, num = dex_cache->NumResolvedFields(); j != num; ++j) {
- mirror::FieldDexCachePair orig =
- mirror::DexCache::GetNativePairPtrSize(new_fields, j, pointer_size);
- mirror::FieldDexCachePair copy(fixup_adapter.ForwardObject(orig.object), orig.index);
- if (orig.object != copy.object) {
- mirror::DexCache::SetNativePairPtrSize(new_fields, j, copy, pointer_size);
- }
- }
- }
-
- mirror::MethodTypeDexCacheType* method_types = dex_cache->GetResolvedMethodTypes();
- if (method_types != nullptr) {
- mirror::MethodTypeDexCacheType* new_method_types =
- fixup_adapter.ForwardObject(method_types);
- if (method_types != new_method_types) {
- dex_cache->SetResolvedMethodTypes(new_method_types);
- }
- dex_cache->FixupResolvedMethodTypes<kWithoutReadBarrier>(new_method_types, fixup_adapter);
- }
- GcRoot<mirror::CallSite>* call_sites = dex_cache->GetResolvedCallSites();
- if (call_sites != nullptr) {
- GcRoot<mirror::CallSite>* new_call_sites = fixup_adapter.ForwardObject(call_sites);
- if (call_sites != new_call_sites) {
- dex_cache->SetResolvedCallSites(new_call_sites);
- }
- dex_cache->FixupResolvedCallSites<kWithoutReadBarrier>(new_call_sites, fixup_adapter);
- }
-
- GcRoot<mirror::String>* preresolved_strings = dex_cache->GetPreResolvedStrings();
- if (preresolved_strings != nullptr) {
- GcRoot<mirror::String>* new_array = fixup_adapter.ForwardObject(preresolved_strings);
- if (preresolved_strings != new_array) {
- dex_cache->SetPreResolvedStrings(new_array);
- }
- const size_t num_preresolved_strings = dex_cache->NumPreResolvedStrings();
- for (size_t j = 0; j < num_preresolved_strings; ++j) {
- new_array[j] = GcRoot<mirror::String>(
- fixup_adapter(new_array[j].Read<kWithoutReadBarrier>()));
- }
- }
+ CHECK(dex_cache != nullptr);
+ patch_object_visitor.VisitDexCacheArrays(dex_cache);
}
}
{
// Only touches objects in the app image, no need for mutator lock.
TimingLogger::ScopedTiming timing("Fixup methods", &logger);
FixupArtMethodVisitor method_visitor(fixup_image,
- pointer_size,
+ kPointerSize,
boot_image,
app_image,
app_oat);
- image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size);
+ image_header.VisitPackedArtMethods(&method_visitor, target_base, kPointerSize);
}
if (fixup_image) {
{
@@ -1492,26 +1417,14 @@
}
{
TimingLogger::ScopedTiming timing("Fixup imt", &logger);
- image_header.VisitPackedImTables(fixup_adapter, target_base, pointer_size);
+ image_header.VisitPackedImTables(fixup_adapter, target_base, kPointerSize);
}
{
TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger);
- image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size);
+ image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, kPointerSize);
}
// In the app image case, the image methods are actually in the boot image.
image_header.RelocateImageMethods(boot_image.Delta());
- const auto& class_table_section = image_header.GetClassTableSection();
- if (class_table_section.Size() > 0u) {
- // Note that we require that ReadFromMemory does not make an internal copy of the elements.
- // This also relies on visit roots not doing any verification which could fail after we update
- // the roots to be the image addresses.
- ScopedObjectAccess soa(Thread::Current());
- WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_);
- ClassTable temp_table;
- temp_table.ReadFromMemory(target_base + class_table_section.Offset());
- FixupRootVisitor root_visitor(boot_image, app_image, app_oat);
- temp_table.VisitRoots(root_visitor);
- }
// Fix up the intern table.
const auto& intern_table_section = image_header.GetInternedStringsSection();
if (intern_table_section.Size() > 0u) {
@@ -1654,8 +1567,10 @@
*error_msg = StringPrintf("Cannot read header of %s", filename.c_str());
return false;
}
- if (system_hdr.GetComponentCount() != boot_class_path_.size()) {
- *error_msg = StringPrintf("Unexpected component count in %s, received %u, expected %zu",
+ if (system_hdr.GetComponentCount() == 0u ||
+ system_hdr.GetComponentCount() > boot_class_path_.size()) {
+ *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+ "expected non-zero and <= %zu",
filename.c_str(),
system_hdr.GetComponentCount(),
boot_class_path_.size());
@@ -1672,10 +1587,12 @@
return false;
}
+ ArrayRef<const std::string> provided_locations(boot_class_path_locations_.data(),
+ system_hdr.GetComponentCount());
std::vector<std::string> locations =
- ExpandMultiImageLocations(boot_class_path_locations_, image_location_);
+ ExpandMultiImageLocations(provided_locations, image_location_);
std::vector<std::string> filenames =
- ExpandMultiImageLocations(boot_class_path_locations_, filename);
+ ExpandMultiImageLocations(provided_locations, filename);
DCHECK_EQ(locations.size(), filenames.size());
std::vector<std::unique_ptr<ImageSpace>> spaces;
spaces.reserve(locations.size());
@@ -1694,7 +1611,7 @@
}
for (size_t i = 0u, size = spaces.size(); i != size; ++i) {
std::string expected_boot_class_path =
- (i == 0u) ? android::base::Join(boot_class_path_locations_, ':') : std::string();
+ (i == 0u) ? android::base::Join(provided_locations, ':') : std::string();
if (!OpenOatFile(spaces[i].get(),
boot_class_path_[i],
expected_boot_class_path,
@@ -1766,22 +1683,6 @@
BitMemoryRegion visited_objects_;
};
- template <typename ReferenceVisitor>
- class ClassTableVisitor final {
- public:
- explicit ClassTableVisitor(const ReferenceVisitor& reference_visitor)
- : reference_visitor_(reference_visitor) {}
-
- void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- REQUIRES_SHARED(Locks::mutator_lock_) {
- DCHECK(root->AsMirrorPtr() != nullptr);
- root->Assign(reference_visitor_(root->AsMirrorPtr()));
- }
-
- private:
- ReferenceVisitor reference_visitor_;
- };
-
template <PointerSize kPointerSize>
static void DoRelocateSpaces(const std::vector<std::unique_ptr<ImageSpace>>& spaces,
uint32_t diff) REQUIRES_SHARED(Locks::mutator_lock_) {
@@ -2444,9 +2345,113 @@
return true;
}
+std::string ImageSpace::GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+ const std::string& image_location,
+ InstructionSet image_isa,
+ /*out*/std::string* error_msg) {
+ std::string system_filename;
+ bool has_system = false;
+ std::string cache_filename;
+ bool has_cache = false;
+ bool dalvik_cache_exists = false;
+ bool is_global_cache = false;
+ if (!FindImageFilename(image_location.c_str(),
+ image_isa,
+ &system_filename,
+ &has_system,
+ &cache_filename,
+ &dalvik_cache_exists,
+ &has_cache,
+ &is_global_cache)) {
+ *error_msg = StringPrintf("Unable to find image file for %s and %s",
+ image_location.c_str(),
+ GetInstructionSetString(image_isa));
+ return std::string();
+ }
+
+ DCHECK(has_system || has_cache);
+ const std::string& filename = has_system ? system_filename : cache_filename;
+ std::unique_ptr<ImageHeader> header = ReadSpecificImageHeader(filename.c_str(), error_msg);
+ if (header == nullptr) {
+ return std::string();
+ }
+ if (header->GetComponentCount() == 0u || header->GetComponentCount() > boot_class_path.size()) {
+ *error_msg = StringPrintf("Unexpected component count in %s, received %u, "
+ "expected non-zero and <= %zu",
+ filename.c_str(),
+ header->GetComponentCount(),
+ boot_class_path.size());
+ return std::string();
+ }
+
+ std::string boot_image_checksum =
+ StringPrintf("i;%d/%08x", header->GetComponentCount(), header->GetImageChecksum());
+ ArrayRef<const std::string> boot_class_path_tail =
+ ArrayRef<const std::string>(boot_class_path).SubArray(header->GetComponentCount());
+ for (const std::string& bcp_filename : boot_class_path_tail) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ const ArtDexFileLoader dex_file_loader;
+ if (!dex_file_loader.Open(bcp_filename.c_str(),
+ bcp_filename, // The location does not matter here.
+ /*verify=*/ false,
+ /*verify_checksum=*/ false,
+ error_msg,
+ &dex_files)) {
+ return std::string();
+ }
+ DCHECK(!dex_files.empty());
+ StringAppendF(&boot_image_checksum, ":d");
+ for (const std::unique_ptr<const DexFile>& dex_file : dex_files) {
+ StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+ }
+ }
+ return boot_image_checksum;
+}
+
+std::string ImageSpace::GetBootClassPathChecksums(
+ const std::vector<ImageSpace*>& image_spaces,
+ const std::vector<const DexFile*>& boot_class_path) {
+ DCHECK(!image_spaces.empty());
+ const ImageHeader& primary_header = image_spaces.front()->GetImageHeader();
+ uint32_t component_count = primary_header.GetComponentCount();
+ DCHECK_EQ(component_count, image_spaces.size());
+ std::string boot_image_checksum =
+ StringPrintf("i;%d/%08x", component_count, primary_header.GetImageChecksum());
+ size_t pos = 0u;
+ for (const ImageSpace* space : image_spaces) {
+ size_t num_dex_files = space->oat_file_non_owned_->GetOatDexFiles().size();
+ if (kIsDebugBuild) {
+ CHECK_NE(num_dex_files, 0u);
+ CHECK_LE(space->oat_file_non_owned_->GetOatDexFiles().size(), boot_class_path.size() - pos);
+ for (size_t i = 0; i != num_dex_files; ++i) {
+ CHECK_EQ(space->oat_file_non_owned_->GetOatDexFiles()[i]->GetDexFileLocation(),
+ boot_class_path[pos + i]->GetLocation());
+ }
+ }
+ pos += num_dex_files;
+ }
+ ArrayRef<const DexFile* const> boot_class_path_tail =
+ ArrayRef<const DexFile* const>(boot_class_path).SubArray(pos);
+ DCHECK(boot_class_path_tail.empty() ||
+ !DexFileLoader::IsMultiDexLocation(boot_class_path_tail.front()->GetLocation().c_str()));
+ for (const DexFile* dex_file : boot_class_path_tail) {
+ if (!DexFileLoader::IsMultiDexLocation(dex_file->GetLocation().c_str())) {
+ StringAppendF(&boot_image_checksum, ":d");
+ }
+ StringAppendF(&boot_image_checksum, "/%08x", dex_file->GetLocationChecksum());
+ }
+ return boot_image_checksum;
+}
+
std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
const std::vector<std::string>& dex_locations,
const std::string& image_location) {
+ return ExpandMultiImageLocations(ArrayRef<const std::string>(dex_locations), image_location);
+}
+
+std::vector<std::string> ImageSpace::ExpandMultiImageLocations(
+ ArrayRef<const std::string> dex_locations,
+ const std::string& image_location) {
DCHECK(!dex_locations.empty());
// Find the path.
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index dbc12d1..14e364a 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -24,6 +24,8 @@
namespace art {
+template <typename T> class ArrayRef;
+class DexFile;
class OatFile;
namespace gc {
@@ -124,6 +126,19 @@
bool* has_data,
bool *is_global_cache);
+ // Returns the checksums for the boot image and extra boot class path dex files,
+ // based on the boot class path, image location and ISA (may differ from the ISA of an
+ // initialized Runtime). The boot image and dex files do not need to be loaded in memory.
+ static std::string GetBootClassPathChecksums(const std::vector<std::string>& boot_class_path,
+ const std::string& image_location,
+ InstructionSet image_isa,
+ /*out*/std::string* error_msg);
+
+ // Returns the checksums for the boot image and extra boot class path dex files,
+ // based on the boot image and boot class path dex files loaded in memory.
+ static std::string GetBootClassPathChecksums(const std::vector<ImageSpace*>& image_spaces,
+ const std::vector<const DexFile*>& boot_class_path);
+
// Expand a single image location to multi-image locations based on the dex locations.
static std::vector<std::string> ExpandMultiImageLocations(
const std::vector<std::string>& dex_locations,
@@ -188,7 +203,14 @@
friend class Space;
private:
+ // Internal overload that takes ArrayRef<> instead of vector<>.
+ static std::vector<std::string> ExpandMultiImageLocations(
+ ArrayRef<const std::string> dex_locations,
+ const std::string& image_location);
+
class BootImageLoader;
+ template <typename ReferenceVisitor>
+ class ClassTableVisitor;
class Loader;
template <typename PatchObjectVisitor>
class PatchArtFieldVisitor;
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index 5ff1270..9f5c117 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -193,6 +193,40 @@
return bytes;
}
+template <typename Visitor>
+inline void RegionSpace::ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+ Visitor&& visitor) {
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_ : std::min(num_regions_, non_free_region_index_limit_);
+ // Instead of region-wise scan, find contiguous blocks of un-evac regions and then
+ // visit them. Everything before visit_block_begin has been processed, while
+ // [visit_block_begin, visit_block_end) still needs to be visited.
+ uint8_t* visit_block_begin = nullptr;
+ uint8_t* visit_block_end = nullptr;
+ for (size_t i = 0; i < iter_limit; ++i) {
+    Region* r = &regions_[i];
+ if (r->IsInUnevacFromSpace()) {
+      // visit_block_begin set to nullptr means a new visit block needs to be started.
+ if (visit_block_begin == nullptr) {
+ visit_block_begin = r->Begin();
+ }
+ visit_block_end = r->End();
+ } else if (visit_block_begin != nullptr) {
+ // Visit the block range as r is not adjacent to current visit block.
+ bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+ reinterpret_cast<uintptr_t>(visit_block_end),
+ visitor);
+ visit_block_begin = nullptr;
+ }
+ }
+ // Visit last block, if not processed yet.
+ if (visit_block_begin != nullptr) {
+ bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(visit_block_begin),
+ reinterpret_cast<uintptr_t>(visit_block_end),
+ visitor);
+ }
+}
+
template<bool kToSpaceOnly, typename Visitor>
inline void RegionSpace::WalkInternal(Visitor&& visitor) {
// TODO: MutexLock on region_lock_ won't work due to lock order
@@ -205,9 +239,10 @@
continue;
}
if (r->IsLarge()) {
- // Avoid visiting dead large objects since they may contain dangling pointers to the
- // from-space.
- DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
+ // We may visit a large object with live_bytes = 0 here. However, it is
+ // safe as it cannot contain dangling pointers because corresponding regions
+ // (and regions corresponding to dead referents) cannot be allocated for new
+ // allocations without first clearing regions' live_bytes and state.
mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
DCHECK(obj->GetClass() != nullptr);
visitor(obj);
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 21cae93..07783ba 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -319,6 +319,7 @@
state == RegionState::kRegionStateLarge) &&
type == RegionType::kRegionTypeToSpace);
bool should_evacuate = r->ShouldBeEvacuated(evac_mode);
+ bool is_newly_allocated = r->IsNewlyAllocated();
if (should_evacuate) {
r->SetAsFromSpace();
DCHECK(r->IsInFromSpace());
@@ -329,6 +330,17 @@
if (UNLIKELY(state == RegionState::kRegionStateLarge &&
type == RegionType::kRegionTypeToSpace)) {
prev_large_evacuated = should_evacuate;
+ // In 2-phase full heap GC, this function is called after marking is
+ // done. So, it is possible that some newly allocated large object is
+ // marked but its live_bytes is still -1. We need to clear the
+ // mark-bit otherwise the live_bytes will not be updated in
+ // ConcurrentCopying::ProcessMarkStackRef() and hence will break the
+ // logic.
+ if (kEnableGenerationalConcurrentCopyingCollection
+ && !should_evacuate
+ && is_newly_allocated) {
+ GetMarkBitmap()->Clear(reinterpret_cast<mirror::Object*>(r->Begin()));
+ }
num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
DCHECK_GT(num_expected_large_tails, 0U);
}
@@ -367,7 +379,8 @@
}
void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
- /* out */ uint64_t* cleared_objects) {
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap) {
DCHECK(cleared_bytes != nullptr);
DCHECK(cleared_objects != nullptr);
*cleared_bytes = 0;
@@ -395,13 +408,18 @@
// (see b/62194020).
uint8_t* clear_block_begin = nullptr;
uint8_t* clear_block_end = nullptr;
- auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
+ auto clear_region = [this, &clear_block_begin, &clear_block_end, clear_bitmap](Region* r) {
r->Clear(/*zero_and_release_pages=*/false);
if (clear_block_end != r->Begin()) {
// Region `r` is not adjacent to the current clear block; zero and release
// pages within the current block and restart a new clear block at the
// beginning of region `r`.
ZeroAndProtectRegion(clear_block_begin, clear_block_end);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
clear_block_begin = r->Begin();
}
// Add region `r` to the clear block.
@@ -426,20 +444,23 @@
// It is also better to clear these regions now instead of at the end of the next GC to
// save RAM. If we don't clear the regions here, they will be cleared next GC by the normal
// live percent evacuation logic.
+ *cleared_bytes += r->BytesAllocated();
+ *cleared_objects += r->ObjectsAllocated();
+ clear_region(r);
size_t free_regions = 1;
// Also release RAM for large tails.
while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
- DCHECK(r->IsLarge());
clear_region(&regions_[i + free_regions]);
++free_regions;
}
- *cleared_bytes += r->BytesAllocated();
- *cleared_objects += r->ObjectsAllocated();
num_non_free_regions_ -= free_regions;
- clear_region(r);
- GetLiveBitmap()->ClearRange(
- reinterpret_cast<mirror::Object*>(r->Begin()),
- reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ // When clear_bitmap is true, this clearing of bitmap is taken care in
+ // clear_region().
+ if (!clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(r->Begin()),
+ reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
+ }
continue;
}
r->SetUnevacFromSpaceAsToSpace();
@@ -519,6 +540,11 @@
}
// Clear pages for the last block since clearing happens when a new block opens.
ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
+ if (clear_bitmap) {
+ GetLiveBitmap()->ClearRange(
+ reinterpret_cast<mirror::Object*>(clear_block_begin),
+ reinterpret_cast<mirror::Object*>(clear_block_end));
+ }
// Update non_free_region_index_limit_.
SetNonFreeRegionLimit(new_non_free_region_index_limit);
evac_region_ = nullptr;
@@ -809,8 +835,14 @@
<< " type=" << type_
<< " objects_allocated=" << objects_allocated_
<< " alloc_time=" << alloc_time_
- << " live_bytes=" << live_bytes_
- << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
+ << " live_bytes=" << live_bytes_;
+
+ if (live_bytes_ != static_cast<size_t>(-1)) {
+ os << " ratio over allocated bytes="
+ << (static_cast<float>(live_bytes_) / RoundUp(BytesAllocated(), kRegionSize));
+ }
+
+ os << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
<< " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
<< " thread=" << thread_ << '\n';
}
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 8810f8c..75c99ec 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -209,6 +209,15 @@
template <typename Visitor>
ALWAYS_INLINE void WalkToSpace(Visitor&& visitor) REQUIRES(Locks::mutator_lock_);
+ // Scans regions and calls visitor for objects in unevac-space corresponding
+ // to the bits set in 'bitmap'.
+ // Cannot acquire region_lock_ as visitor may need to acquire it for allocation.
+ // Should not be called concurrently with functions (like SetFromSpace()) which
+ // change regions' type.
+ template <typename Visitor>
+ ALWAYS_INLINE void ScanUnevacFromSpace(accounting::ContinuousSpaceBitmap* bitmap,
+ Visitor&& visitor) NO_THREAD_SAFETY_ANALYSIS;
+
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() override {
return nullptr;
}
@@ -228,6 +237,11 @@
return false;
}
+ bool IsRegionNewlyAllocated(size_t idx) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK_LT(idx, num_regions_);
+ return regions_[idx].IsNewlyAllocated();
+ }
+
bool IsInNewlyAllocatedRegion(mirror::Object* ref) {
if (HasAddress(ref)) {
Region* r = RefToRegionUnlocked(ref);
@@ -291,7 +305,9 @@
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+ void ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+ /* out */ uint64_t* cleared_objects,
+ const bool clear_bitmap)
REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
@@ -310,6 +326,40 @@
}
}
+ void SetAllRegionLiveBytesZero() REQUIRES(!region_lock_) {
+ MutexLock mu(Thread::Current(), region_lock_);
+ const size_t iter_limit = kUseTableLookupReadBarrier
+ ? num_regions_
+ : std::min(num_regions_, non_free_region_index_limit_);
+ for (size_t i = 0; i < iter_limit; ++i) {
+ Region* r = &regions_[i];
+ // Newly allocated regions don't need up-to-date live_bytes_ for deciding
+ // whether to be evacuated or not. See Region::ShouldBeEvacuated().
+ if (!r->IsFree() && !r->IsNewlyAllocated()) {
+ r->ZeroLiveBytes();
+ }
+ }
+ }
+
+ size_t RegionIdxForRefUnchecked(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ DCHECK(HasAddress(ref));
+ uintptr_t offset = reinterpret_cast<uintptr_t>(ref) - reinterpret_cast<uintptr_t>(Begin());
+ size_t reg_idx = offset / kRegionSize;
+ DCHECK_LT(reg_idx, num_regions_);
+ Region* reg = ®ions_[reg_idx];
+ DCHECK_EQ(reg->Idx(), reg_idx);
+ DCHECK(reg->Contains(ref));
+ return reg_idx;
+ }
+ // Return -1 as region index for references outside this region space.
+ size_t RegionIdxForRef(mirror::Object* ref) const NO_THREAD_SAFETY_ANALYSIS {
+ if (HasAddress(ref)) {
+ return RegionIdxForRefUnchecked(ref);
+ } else {
+ return static_cast<size_t>(-1);
+ }
+ }
+
void RecordAlloc(mirror::Object* ref) REQUIRES(!region_lock_);
bool AllocNewTlab(Thread* self, size_t min_bytes) REQUIRES(!region_lock_);
@@ -515,11 +565,10 @@
ALWAYS_INLINE bool ShouldBeEvacuated(EvacMode evac_mode);
void AddLiveBytes(size_t live_bytes) {
- DCHECK(IsInUnevacFromSpace());
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection || IsInUnevacFromSpace());
DCHECK(!IsLargeTail());
DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
- // For large allocations, we always consider all bytes in the
- // regions live.
+ // For large allocations, we always consider all bytes in the regions live.
live_bytes_ += IsLarge() ? Top() - begin_ : live_bytes;
DCHECK_LE(live_bytes_, BytesAllocated());
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index d004d64..2e41a9d 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -366,6 +366,13 @@
}
} else {
// Enter the "with access check" interpreter.
+
+ // The boot classpath should really not have to run access checks.
+ DCHECK(method->GetDeclaringClass()->GetClassLoader() != nullptr
+ || Runtime::Current()->IsVerificationSoftFail()
+ || Runtime::Current()->IsAotCompiler())
+ << method->PrettyMethod();
+
if (kInterpreterImplKind == kMterpImplKind) {
// No access check variants for Mterp. Just use the switch version.
if (transaction_active) {
diff --git a/runtime/intrinsics_list.h b/runtime/intrinsics_list.h
index 82ea476..db43b24 100644
--- a/runtime/intrinsics_list.h
+++ b/runtime/intrinsics_list.h
@@ -219,7 +219,7 @@
V(VarHandleLoadLoadFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "loadLoadFence", "()V") \
V(VarHandleStoreStoreFence, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kNoThrow, "Ljava/lang/invoke/VarHandle;", "storeStoreFence", "()V") \
V(ReachabilityFence, kStatic, kNeedsEnvironmentOrCache, kWriteSideEffects, kNoThrow, "Ljava/lang/ref/Reference;", "reachabilityFence", "(Ljava/lang/Object;)V") \
- V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
+ V(CRC32Update, kStatic, kNeedsEnvironmentOrCache, kNoSideEffects, kNoThrow, "Ljava/util/zip/CRC32;", "update", "(II)I") \
V(CRC32UpdateBytes, kStatic, kNeedsEnvironmentOrCache, kReadSideEffects, kCanThrow, "Ljava/util/zip/CRC32;", "updateBytes", "(I[BII)I") \
SIGNATURE_POLYMORPHIC_INTRINSICS_LIST(V)
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index e43d771..03c97f4 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -58,7 +58,7 @@
void* Jit::jit_compiler_handle_ = nullptr;
void* (*Jit::jit_load_)(void) = nullptr;
void (*Jit::jit_unload_)(void*) = nullptr;
-bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool) = nullptr;
+bool (*Jit::jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool) = nullptr;
void (*Jit::jit_types_loaded_)(void*, mirror::Class**, size_t count) = nullptr;
bool (*Jit::jit_generate_debug_info_)(void*) = nullptr;
void (*Jit::jit_update_options_)(void*) = nullptr;
@@ -242,7 +242,7 @@
return true;
}
-bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) {
+bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr) {
DCHECK(Runtime::Current()->UseJitCompilation());
DCHECK(!method->IsRuntimeMethod());
@@ -272,7 +272,7 @@
VLOG(jit) << "Compiling method "
<< ArtMethod::PrettyMethod(method_to_compile)
<< " osr=" << std::boolalpha << osr;
- bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, osr);
+ bool success = jit_compile_method_(jit_compiler_handle_, method_to_compile, self, baseline, osr);
code_cache_->DoneCompiling(method_to_compile, self, osr);
if (!success) {
VLOG(jit) << "Failed to compile method "
@@ -291,6 +291,12 @@
return success;
}
+void Jit::WaitForWorkersToBeCreated() {
+ if (thread_pool_ != nullptr) {
+ thread_pool_->WaitForWorkersToBeCreated();
+ }
+}
+
void Jit::DeleteThreadPool() {
Thread* self = Thread::Current();
DCHECK(Runtime::Current()->IsShuttingDown(self));
@@ -549,6 +555,7 @@
enum class TaskKind {
kAllocateProfile,
kCompile,
+ kCompileBaseline,
kCompileOsr,
};
@@ -568,10 +575,12 @@
ScopedObjectAccess soa(self);
switch (kind_) {
case TaskKind::kCompile:
+ case TaskKind::kCompileBaseline:
case TaskKind::kCompileOsr: {
Runtime::Current()->GetJit()->CompileMethod(
method_,
self,
+ /* baseline= */ (kind_ == TaskKind::kCompileBaseline),
/* osr= */ (kind_ == TaskKind::kCompileOsr));
break;
}
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 7ce5f07..e5c9766 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -161,7 +161,7 @@
// Create JIT itself.
static Jit* Create(JitCodeCache* code_cache, JitOptions* options);
- bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
+ bool CompileMethod(ArtMethod* method, Thread* self, bool baseline, bool osr)
REQUIRES_SHARED(Locks::mutator_lock_);
const JitCodeCache* GetCodeCache() const {
@@ -174,6 +174,7 @@
void CreateThreadPool();
void DeleteThreadPool();
+ void WaitForWorkersToBeCreated();
// Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative
// loggers.
@@ -304,7 +305,7 @@
static void* jit_compiler_handle_;
static void* (*jit_load_)(void);
static void (*jit_unload_)(void*);
- static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool);
+ static bool (*jit_compile_method_)(void*, ArtMethod*, Thread*, bool, bool);
static void (*jit_types_loaded_)(void*, mirror::Class**, size_t count);
static void (*jit_update_options_)(void*);
static bool (*jit_generate_debug_info_)(void*);
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 185ae3b..679ca43 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -871,6 +871,9 @@
}
inline void Class::SetAccessFlags(uint32_t new_access_flags) {
+ if (kIsDebugBuild) {
+ SetAccessFlagsDCheck(new_access_flags);
+ }
// Called inside a transaction when setting pre-verified flag during boot image compilation.
if (Runtime::Current()->IsActiveTransaction()) {
SetField32<true>(AccessFlagsOffset(), new_access_flags);
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 83d76a9..c5ed1bf 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -206,6 +206,10 @@
}
}
+ if (kIsDebugBuild && new_status >= ClassStatus::kInitialized) {
+ CHECK(h_this->WasVerificationAttempted()) << h_this->PrettyClassAndClassLoader();
+ }
+
if (!class_linker_initialized) {
// When the class linker is being initialized its single threaded and by definition there can be
// no waiters. During initialization classes may appear temporary but won't be retired as their
@@ -1461,5 +1465,12 @@
template void Class::GetAccessFlagsDCheck<kVerifyWrites>();
template void Class::GetAccessFlagsDCheck<kVerifyAll>();
+void Class::SetAccessFlagsDCheck(uint32_t new_access_flags) {
+ uint32_t old_access_flags = GetField32<kVerifyNone>(AccessFlagsOffset());
+ // kAccVerificationAttempted is retained.
+ CHECK((old_access_flags & kAccVerificationAttempted) == 0 ||
+ (new_access_flags & kAccVerificationAttempted) != 0);
+}
+
} // namespace mirror
} // namespace art
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 66b1405..d5aa514 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -1306,6 +1306,8 @@
template<VerifyObjectFlags kVerifyFlags>
void GetAccessFlagsDCheck() REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetAccessFlagsDCheck(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
+
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 3e5003c..892d4cc 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -271,7 +271,7 @@
#endif
}
-static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeAllocationInternal(JNIEnv* env, jobject, jint bytes) {
if (UNLIKELY(bytes < 0)) {
ScopedObjectAccess soa(env);
ThrowRuntimeException("allocation size negative %d", bytes);
@@ -280,11 +280,7 @@
Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes));
}
-static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
- Runtime::Current()->RegisterSensitiveThread();
-}
-
-static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
+static void VMRuntime_registerNativeFreeInternal(JNIEnv* env, jobject, jint bytes) {
if (UNLIKELY(bytes < 0)) {
ScopedObjectAccess soa(env);
ThrowRuntimeException("allocation size negative %d", bytes);
@@ -293,6 +289,18 @@
Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
}
+static jint VMRuntime_getNotifyNativeInterval(JNIEnv*, jclass) {
+ return Runtime::Current()->GetHeap()->GetNotifyNativeInterval();
+}
+
+static void VMRuntime_notifyNativeAllocationsInternal(JNIEnv* env, jobject) {
+ Runtime::Current()->GetHeap()->NotifyNativeAllocations(env);
+}
+
+static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) {
+ Runtime::Current()->RegisterSensitiveThread();
+}
+
static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
Runtime* runtime = Runtime::Current();
runtime->UpdateProcessState(static_cast<ProcessState>(process_state));
@@ -710,9 +718,11 @@
FAST_NATIVE_METHOD(VMRuntime, newUnpaddedArray, "(Ljava/lang/Class;I)Ljava/lang/Object;"),
NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"),
- NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"),
+ NATIVE_METHOD(VMRuntime, registerNativeAllocationInternal, "(I)V"),
+ NATIVE_METHOD(VMRuntime, registerNativeFreeInternal, "(I)V"),
+ NATIVE_METHOD(VMRuntime, getNotifyNativeInterval, "()I"),
+ NATIVE_METHOD(VMRuntime, notifyNativeAllocationsInternal, "()V"),
NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"),
- NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"),
NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"),
NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"),
NATIVE_METHOD(VMRuntime, runHeapTasks, "()V"),
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index b7ac1e8..9ce4749 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -240,11 +240,6 @@
runtime->PreZygoteFork();
- if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
- // Tracing active, pause it.
- Trace::Pause();
- }
-
// Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
return reinterpret_cast<jlong>(ThreadForEnv(env));
}
diff --git a/runtime/oat.cc b/runtime/oat.cc
index e931b28..d7c968f 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -79,8 +79,7 @@
quick_generic_jni_trampoline_offset_(0),
quick_imt_conflict_trampoline_offset_(0),
quick_resolution_trampoline_offset_(0),
- quick_to_interpreter_bridge_offset_(0),
- boot_image_checksum_(0) {
+ quick_to_interpreter_bridge_offset_(0) {
// Don't want asserts in header as they would be checked in each file that includes it. But the
// fields are private, so we check inside a method.
static_assert(sizeof(magic_) == sizeof(kOatMagic),
@@ -316,16 +315,6 @@
quick_to_interpreter_bridge_offset_ = offset;
}
-uint32_t OatHeader::GetBootImageChecksum() const {
- CHECK(IsValid());
- return boot_image_checksum_;
-}
-
-void OatHeader::SetBootImageChecksum(uint32_t boot_image_checksum) {
- CHECK(IsValid());
- boot_image_checksum_ = boot_image_checksum;
-}
-
uint32_t OatHeader::GetKeyValueStoreSize() const {
CHECK(IsValid());
return key_value_store_size_;
diff --git a/runtime/oat.h b/runtime/oat.h
index b09c81e..ded1489 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -31,8 +31,8 @@
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- // Last oat version changed reason: Pass boot class path to LoadBootImage.
- static constexpr uint8_t kOatVersion[] = { '1', '6', '5', '\0' };
+ // Last oat version changed reason: Partial boot image.
+ static constexpr uint8_t kOatVersion[] = { '1', '6', '6', '\0' };
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
static constexpr const char* kDebuggableKey = "debuggable";
@@ -40,6 +40,7 @@
static constexpr const char* kCompilerFilter = "compiler-filter";
static constexpr const char* kClassPathKey = "classpath";
static constexpr const char* kBootClassPathKey = "bootclasspath";
+ static constexpr const char* kBootClassPathChecksumsKey = "bootclasspath-checksums";
static constexpr const char* kConcurrentCopying = "concurrent-copying";
static constexpr const char* kCompilationReasonKey = "compilation-reason";
@@ -93,9 +94,6 @@
InstructionSet GetInstructionSet() const;
uint32_t GetInstructionSetFeaturesBitmap() const;
- uint32_t GetBootImageChecksum() const;
- void SetBootImageChecksum(uint32_t boot_image_checksum);
-
uint32_t GetKeyValueStoreSize() const;
const uint8_t* GetKeyValueStore() const;
const char* GetStoreValueByKey(const char* key) const;
@@ -137,8 +135,6 @@
uint32_t quick_resolution_trampoline_offset_;
uint32_t quick_to_interpreter_bridge_offset_;
- uint32_t boot_image_checksum_;
-
uint32_t key_value_store_size_;
uint8_t key_value_store_[0]; // note variable width data at end
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
index 6f32b98..8b81bb9 100644
--- a/runtime/oat_file_assistant.cc
+++ b/runtime/oat_file_assistant.cc
@@ -419,7 +419,7 @@
// starts up.
LOG(WARNING) << "Dex location " << dex_location_ << " does not seem to include dex file. "
<< "Allow oat file use. This is potentially dangerous.";
- } else if (file.GetOatHeader().GetBootImageChecksum() != image_info->boot_image_checksum) {
+ } else if (!image_info->ValidateBootClassPathChecksums(file)) {
VLOG(oat) << "Oat image checksum does not match image checksum.";
return kOatBootImageOutOfDate;
}
@@ -560,6 +560,13 @@
return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr;
}
+bool OatFileAssistant::ImageInfo::ValidateBootClassPathChecksums(const OatFile& oat_file) const {
+ const char* oat_boot_class_path_checksums =
+ oat_file.GetOatHeader().GetStoreValueByKey(OatHeader::kBootClassPathChecksumsKey);
+ return oat_boot_class_path_checksums != nullptr &&
+ oat_boot_class_path_checksums == boot_class_path_checksums;
+}
+
std::unique_ptr<OatFileAssistant::ImageInfo>
OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) {
CHECK(error_msg != nullptr);
@@ -567,14 +574,11 @@
Runtime* runtime = Runtime::Current();
std::unique_ptr<ImageInfo> info(new ImageInfo());
info->location = runtime->GetImageLocation();
-
- std::unique_ptr<ImageHeader> image_header(
- gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg));
- if (image_header == nullptr) {
+ info->boot_class_path_checksums = gc::space::ImageSpace::GetBootClassPathChecksums(
+ runtime->GetBootClassPath(), info->location, isa, error_msg);
+ if (info->boot_class_path_checksums.empty()) {
return nullptr;
}
-
- info->boot_image_checksum = image_header->GetImageChecksum();
return info;
}
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
index 09c9d3b..def55b8 100644
--- a/runtime/oat_file_assistant.h
+++ b/runtime/oat_file_assistant.h
@@ -246,8 +246,10 @@
private:
struct ImageInfo {
- uint32_t boot_image_checksum = 0;
+ bool ValidateBootClassPathChecksums(const OatFile& oat_file) const;
+
std::string location;
+ std::string boot_class_path_checksums;
static std::unique_ptr<ImageInfo> GetRuntimeImageInfo(InstructionSet isa,
std::string* error_msg);
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 17ff3a2..4a04259 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -151,6 +151,10 @@
.IntoKey(M::LongGCLogThreshold)
.Define("-XX:DumpGCPerformanceOnShutdown")
.IntoKey(M::DumpGCPerformanceOnShutdown)
+ .Define("-XX:DumpRegionInfoBeforeGC")
+ .IntoKey(M::DumpRegionInfoBeforeGC)
+ .Define("-XX:DumpRegionInfoAfterGC")
+ .IntoKey(M::DumpRegionInfoAfterGC)
.Define("-XX:DumpJITInfoOnShutdown")
.IntoKey(M::DumpJITInfoOnShutdown)
.Define("-XX:IgnoreMaxFootprint")
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 69ef2fb..7eac3d9 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -320,7 +320,8 @@
}
if (dump_gc_performance_on_shutdown_) {
- heap_->CalculateWeightedAllocatedBytes();
+ heap_->CalculatePreGcWeightedAllocatedBytes();
+ heap_->CalculatePostGcWeightedAllocatedBytes();
uint64_t process_cpu_end_time = ProcessCpuNanoTime();
ScopedLogSeverity sls(LogSeverity::INFO);
// This can't be called from the Heap destructor below because it
@@ -335,13 +336,22 @@
<< " out of process CPU time " << PrettyDuration(process_cpu_time)
<< " (" << ratio << ")"
<< "\n";
- double weighted_allocated_bytes = heap_->GetWeightedAllocatedBytes() / process_cpu_time;
- LOG_STREAM(INFO) << "Weighted bytes allocated over CPU time: "
- << " (" << PrettySize(weighted_allocated_bytes) << ")"
+ double pre_gc_weighted_allocated_bytes =
+ heap_->GetPreGcWeightedAllocatedBytes() / process_cpu_time;
+ double post_gc_weighted_allocated_bytes =
+ heap_->GetPostGcWeightedAllocatedBytes() / process_cpu_time;
+
+ LOG_STREAM(INFO) << "Pre GC weighted bytes allocated over CPU time: "
+ << " (" << PrettySize(pre_gc_weighted_allocated_bytes) << ")";
+ LOG_STREAM(INFO) << "Post GC weighted bytes allocated over CPU time: "
+ << " (" << PrettySize(post_gc_weighted_allocated_bytes) << ")"
<< "\n";
}
if (jit_ != nullptr) {
+ // Wait for the workers to be created since there can't be any threads attaching during
+ // shutdown.
+ jit_->WaitForWorkersToBeCreated();
// Stop the profile saver thread before marking the runtime as shutting down.
// The saver will try to dump the profiles before being stopped and that
// requires holding the mutator lock.
@@ -964,8 +974,8 @@
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
- const std::vector<std::string>& dex_locations,
+static size_t OpenDexFiles(ArrayRef<const std::string> dex_filenames,
+ ArrayRef<const std::string> dex_locations,
std::vector<std::unique_ptr<const DexFile>>* dex_files) {
DCHECK(dex_files != nullptr) << "OpenDexFiles: out-param is nullptr";
size_t failure_count = 0;
@@ -1239,7 +1249,9 @@
xgc_option.gcstress_,
xgc_option.measure_,
runtime_options.GetOrDefault(Opt::EnableHSpaceCompactForOOM),
- runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs));
+ runtime_options.GetOrDefault(Opt::HSpaceCompactForOOMMinIntervalsMs),
+ runtime_options.Exists(Opt::DumpRegionInfoBeforeGC),
+ runtime_options.Exists(Opt::DumpRegionInfoAfterGC));
if (!heap_->HasBootImageSpace() && !allow_dex_file_fallback_) {
LOG(ERROR) << "Dex file fallback disabled, cannot continue without image.";
@@ -1422,6 +1434,21 @@
GetInternTable()->AddImageStringsToTable(image_space, VoidFunctor());
}
}
+ if (heap_->GetBootImageSpaces().size() != GetBootClassPath().size()) {
+ // The boot image did not contain all boot class path components. Load the rest.
+ DCHECK_LT(heap_->GetBootImageSpaces().size(), GetBootClassPath().size());
+ size_t start = heap_->GetBootImageSpaces().size();
+ DCHECK_LT(start, GetBootClassPath().size());
+ std::vector<std::unique_ptr<const DexFile>> extra_boot_class_path;
+ if (runtime_options.Exists(Opt::BootClassPathDexList)) {
+ extra_boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
+ } else {
+ OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()).SubArray(start),
+ ArrayRef<const std::string>(GetBootClassPathLocations()).SubArray(start),
+ &extra_boot_class_path);
+ }
+ class_linker_->AddExtraBootDexFiles(self, std::move(extra_boot_class_path));
+ }
if (IsJavaDebuggable()) {
// Now that we have loaded the boot image, deoptimize its methods if we are running
// debuggable, as the code may have been compiled non-debuggable.
@@ -1432,7 +1459,9 @@
if (runtime_options.Exists(Opt::BootClassPathDexList)) {
boot_class_path.swap(*runtime_options.GetOrDefault(Opt::BootClassPathDexList));
} else {
- OpenDexFiles(GetBootClassPath(), GetBootClassPathLocations(), &boot_class_path);
+ OpenDexFiles(ArrayRef<const std::string>(GetBootClassPath()),
+ ArrayRef<const std::string>(GetBootClassPathLocations()),
+ &boot_class_path);
}
if (!class_linker_->InitWithoutImage(std::move(boot_class_path), &error_msg)) {
LOG(ERROR) << "Could not initialize without image: " << error_msg;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index 2b2919e..222c821 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -64,6 +64,8 @@
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
ThreadSuspendTimeout, ThreadList::kDefaultThreadSuspendTimeout)
RUNTIME_OPTIONS_KEY (Unit, DumpGCPerformanceOnShutdown)
+RUNTIME_OPTIONS_KEY (Unit, DumpRegionInfoBeforeGC)
+RUNTIME_OPTIONS_KEY (Unit, DumpRegionInfoAfterGC)
RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8bec2d9..f459f9c 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -160,6 +160,7 @@
}
void Thread::InitTlsEntryPoints() {
+ ScopedTrace trace("InitTlsEntryPoints");
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
uintptr_t* end = reinterpret_cast<uintptr_t*>(
@@ -903,6 +904,8 @@
tlsPtr_.pthread_self = pthread_self();
CHECK(is_started_);
+ ScopedTrace trace("Thread::Init");
+
SetUpAlternateSignalStack();
if (!InitStackHwm()) {
return false;
@@ -912,7 +915,10 @@
RemoveSuspendTrigger();
InitCardTable();
InitTid();
- interpreter::InitInterpreterTls(this);
+ {
+ ScopedTrace trace2("InitInterpreterTls");
+ interpreter::InitInterpreterTls(this);
+ }
#ifdef ART_TARGET_ANDROID
__get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
@@ -936,6 +942,7 @@
}
}
+ ScopedTrace trace3("ThreadList::Register");
thread_list->Register(this);
return true;
}
@@ -943,6 +950,7 @@
template <typename PeerAction>
Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) {
Runtime* runtime = Runtime::Current();
+ ScopedTrace trace("Thread::Attach");
if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " <<
((thread_name != nullptr) ? thread_name : "(Unnamed)");
@@ -950,6 +958,7 @@
}
Thread* self;
{
+ ScopedTrace trace2("Thread birth");
MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
LOG(WARNING) << "Thread attaching while runtime is shutting down: " <<
@@ -1251,6 +1260,7 @@
}
bool Thread::InitStackHwm() {
+ ScopedTrace trace("InitStackHwm");
void* read_stack_base;
size_t read_stack_size;
size_t read_guard_size;
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index de698c2..e1c756d 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2012 The Android Open Source Project
*
@@ -86,7 +87,7 @@
void ThreadPoolWorker::Run() {
Thread* self = Thread::Current();
Task* task = nullptr;
- thread_pool_->creation_barier_.Wait(self);
+ thread_pool_->creation_barier_.Pass(self);
while ((task = thread_pool_->GetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
@@ -150,7 +151,7 @@
MutexLock mu(self, task_queue_lock_);
shutting_down_ = false;
// Add one since the caller of constructor waits on the barrier too.
- creation_barier_.Init(self, max_active_workers_ + 1);
+ creation_barier_.Init(self, max_active_workers_);
while (GetThreadCount() < max_active_workers_) {
const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
GetThreadCount());
@@ -158,8 +159,16 @@
new ThreadPoolWorker(this, worker_name, worker_stack_size_));
}
}
- // Wait for all of the threads to attach.
- creation_barier_.Wait(Thread::Current());
+}
+
+void ThreadPool::WaitForWorkersToBeCreated() {
+ creation_barier_.Increment(Thread::Current(), 0);
+}
+
+const std::vector<ThreadPoolWorker*>& ThreadPool::GetWorkers() {
+ // Wait for all the workers to be created before returning them.
+ WaitForWorkersToBeCreated();
+ return threads_;
}
void ThreadPool::DeleteThreads() {
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index f55d72e..0a2a50c 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -101,9 +101,7 @@
return threads_.size();
}
- const std::vector<ThreadPoolWorker*>& GetWorkers() const {
- return threads_;
- }
+ const std::vector<ThreadPoolWorker*>& GetWorkers();
// Broadcast to the workers and tell them to empty out the work queue.
void StartWorkers(Thread* self) REQUIRES(!task_queue_lock_);
@@ -154,6 +152,9 @@
// Set the "nice" priorty for threads in the pool.
void SetPthreadPriority(int priority);
+ // Wait for workers to be created.
+ void WaitForWorkersToBeCreated();
+
protected:
// get a task to run, blocks if there are no tasks left
virtual Task* GetTask(Thread* self) REQUIRES(!task_queue_lock_);
diff --git a/runtime/trace.cc b/runtime/trace.cc
index f6c36cf..ce955d8 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -517,106 +517,6 @@
}
}
-void Trace::Pause() {
- bool stop_alloc_counting = false;
- Runtime* runtime = Runtime::Current();
- Trace* the_trace = nullptr;
-
- Thread* const self = Thread::Current();
- pthread_t sampling_pthread = 0U;
- {
- MutexLock mu(self, *Locks::trace_lock_);
- if (the_trace_ == nullptr) {
- LOG(ERROR) << "Trace pause requested, but no trace currently running";
- return;
- } else {
- the_trace = the_trace_;
- sampling_pthread = sampling_pthread_;
- }
- }
-
- if (sampling_pthread != 0U) {
- {
- MutexLock mu(self, *Locks::trace_lock_);
- the_trace_ = nullptr;
- }
- CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
- sampling_pthread_ = 0U;
- {
- MutexLock mu(self, *Locks::trace_lock_);
- the_trace_ = the_trace;
- }
- }
-
- if (the_trace != nullptr) {
- gc::ScopedGCCriticalSection gcs(self,
- gc::kGcCauseInstrumentation,
- gc::kCollectorTypeInstrumentation);
- ScopedSuspendAll ssa(__FUNCTION__);
- stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
-
- if (the_trace->trace_mode_ == TraceMode::kSampling) {
- MutexLock mu(self, *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
- } else {
- runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
- runtime->GetInstrumentation()->RemoveListener(
- the_trace,
- instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kMethodUnwind);
- }
- }
-
- if (stop_alloc_counting) {
- // Can be racy since SetStatsEnabled is not guarded by any locks.
- Runtime::Current()->SetStatsEnabled(false);
- }
-}
-
-void Trace::Resume() {
- Thread* self = Thread::Current();
- Trace* the_trace;
- {
- MutexLock mu(self, *Locks::trace_lock_);
- if (the_trace_ == nullptr) {
- LOG(ERROR) << "No trace to resume (or sampling mode), ignoring this request";
- return;
- }
- the_trace = the_trace_;
- }
-
- Runtime* runtime = Runtime::Current();
-
- // Enable count of allocs if specified in the flags.
- bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0;
-
- {
- gc::ScopedGCCriticalSection gcs(self,
- gc::kGcCauseInstrumentation,
- gc::kCollectorTypeInstrumentation);
- ScopedSuspendAll ssa(__FUNCTION__);
-
- // Reenable.
- if (the_trace->trace_mode_ == TraceMode::kSampling) {
- CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
- reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread");
- } else {
- runtime->GetInstrumentation()->AddListener(the_trace,
- instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kMethodUnwind);
- // TODO: In full-PIC mode, we don't need to fully deopt.
- runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
- }
- }
-
- // Can't call this when holding the mutator lock.
- if (enable_stats) {
- runtime->SetStatsEnabled(true);
- }
-}
-
TracingMode Trace::GetMethodTracingMode() {
MutexLock mu(Thread::Current(), *Locks::trace_lock_);
if (the_trace_ == nullptr) {
diff --git a/runtime/trace.h b/runtime/trace.h
index 1089962..582f756 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -156,9 +156,6 @@
REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
!Locks::trace_lock_);
- static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_);
- static void Resume() REQUIRES(!Locks::trace_lock_);
-
// Stop tracing. This will finish the trace and write it to file/send it via DDMS.
static void Stop()
REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
diff --git a/test/175-alloc-big-bignums/expected.txt b/test/175-alloc-big-bignums/expected.txt
new file mode 100644
index 0000000..f75da10
--- /dev/null
+++ b/test/175-alloc-big-bignums/expected.txt
@@ -0,0 +1 @@
+Test complete
diff --git a/test/175-alloc-big-bignums/info.txt b/test/175-alloc-big-bignums/info.txt
new file mode 100644
index 0000000..8f6bcc3
--- /dev/null
+++ b/test/175-alloc-big-bignums/info.txt
@@ -0,0 +1,11 @@
+Allocate large numbers of huge BigIntegers in rapid succession. Most of the
+associated memory will be in the C++ heap. This makes sure that we trigger
+the garbage collector often enough to prevent us from running out of memory.
+
+The test allocates roughly 10GB of native memory, approximately 1MB of which
+will be live at any point. Basically all native memory deallocation is
+triggered by Java garbage collection.
+
+This test is a lot nastier than it looks. In particular, failure on target tends
+to exhaust device memory, and kill off all processes on the device, including the
+adb daemon :-( .
diff --git a/test/175-alloc-big-bignums/src/Main.java b/test/175-alloc-big-bignums/src/Main.java
new file mode 100644
index 0000000..5fbeb46
--- /dev/null
+++ b/test/175-alloc-big-bignums/src/Main.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.math.BigInteger;
+
+// This is motivated by the assumption that BigInteger allocates malloc memory
+// underneath. That's true (in 2018) on Android.
+
+public class Main {
+ public static void main(String[] args) throws Exception {
+ final int nIters = 20_000; // Presumed < 1_000_000.
+ final BigInteger big2_20 = BigInteger.valueOf(1024*1024); // 2^20
+ BigInteger huge = BigInteger.valueOf(1).shiftLeft(4_000_000); // ~0.5MB
+ for (int i = 0; i < nIters; ++i) { // 10 GB total
+ huge = huge.add(BigInteger.ONE);
+ }
+ if (huge.bitLength() != 4_000_001) {
+ System.out.println("Wrong answer length: " + huge.bitLength());
+ } else if (huge.mod(big2_20).compareTo(BigInteger.valueOf(nIters)) != 0) {
+ System.out.println("Wrong answer: ..." + huge.mod(big2_20));
+ } else {
+ System.out.println("Test complete");
+ }
+ }
+}
diff --git a/test/1934-jvmti-signal-thread/signal_threads.cc b/test/1934-jvmti-signal-thread/signal_threads.cc
index 726a7a86..dfb08c1 100644
--- a/test/1934-jvmti-signal-thread/signal_threads.cc
+++ b/test/1934-jvmti-signal-thread/signal_threads.cc
@@ -47,19 +47,19 @@
jvmti_env,
jvmti_env->Allocate(sizeof(NativeMonitor),
reinterpret_cast<unsigned char**>(&mon)))) {
- return -1l;
+ return -1L;
}
if (JvmtiErrorToException(env,
jvmti_env,
jvmti_env->CreateRawMonitor("test-1934 start",
&mon->start_monitor))) {
- return -1l;
+ return -1L;
}
if (JvmtiErrorToException(env,
jvmti_env,
jvmti_env->CreateRawMonitor("test-1934 continue",
&mon->continue_monitor))) {
- return -1l;
+ return -1L;
}
mon->should_continue = false;
mon->should_start = false;
@@ -92,7 +92,7 @@
while (!mon->should_continue) {
if (JvmtiErrorToException(env,
jvmti_env,
- jvmti_env->RawMonitorWait(mon->continue_monitor, -1l))) {
+ jvmti_env->RawMonitorWait(mon->continue_monitor, -1L))) {
JvmtiErrorToException(env, jvmti_env, jvmti_env->RawMonitorExit(mon->continue_monitor));
return;
}
@@ -112,7 +112,7 @@
while (!mon->should_start) {
if (JvmtiErrorToException(env,
jvmti_env,
- jvmti_env->RawMonitorWait(mon->start_monitor, -1l))) {
+ jvmti_env->RawMonitorWait(mon->start_monitor, -1L))) {
return;
}
}
diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc
index 17ccd9a..00827cf 100644
--- a/test/566-polymorphic-inlining/polymorphic_inline.cc
+++ b/test/566-polymorphic-inlining/polymorphic_inline.cc
@@ -46,7 +46,7 @@
usleep(1000);
}
// Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(method, soa.Self(), /* osr */ false);
+ jit->CompileMethod(method, soa.Self(), /*baseline=*/ false, /*osr=*/ false);
}
CodeInfo info(header);
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index b2b3634..dc0e94c 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -128,7 +128,7 @@
// Sleep to yield to the compiler thread.
usleep(1000);
// Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(m, Thread::Current(), /* osr */ true);
+ jit->CompileMethod(m, Thread::Current(), /*baseline=*/ false, /*osr=*/ true);
}
});
}
diff --git a/test/626-const-class-linking/clear_dex_cache_types.cc b/test/626-const-class-linking/clear_dex_cache_types.cc
index 52367c7..82c82c6 100644
--- a/test/626-const-class-linking/clear_dex_cache_types.cc
+++ b/test/626-const-class-linking/clear_dex_cache_types.cc
@@ -41,6 +41,7 @@
if (status == ClassStatus::kResolved) {
ObjectLock<mirror::Class> lock(soa.Self(), klass);
klass->SetStatus(klass, ClassStatus::kVerified, soa.Self());
+ klass->SetVerificationAttempted();
} else {
LOG(ERROR) << klass->PrettyClass() << " has unexpected status: " << status;
}
diff --git a/test/918-fields/expected.txt b/test/918-fields/expected.txt
index af78615..0114ccc 100644
--- a/test/918-fields/expected.txt
+++ b/test/918-fields/expected.txt
@@ -2,9 +2,9 @@
class java.lang.Math
25
false
-[value, I, null]
-class java.lang.Integer
-18
+[bytesTransferred, I, null]
+class java.io.InterruptedIOException
+1
false
[this$0, Lart/Test918;, null]
class art.Test918$Foo
@@ -18,3 +18,7 @@
class art.Test918$Generics
0
false
+[privateValue, I, null]
+class art.Test918$Generics
+2
+false
diff --git a/test/918-fields/src/art/Test918.java b/test/918-fields/src/art/Test918.java
index ca23c03..5328b0b 100644
--- a/test/918-fields/src/art/Test918.java
+++ b/test/918-fields/src/art/Test918.java
@@ -16,6 +16,7 @@
package art;
+import java.io.InterruptedIOException;
import java.lang.reflect.Field;
import java.util.Arrays;
@@ -26,10 +27,11 @@
public static void doTest() throws Exception {
testField(Math.class, "PI");
- testField(Integer.class, "value");
+ testField(InterruptedIOException.class, "bytesTransferred");
testField(Foo.class, "this$0");
testField(Bar.class, "VAL");
testField(Generics.class, "generics");
+ testField(Generics.class, "privateValue");
}
private static void testField(Class<?> base, String fieldName)
@@ -71,5 +73,6 @@
private static class Generics<T> {
T generics;
+ private int privateValue = 42;
}
}
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 5d07601..e3157ef 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -47,12 +47,8 @@
# Also need libopenjdkjvmti.
ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += libopenjdkjvmti-target libopenjdkjvmtid-target
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/core-simple-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/conscrypt-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/okhttp-testdex.jar
-ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += $(TARGET_OUT_JAVA_LIBRARIES)/bouncycastle-testdex.jar
+ART_TEST_TARGET_RUN_TEST_DEPENDENCIES += \
+ $(foreach jar,$(TARGET_TEST_CORE_JARS),$(TARGET_OUT_JAVA_LIBRARIES)/$(jar).jar)
# All tests require the host executables. The tests also depend on the core images, but on
# specific version depending on the compiler.
@@ -74,6 +70,7 @@
$(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkd$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmti$(ART_HOST_SHLIB_EXTENSION) \
$(ART_HOST_OUT_SHARED_LIBRARIES)/libopenjdkjvmtid$(ART_HOST_SHLIB_EXTENSION) \
+ $(HOST_CORE_DEX_LOCATIONS) \
ifneq ($(HOST_PREFER_32_BIT),true)
ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 65127fc..55631a9 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -227,7 +227,7 @@
// Make sure there is a profiling info, required by the compiler.
ProfilingInfo::Create(self, method, /* retry_allocation */ true);
// Will either ensure it's compiled or do the compilation itself.
- jit->CompileMethod(method, self, /* osr */ false);
+ jit->CompileMethod(method, self, /*baseline=*/ false, /*osr=*/ false);
}
}
}
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 4e5152b..25b8b4b 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -34,6 +34,8 @@
HOST="n"
BIONIC="n"
CREATE_ANDROID_ROOT="n"
+USE_ZIPAPEX="n"
+ZIPAPEX_LOC=""
INTERPRETER="n"
JIT="n"
INVOKE_WITH=""
@@ -49,6 +51,7 @@
STRIP_DEX="n"
SECONDARY_DEX=""
TIME_OUT="gdb" # "n" (disabled), "timeout" (use timeout), "gdb" (use gdb)
+TIMEOUT_DUMPER=timeout_dumper
# Value in seconds
if [ "$ART_USE_READ_BARRIER" != "false" ]; then
TIME_OUT_VALUE=2400 # 40 minutes.
@@ -220,6 +223,11 @@
# host ones which are in a different location.
CREATE_ANDROID_ROOT="y"
shift
+ elif [ "x$1" = "x--runtime-zipapex" ]; then
+ shift
+ USE_ZIPAPEX="y"
+ ZIPAPEX_LOC="$1"
+ shift
elif [ "x$1" = "x--no-prebuild" ]; then
PREBUILD="n"
shift
@@ -541,7 +549,10 @@
exit
fi
-bpath_modules="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+# because that's what we use for compiling the core.art image.
+# It may contain additional modules from TEST_CORE_JARS.
+bpath_modules="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt"
if [ "${HOST}" = "y" ]; then
framework="${ANDROID_HOST_OUT}/framework"
if [ "${ANDROID_HOST_OUT:0:${#ANDROID_BUILD_TOP}+1}" = "${ANDROID_BUILD_TOP}/" ]; then
@@ -681,6 +692,8 @@
echo "linux_bionic-x86 target doesn't seem to have been built!" >&2
exit 1
fi
+ # Set timeout_dumper manually so it works even with apexes
+ TIMEOUT_DUMPER=$OUT_DIR/soong/host/linux_bionic-x86/bin/timeout_dumper
fi
# Prevent test from silently falling back to interpreter in no-prebuild mode. This happens
@@ -703,6 +716,8 @@
exit 1
fi
+BIN_DIR=$ANDROID_ROOT/bin
+
profman_cmdline="true"
dex2oat_cmdline="true"
vdex_cmdline="true"
@@ -712,6 +727,8 @@
sync_cmdline="true"
linkroot_cmdline="true"
linkroot_overlay_cmdline="true"
+setupapex_cmdline="true"
+installapex_cmdline="true"
linkdirs() {
find "$1" -maxdepth 1 -mindepth 1 -type d | xargs -i ln -sf '{}' "$2"
@@ -726,10 +743,23 @@
fi
fi
+if [ "$USE_ZIPAPEX" = "y" ]; then
+ # TODO Currently this only works for linux_bionic zipapexes because those are
+ # stripped and so small enough that the ulimit doesn't kill us.
+ mkdir_locations="${mkdir_locations} $DEX_LOCATION/zipapex"
+ zip_options="-qq"
+ if [ "$DEV_MODE" = "y" ]; then
+ zip_options=""
+ fi
+ setupapex_cmdline="unzip -u ${zip_options} ${ZIPAPEX_LOC} apex_payload.zip -d ${DEX_LOCATION}"
+ installapex_cmdline="unzip -u ${zip_options} ${DEX_LOCATION}/apex_payload.zip -d ${DEX_LOCATION}/zipapex"
+ BIN_DIR=$DEX_LOCATION/zipapex/bin
+fi
+
# PROFILE takes precedence over RANDOM_PROFILE, since PROFILE tests require a
# specific profile to run properly.
if [ "$PROFILE" = "y" ] || [ "$RANDOM_PROFILE" = "y" ]; then
- profman_cmdline="${ANDROID_ROOT}/bin/profman \
+ profman_cmdline="$BIN_DIR/profman \
--apk=$DEX_LOCATION/$TEST_NAME.jar \
--dex-location=$DEX_LOCATION/$TEST_NAME.jar"
if [ -f $DEX_LOCATION/$TEST_NAME-ex.jar ]; then
@@ -759,7 +789,7 @@
if [[ "$TEST_IS_NDEBUG" = "y" ]]; then
dex2oat_binary=dex2oat
fi
- dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/$dex2oat_binary \
+ dex2oat_cmdline="$INVOKE_WITH $BIN_DIR/$dex2oat_binary \
$COMPILE_FLAGS \
--boot-image=${BOOT_IMAGE} \
--dex-file=$DEX_LOCATION/$TEST_NAME.jar \
@@ -816,7 +846,7 @@
# We set DumpNativeStackOnSigQuit to false to avoid stressing libunwind.
# b/27185632
# b/24664297
-dalvikvm_cmdline="$INVOKE_WITH $GDB $ANDROID_ROOT/bin/$DALVIKVM \
+dalvikvm_cmdline="$INVOKE_WITH $GDB $BIN_DIR/$DALVIKVM \
$GDB_ARGS \
$FLAGS \
$DEX_VERIFY \
@@ -924,7 +954,7 @@
rm -rf ${DEX_LOCATION}/dalvik-cache/ && \
mkdir -p ${mkdir_locations} && \
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH && \
- export PATH=$ANDROID_ROOT/bin:$PATH && \
+ export PATH=$BIN_DIR:$PATH && \
$profman_cmdline && \
$dex2oat_cmdline && \
$dm_cmdline && \
@@ -966,8 +996,12 @@
export ANDROID_ROOT="${ANDROID_ROOT}"
export ANDROID_RUNTIME_ROOT="${ANDROID_RUNTIME_ROOT}"
export LD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
+ if [ "$USE_ZIPAPEX" = "y" ]; then
+ # Put the zipapex files in front of the ld-library-path
+ export LD_LIBRARY_PATH="${ANDROID_DATA}/zipapex/${LIBRARY_DIRECTORY}:${LD_LIBRARY_PATH}"
+ fi
export DYLD_LIBRARY_PATH="${ANDROID_ROOT}/${LIBRARY_DIRECTORY}:${ANDROID_ROOT}/${TEST_DIRECTORY}"
- export PATH="$PATH:${ANDROID_ROOT}/bin"
+ export PATH="$PATH:$BIN_DIR"
# Temporarily disable address space layout randomization (ASLR).
# This is needed on the host so that the linker loads core.oat at the necessary address.
@@ -998,7 +1032,8 @@
# Note: We first send SIGRTMIN+2 (usually 36) to ART, which will induce a full thread dump
# before abort. However, dumping threads might deadlock, so we also use the "-k"
# option to definitely kill the child.
- cmdline="timeout -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s timeout_dumper $cmdline"
+ # Note: Using "--foreground" to not propagate the signal to children, i.e., the runtime.
+ cmdline="timeout --foreground -k 120s -s SIGRTMIN+2 ${TIME_OUT_VALUE}s ${TIMEOUT_DUMPER} $cmdline"
fi
if [ "$DEV_MODE" = "y" ]; then
@@ -1006,7 +1041,7 @@
echo EXPORT $var=${!var}
done
echo "$(declare -f linkdirs)"
- echo "mkdir -p ${mkdir_locations} && $linkroot_cmdline && $linkroot_overlay_cmdline && $profman_cmdline && $dex2oat_cmdline && $dm_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
+ echo "mkdir -p ${mkdir_locations} && $setupapex_cmdline && $installapex_cmdline && $linkroot_cmdline && $linkroot_overlay_cmdline && $profman_cmdline && $dex2oat_cmdline && $dm_cmdline && $vdex_cmdline && $strip_cmdline && $sync_cmdline && $cmdline"
fi
cd $ANDROID_BUILD_TOP
@@ -1020,6 +1055,8 @@
export ASAN_OPTIONS=$RUN_TEST_ASAN_OPTIONS
mkdir -p ${mkdir_locations} || exit 1
+ $setupapex_cmdline || { echo "zipapex extraction failed." >&2 ; exit 2; }
+ $installapex_cmdline || { echo "zipapex install failed." >&2 ; exit 2; }
$linkroot_cmdline || { echo "create symlink android-root failed." >&2 ; exit 2; }
$linkroot_overlay_cmdline || { echo "overlay android-root failed." >&2 ; exit 2; }
$profman_cmdline || { echo "Profman failed." >&2 ; exit 2; }
diff --git a/test/knownfailures.json b/test/knownfailures.json
index ae20557..879f2fd 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -447,19 +447,63 @@
},
{
"tests": [
- "137-cfi",
- "595-profile-saving",
- "900-hello-plugin",
- "909-attach-agent",
- "981-dedup-original-dex",
- "1900-track-alloc"
+ "004-ThreadStress",
+ "130-hprof",
+ "579-inline-infinite",
+ "1946-list-descriptors"
],
- "description": ["Tests that require exact knowledge of the number of plugins and agents."],
+ "description": ["Too slow to finish in the timeout"],
"variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
},
{
"tests": [
+ "911-get-stack-trace"
+ ],
+ "description": ["Tests that fail when run with step-stress for unknown reasons."],
+ "bug": "b/120995005",
+ "variant": "jvmti-stress | step-stress"
+ },
+ {
+ "tests": [
+ "004-SignalTest",
+ "004-StackWalk",
+ "064-field-access",
+ "083-compiler-regressions",
+ "098-ddmc",
+ "107-int-math2",
+ "129-ThreadGetId",
+ "135-MirandaDispatch",
"132-daemon-locks-shutdown",
+ "163-app-image-methods",
+ "607-daemon-stress",
+ "674-hiddenapi",
+ "687-deopt",
+ "904-object-allocation"
+ ],
+ "description": ["Tests that sometimes fail when run with jvmti-stress for unknown reasons."],
+ "bug": "b/120995005",
+ "variant": "jvmti-stress | trace-stress | field-stress | step-stress"
+ },
+ {
+ "tests": [
+ "018-stack-overflow",
+ "137-cfi",
+ "595-profile-saving",
+ "597-deopt-busy-loop",
+ "597-deopt-new-string",
+ "660-clinit",
+ "900-hello-plugin",
+ "909-attach-agent",
+ "924-threads",
+ "981-dedup-original-dex",
+ "1900-track-alloc"
+ ],
+ "description": ["Tests that require exact knowledge of the deoptimization state, the ",
+ "number of plugins and agents, or breaks other openjdkjvmti assumptions."],
+ "variant": "jvmti-stress | redefine-stress | trace-stress | field-stress | step-stress"
+ },
+ {
+ "tests": [
"607-daemon-stress",
"602-deoptimizeable",
"121-simple-suspend-check",
@@ -569,6 +613,12 @@
"env_vars": {"SANITIZE_HOST": "address"}
},
{
+ "tests": "175-alloc-big-bignums",
+ "description": "ASAN runs out of memory due to huge allocations.",
+ "variant": "host",
+ "env_vars": {"SANITIZE_HOST": "address"}
+ },
+ {
"tests": "202-thread-oome",
"description": "ASAN aborts when large thread stacks are requested.",
"variant": "host",
@@ -1111,5 +1161,11 @@
"tests": ["454-get-vreg", "457-regs"],
"variant": "baseline",
"description": ["Tests are expected to fail with baseline."]
+ },
+ {
+ "tests": ["708-jit-cache-churn"],
+ "variant": "jit-on-first-use",
+ "bug": "b/120112467",
+ "description": [ "Fails on Android Build hosts with uncaught std::bad_alloc." ]
}
]
diff --git a/test/run-test b/test/run-test
index 2363152..83c726e 100755
--- a/test/run-test
+++ b/test/run-test
@@ -164,6 +164,15 @@
image_suffix=""
run_optimizing="false"
+# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and
+# ART output to approximately 128MB. This should be more than sufficient
+# for any test while still catching cases of runaway output.
+# Set a hard limit to encourage ART developers to increase the ulimit here if
+# needed to support a test case rather than resetting the limit in the run
+# script for the particular test in question. Adjust this if needed for
+# particular configurations.
+file_ulimit=128000
+
while true; do
if [ "x$1" = "x--host" ]; then
target_mode="no"
@@ -395,6 +404,18 @@
DEX_LOCATION=$tmp_dir
host_lib_root=$OUT_DIR/soong/host/linux_bionic-x86
shift
+ elif [ "x$1" = "x--runtime-zipapex" ]; then
+ shift
+ # TODO Should we allow the java.library.path to search the zipapex too?
+ # Not needed at the moment and adding it will be complicated so for now
+ # we'll ignore this.
+ run_args="${run_args} --host --runtime-zipapex $1"
+ target_mode="no"
+ DEX_LOCATION=$tmp_dir
+ # apex_payload.zip is quite large, so we need a high enough ulimit to
+ # extract it. 512mb should be good enough.
+ file_ulimit=512000
+ shift
elif [ "x$1" = "x--trace" ]; then
trace="true"
shift
@@ -722,6 +743,8 @@
"files."
echo " --64 Run the test in 64-bit mode"
echo " --bionic Use the (host, 64-bit only) linux_bionic libc runtime"
+ echo " --runtime-zipapex [file]"
+ echo " Use the given zipapex file to provide runtime binaries"
echo " --trace Run with method tracing"
echo " --strace Run with syscall tracing from strace."
echo " --stream Run method tracing in streaming mode (requires --trace)"
@@ -823,13 +846,7 @@
run_args="${run_args} --testlib ${testlib}"
-# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and
-# ART output to approximately 128MB. This should be more than sufficient
-# for any test while still catching cases of runaway output.
-# Set a hard limit to encourage ART developers to increase the ulimit here if
-# needed to support a test case rather than resetting the limit in the run
-# script for the particular test in question.
-if ! ulimit -f 128000; then
+if ! ulimit -f ${file_ulimit}; then
err_echo "ulimit file size setting failed"
fi
diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py
index b4a4ada..139d1af 100755
--- a/test/testrunner/run_build_test_target.py
+++ b/test/testrunner/run_build_test_target.py
@@ -108,7 +108,7 @@
run_test_command = [os.path.join(env.ANDROID_BUILD_TOP,
'art/test/testrunner/testrunner.py')]
test_flags = target.get('run-test', [])
- run_test_command += test_flags
+ run_test_command += list(map(lambda a: a.format(SOONG_OUT_DIR=env.SOONG_OUT_DIR), test_flags))
# Let testrunner compute concurrency based on #cpus.
# b/65822340
# run_test_command += ['-j', str(n_threads)]
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 077129f..bc22360 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -336,4 +336,12 @@
'--64',
'--no-build-dependencies'],
},
+ 'art-linux-bionic-x64-zipapex': {
+ 'build': '{ANDROID_BUILD_TOP}/art/tools/build_linux_bionic_tests.sh {MAKE_OPTIONS} com.android.runtime.host',
+ 'run-test': ['--run-test-option=--bionic',
+ "--run-test-option='--runtime-zipapex {SOONG_OUT_DIR}/host/linux_bionic-x86/apex/com.android.runtime.host.zipapex'",
+ '--host',
+ '--64',
+ '--no-build-dependencies'],
+ },
}
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index e123e9f..cd7af10 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -157,14 +157,12 @@
: jvmtienv_(jvmtienv),
class_(c),
name_(nullptr),
- generic_(nullptr),
file_(nullptr),
debug_ext_(nullptr) {}
~ScopedClassInfo() {
if (class_ != nullptr) {
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
- jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(file_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(debug_ext_));
}
@@ -173,12 +171,11 @@
bool Init() {
if (class_ == nullptr) {
name_ = const_cast<char*>("<NONE>");
- generic_ = const_cast<char*>("<NONE>");
return true;
} else {
jvmtiError ret1 = jvmtienv_->GetSourceFileName(class_, &file_);
jvmtiError ret2 = jvmtienv_->GetSourceDebugExtension(class_, &debug_ext_);
- return jvmtienv_->GetClassSignature(class_, &name_, &generic_) == JVMTI_ERROR_NONE &&
+ return jvmtienv_->GetClassSignature(class_, &name_, nullptr) == JVMTI_ERROR_NONE &&
ret1 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
ret1 != JVMTI_ERROR_INVALID_CLASS &&
ret2 != JVMTI_ERROR_MUST_POSSESS_CAPABILITY &&
@@ -192,9 +189,6 @@
const char* GetName() const {
return name_;
}
- const char* GetGeneric() const {
- return generic_;
- }
const char* GetSourceDebugExtension() const {
if (debug_ext_ == nullptr) {
return "<UNKNOWN_SOURCE_DEBUG_EXTENSION>";
@@ -214,7 +208,6 @@
jvmtiEnv* jvmtienv_;
jclass class_;
char* name_;
- char* generic_;
char* file_;
char* debug_ext_;
};
@@ -229,14 +222,12 @@
class_info_(nullptr),
name_(nullptr),
signature_(nullptr),
- generic_(nullptr),
first_line_(-1) {}
~ScopedMethodInfo() {
DeleteLocalRef(env_, declaring_class_);
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(signature_));
- jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
}
bool Init() {
@@ -257,7 +248,7 @@
return false;
}
return class_info_->Init() &&
- (jvmtienv_->GetMethodName(method_, &name_, &signature_, &generic_) == JVMTI_ERROR_NONE);
+ (jvmtienv_->GetMethodName(method_, &name_, &signature_, nullptr) == JVMTI_ERROR_NONE);
}
const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -276,10 +267,6 @@
return signature_;
}
- const char* GetGeneric() const {
- return generic_;
- }
-
jint GetFirstLine() const {
return first_line_;
}
@@ -292,7 +279,6 @@
std::unique_ptr<ScopedClassInfo> class_info_;
char* name_;
char* signature_;
- char* generic_;
jint first_line_;
friend std::ostream& operator<<(std::ostream &os, ScopedMethodInfo const& m);
@@ -306,20 +292,18 @@
field_(field),
class_info_(nullptr),
name_(nullptr),
- type_(nullptr),
- generic_(nullptr) {}
+ type_(nullptr) {}
~ScopedFieldInfo() {
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(name_));
jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(type_));
- jvmtienv_->Deallocate(reinterpret_cast<unsigned char*>(generic_));
}
bool Init() {
class_info_.reset(new ScopedClassInfo(jvmtienv_, declaring_class_));
return class_info_->Init() &&
(jvmtienv_->GetFieldName(
- declaring_class_, field_, &name_, &type_, &generic_) == JVMTI_ERROR_NONE);
+ declaring_class_, field_, &name_, &type_, nullptr) == JVMTI_ERROR_NONE);
}
const ScopedClassInfo& GetDeclaringClassInfo() const {
@@ -338,10 +322,6 @@
return type_;
}
- const char* GetGeneric() const {
- return generic_;
- }
-
private:
jvmtiEnv* jvmtienv_;
jclass declaring_class_;
@@ -349,7 +329,6 @@
std::unique_ptr<ScopedClassInfo> class_info_;
char* name_;
char* type_;
- char* generic_;
friend std::ostream& operator<<(std::ostream &os, ScopedFieldInfo const& m);
};
diff --git a/tools/bootjars.sh b/tools/bootjars.sh
index ad6ee6b..9f22827 100755
--- a/tools/bootjars.sh
+++ b/tools/bootjars.sh
@@ -72,8 +72,10 @@
# FIXME: The soong invocation we're using for getting the variables does not give us anything
# defined in Android.common_path.mk, otherwise we would just use HOST-/TARGET_TEST_CORE_JARS.
- # The core_jars_list must match the TEST_CORE_JARS variable in the Android.common_path.mk .
- core_jars_list="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+ # Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
+ # because that's what we use for compiling the core.art image.
+ # It may contain additional modules from TEST_CORE_JARS.
+ core_jars_list="core-oj core-libart core-simple"
core_jars_suffix=
if [[ $mode == target ]]; then
core_jars_suffix=-testdex
diff --git a/tools/build_linux_bionic.sh b/tools/build_linux_bionic.sh
new file mode 100755
index 0000000..94ccc41
--- /dev/null
+++ b/tools/build_linux_bionic.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This will build a target using linux_bionic. It can be called with normal make
+# flags.
+#
+# TODO This runs a 'm clean' prior to building the targets in order to ensure
+# that obsolete kati files don't mess up the build.
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+ pushd .
+else
+  pushd "$ANDROID_BUILD_TOP"
+fi
+
+if [ ! -d art ]; then
+ echo "Script needs to be run at the root of the android tree"
+ exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var
+# Soong needs a bunch of variables set and will not run if they are missing.
+# The default values of these variables is only contained in make, so use
+# nothing to create the variables then remove all the other artifacts.
+build/soong/soong_ui.bash --make-mode nothing
+if [ $? != 0 ]; then
+ exit 1
+fi
+
+out_dir=$(get_build_var OUT_DIR)
+host_out=$(get_build_var HOST_OUT)
+
+# TODO(b/31559095) Figure out a better way to do this.
+#
+# There is no good way to force soong to generate host-bionic builds currently
+# so this is a hacky workaround.
+tmp_soong_var=$(mktemp --tmpdir soong.variables.bak.XXXXXX)
+
+cat $out_dir/soong/soong.variables > ${tmp_soong_var}
+build/soong/soong_ui.bash --make-mode clean
+mkdir -p $out_dir/soong
+
+python3 <<END - ${tmp_soong_var} ${out_dir}/soong/soong.variables
+import json
+import sys
+x = json.load(open(sys.argv[1]))
+x['Allow_missing_dependencies'] = True
+x['HostArch'] = 'x86_64'
+x['CrossHost'] = 'linux_bionic'
+x['CrossHostArch'] = 'x86_64'
+if 'CrossHostSecondaryArch' in x:
+ del x['CrossHostSecondaryArch']
+json.dump(x, open(sys.argv[2], mode='w'))
+END
+
+rm "$tmp_soong_var"
+
+build/soong/soong_ui.bash --make-mode --skip-make "$@"
diff --git a/tools/build_linux_bionic_tests.sh b/tools/build_linux_bionic_tests.sh
index 2b178f2..c532c90 100755
--- a/tools/build_linux_bionic_tests.sh
+++ b/tools/build_linux_bionic_tests.sh
@@ -81,6 +81,7 @@
$soong_out/bin/hiddenapi
$soong_out/bin/hprof-conv
$soong_out/bin/timeout_dumper
+ $(find $host_out/apex -type f | sed "s:$host_out:$soong_out:g")
$(find $host_out/lib64 -type f | sed "s:$host_out:$soong_out:g")
$(find $host_out/nativetest64 -type f | sed "s:$host_out:$soong_out:g"))
diff --git a/tools/dist_linux_bionic.sh b/tools/dist_linux_bionic.sh
new file mode 100755
index 0000000..4c7ba1c
--- /dev/null
+++ b/tools/dist_linux_bionic.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+
+# Builds the given targets using linux-bionic and moves the output files to the
+# DIST_DIR. Takes normal make arguments.
+
+if [[ -z $ANDROID_BUILD_TOP ]]; then
+ pushd .
+else
+ pushd $ANDROID_BUILD_TOP
+fi
+
+if [[ -z $DIST_DIR ]]; then
+ echo "DIST_DIR must be set!"
+ exit 1
+fi
+
+if [ ! -d art ]; then
+ echo "Script needs to be run at the root of the android tree"
+ exit 1
+fi
+
+source build/envsetup.sh >&/dev/null # for get_build_var
+out_dir=$(get_build_var OUT_DIR)
+
+./art/tools/build_linux_bionic.sh "$@"
+
+mkdir -p $DIST_DIR
+cp -R ${out_dir}/soong/host/* $DIST_DIR/
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index f97dd4f..3c65b01 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -155,41 +155,6 @@
bug: 25437292
},
{
- description: "Missing resource in classpath",
- result: EXEC_FAILED,
- names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testGet",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetBoolean",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetByteArray",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetDouble",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetFloat",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetInt",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testGetLong",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testKeys",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testNodeExists",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testPut",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutBoolean",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutByteArray",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutDouble",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutFloat",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutInt",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testPutLong",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemove",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testRemoveNode",
- "libcore.java.util.prefs.OldAbstractPreferencesTest#testSync",
- "libcore.java.util.prefs.PreferencesTest#testHtmlEncoding",
- "libcore.java.util.prefs.PreferencesTest#testPreferencesClobbersExistingFiles",
- "org.apache.harmony.tests.java.util.PropertiesTest#test_storeToXMLLjava_io_OutputStreamLjava_lang_StringLjava_lang_String",
- "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportNode",
- "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree",
- "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush",
- "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync",
- "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"],
- bug: 120526172
-},
-{
description: "Only work with --mode=activity",
result: EXEC_FAILED,
names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ]
@@ -233,5 +198,30 @@
modes: [device],
bug: 116446372,
names: ["libcore.libcore.io.FdsanTest#testSocket"]
+},
+{
+ description: "Host implementation of android_getaddrinfo differs from device implementation",
+ result: EXEC_FAILED,
+ modes: [host],
+ bug: 121230364,
+ names: [
+ "libcore.libcore.net.InetAddressUtilsTest#parseNumericAddress[8]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[10]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[11]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[12]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[5]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[6]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[7]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[8]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[9]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[10]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[11]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[12]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[5]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[6]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[7]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[8]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[9]"
+ ]
}
]
diff --git a/tools/luci/config/cr-buildbucket.cfg b/tools/luci/config/cr-buildbucket.cfg
index 29cca39..8df8433 100644
--- a/tools/luci/config/cr-buildbucket.cfg
+++ b/tools/luci/config/cr-buildbucket.cfg
@@ -27,8 +27,6 @@
swarming {
hostname: "chromium-swarm.appspot.com"
builder_defaults {
- dimensions: "cores:8"
- dimensions: "cpu:x86-64"
dimensions: "pool:luci.art.ci"
service_account: "art-ci-builder@chops-service-accounts.iam.gserviceaccount.com"
execution_timeout_secs: 10800 # 3h
diff --git a/tools/luci/config/luci-milo.cfg b/tools/luci/config/luci-milo.cfg
index ce22293..60e8404 100644
--- a/tools/luci/config/luci-milo.cfg
+++ b/tools/luci/config/luci-milo.cfg
@@ -6,6 +6,7 @@
repo_url: "https://android.googlesource.com/platform/art"
refs: "refs/heads/master"
manifest_name: "REVISION"
+ include_experimental_builds: true
builders {
name: "buildbucket/luci.art.ci/angler-armv7-debug"
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index f4a2dc1..c7503bb 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -55,9 +55,10 @@
done
}
-# Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
# because that's what we use for compiling the core.art image.
-BOOT_CLASSPATH_JARS="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt"
vm_args=""
art="$android_root/bin/art"
diff --git a/tools/run-libcore-tests.sh b/tools/run-libcore-tests.sh
index 63f1fce..68c4fd8 100755
--- a/tools/run-libcore-tests.sh
+++ b/tools/run-libcore-tests.sh
@@ -57,9 +57,10 @@
done
}
-# Note: This must match the TEST_CORE_JARS in Android.common_path.mk
+# Note: This must start with the CORE_IMG_JARS in Android.common_path.mk
# because that's what we use for compiling the core.art image.
-BOOT_CLASSPATH_JARS="core-oj core-libart core-simple conscrypt okhttp bouncycastle"
+# It may contain additional modules from TEST_CORE_JARS.
+BOOT_CLASSPATH_JARS="core-oj core-libart core-simple okhttp bouncycastle apache-xml conscrypt"
DEPS="core-tests jsr166-tests mockito-target"
diff --git a/tools/timeout_dumper/timeout_dumper.cc b/tools/timeout_dumper/timeout_dumper.cc
index 96d165c..e04aefb 100644
--- a/tools/timeout_dumper/timeout_dumper.cc
+++ b/tools/timeout_dumper/timeout_dumper.cc
@@ -29,6 +29,7 @@
#include <thread>
#include <memory>
#include <set>
+#include <string>
#include <android-base/file.h>
#include <android-base/logging.h>
@@ -103,9 +104,22 @@
}
}
- std::string path = std::string(".") + kAddr2linePath;
- if (access(path.c_str(), X_OK) == 0) {
- return std::make_unique<std::string>(path);
+ {
+ std::string path = std::string(".") + kAddr2linePath;
+ if (access(path.c_str(), X_OK) == 0) {
+ return std::make_unique<std::string>(path);
+ }
+ }
+
+ {
+ using android::base::Dirname;
+
+ std::string exec_dir = android::base::GetExecutableDirectory();
+ std::string derived_top = Dirname(Dirname(Dirname(Dirname(exec_dir))));
+ std::string path = derived_top + kAddr2linePath;
+ if (access(path.c_str(), X_OK) == 0) {
+ return std::make_unique<std::string>(path);
+ }
}
constexpr const char* kHostAddr2line = "/usr/bin/addr2line";
@@ -356,6 +370,91 @@
return ret;
}
+void DumpABI(pid_t forked_pid) {
+ enum class ABI { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };
+#if defined(__arm__)
+ constexpr ABI kDumperABI = ABI::kArm;
+#elif defined(__aarch64__)
+ constexpr ABI kDumperABI = ABI::kArm64;
+#elif defined(__mips__) && !defined(__LP64__)
+ constexpr ABI kDumperABI = ABI::kMips;
+#elif defined(__mips__) && defined(__LP64__)
+ constexpr ABI kDumperABI = ABI::kMips64;
+#elif defined(__i386__)
+ constexpr ABI kDumperABI = ABI::kX86;
+#elif defined(__x86_64__)
+ constexpr ABI kDumperABI = ABI::kX86_64;
+#else
+#error Unsupported architecture
+#endif
+
+ char data[1024]; // Should be more than enough.
+ struct iovec io_vec;
+ io_vec.iov_base = &data;
+ io_vec.iov_len = 1024;
+ ABI to_print;
+ if (0 != ::ptrace(PTRACE_GETREGSET, forked_pid, /* NT_PRSTATUS */ 1, &io_vec)) {
+ LOG(ERROR) << "Could not get registers to determine abi.";
+ // Use 64-bit as default.
+ switch (kDumperABI) {
+ case ABI::kArm:
+ case ABI::kArm64:
+ to_print = ABI::kArm64;
+ break;
+ case ABI::kMips:
+ case ABI::kMips64:
+ to_print = ABI::kMips64;
+ break;
+ case ABI::kX86:
+ case ABI::kX86_64:
+ to_print = ABI::kX86_64;
+ break;
+ default:
+ __builtin_unreachable();
+ }
+ } else {
+ // Check the length of the data. Assume that it's the same arch as the tool.
+ switch (kDumperABI) {
+ case ABI::kArm:
+ case ABI::kArm64:
+ to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64;
+ break;
+ case ABI::kMips:
+ case ABI::kMips64:
+ to_print = ABI::kMips64; // TODO Figure out how this should work.
+ break;
+ case ABI::kX86:
+ case ABI::kX86_64:
+ to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64;
+ break;
+ default:
+ __builtin_unreachable();
+ }
+ }
+ std::string abi_str;
+ switch (to_print) {
+ case ABI::kArm:
+ abi_str = "arm";
+ break;
+ case ABI::kArm64:
+ abi_str = "arm64";
+ break;
+ case ABI::kMips:
+ abi_str = "mips";
+ break;
+ case ABI::kMips64:
+ abi_str = "mips64";
+ break;
+ case ABI::kX86:
+ abi_str = "x86";
+ break;
+ case ABI::kX86_64:
+ abi_str = "x86_64";
+ break;
+ }
+ std::cerr << "ABI: '" << abi_str << "'" << std::endl;
+}
+
} // namespace ptrace
template <typename T>
@@ -495,19 +594,22 @@
}
void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_main) {
+ LOG(ERROR) << "Timeout for process " << forked_pid;
+
CHECK_EQ(0, ::ptrace(PTRACE_ATTACH, forked_pid, 0, 0));
std::set<pid_t> tids = ptrace::PtraceSiblings(forked_pid);
tids.insert(forked_pid);
+ ptrace::DumpABI(forked_pid);
+
// Check whether we have and should use addr2line.
- std::unique_ptr<std::string> addr2line_path = addr2line::FindAddr2line();
- if (addr2line_path != nullptr) {
- LOG(ERROR) << "Found addr2line at " << *addr2line_path;
- } else {
- LOG(ERROR) << "Did not find usable addr2line";
+ std::unique_ptr<std::string> addr2line_path;
+ if (kUseAddr2line) {
+ addr2line_path = addr2line::FindAddr2line();
+ if (addr2line_path == nullptr) {
+ LOG(ERROR) << "Did not find usable addr2line";
+ }
}
- bool use_addr2line = kUseAddr2line && addr2line_path != nullptr;
- LOG(ERROR) << (use_addr2line ? "U" : "Not u") << "sing addr2line";
if (!WaitForMainSigStop(saw_wif_stopped_for_main)) {
LOG(ERROR) << "Did not receive SIGSTOP for pid " << forked_pid;
@@ -520,11 +622,7 @@
}
for (pid_t tid : tids) {
- DumpThread(forked_pid,
- tid,
- use_addr2line ? addr2line_path.get() : nullptr,
- " ",
- backtrace_map.get());
+ DumpThread(forked_pid, tid, addr2line_path.get(), " ", backtrace_map.get());
}
}