Merge changes I3f825746,Ia4284ccd,I437ffd43

* changes:
  Extend the InvokeRuntime() changes to mips64.
  Extend the InvokeRuntime() changes to mips.
  Extend the InvokeRuntime() changes to x86 and x86_64.
diff --git a/Android.mk b/Android.mk
index 9d0062b..4ea169a 100644
--- a/Android.mk
+++ b/Android.mk
@@ -77,6 +77,7 @@
# product rules
include $(art_path)/dexdump/Android.mk
+include $(art_path)/dexlayout/Android.mk
include $(art_path)/dexlist/Android.mk
include $(art_path)/dex2oat/Android.mk
include $(art_path)/disassembler/Android.mk
diff --git a/build/Android.bp b/build/Android.bp
index be7dafd..ed9f308 100644
--- a/build/Android.bp
+++ b/build/Android.bp
@@ -150,9 +150,19 @@
"-DDYNAMIC_ANNOTATIONS_ENABLED=1",
"-DVIXL_DEBUG",
"-UNDEBUG",
- "-Wno-frame-larger-than=",
],
asflags: [
"-UNDEBUG",
],
+ target: {
+ // This has to be duplicated for android and host to make sure it
+ // comes after the -Wframe-larger-than warnings inserted by art.go's
+ // target-specific properties.
+ android: {
+ cflags: ["-Wno-frame-larger-than="],
+ },
+ host: {
+ cflags: ["-Wno-frame-larger-than="],
+ },
+ },
}
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk
index 86bb475..3f25ae1 100644
--- a/build/Android.common_path.mk
+++ b/build/Android.common_path.mk
@@ -18,6 +18,7 @@
ART_ANDROID_COMMON_PATH_MK := true
include art/build/Android.common.mk
+include art/build/Android.common_build.mk
# Directory used for dalvik-cache on device.
ART_TARGET_DALVIK_CACHE_DIR := /data/dalvik-cache
@@ -113,4 +114,29 @@
ART_TARGET_SHARED_LIBRARY_DEPENDENCIES += $(foreach lib,$(ART_CORE_SHARED_LIBRARIES), $(2ND_TARGET_OUT_SHARED_LIBRARIES)/$(lib).so)
endif
+ART_CORE_EXECUTABLES := \
+ dex2oat \
+ imgdiag \
+ oatdump \
+ patchoat \
+ profman \
+
+# Depend on the -target or -host phony targets generated by the build system
+# for each module
+ART_TARGET_EXECUTABLES :=
+ifneq ($(ART_BUILD_TARGET_NDEBUG),false)
+ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)-target)
+endif
+ifneq ($(ART_BUILD_TARGET_DEBUG),false)
+ART_TARGET_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)d-target)
+endif
+
+ART_HOST_EXECUTABLES :=
+ifneq ($(ART_BUILD_HOST_NDEBUG),false)
+ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)-host)
+endif
+ifneq ($(ART_BUILD_HOST_DEBUG),false)
+ART_HOST_EXECUTABLES += $(foreach name,$(ART_CORE_EXECUTABLES),$(name)d-host)
+endif
+
endif # ART_ANDROID_COMMON_PATH_MK
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 2db16af..e66b30c 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -16,9 +16,6 @@
include art/build/Android.common_build.mk
-ART_HOST_EXECUTABLES ?=
-ART_TARGET_EXECUTABLES ?=
-
ART_EXECUTABLES_CFLAGS :=
# $(1): executable ("d" will be appended for debug version)
@@ -169,11 +166,9 @@
ifeq ($$(art_target_or_host),target)
include $(BUILD_EXECUTABLE)
- ART_TARGET_EXECUTABLES := $(ART_TARGET_EXECUTABLES) $$(foreach name,$$(art_out_binary_name),$(TARGET_OUT_EXECUTABLES)/$$(name))
else # host
LOCAL_IS_HOST_MODULE := true
include $(BUILD_HOST_EXECUTABLE)
- ART_HOST_EXECUTABLES := $(ART_HOST_EXECUTABLES) $$(foreach name,$$(art_out_binary_name),$(HOST_OUT_EXECUTABLES)/$$(name))
endif
# Clear out local variables now that we're done with them.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 9ec072f..c09241a 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -126,6 +126,19 @@
$(TARGET_CORE_IMAGE_default_no-pic_32) \
dexdump2
+# The dexlayout test requires an image and the dexlayout utility.
+# TODO: rename into dexdump when migration completes
+ART_GTEST_dexlayout_test_HOST_DEPS := \
+ $(HOST_CORE_IMAGE_default_no-pic_64) \
+ $(HOST_CORE_IMAGE_default_no-pic_32) \
+ $(HOST_OUT_EXECUTABLES)/dexlayout \
+ $(HOST_OUT_EXECUTABLES)/dexdump2
+ART_GTEST_dexlayout_test_TARGET_DEPS := \
+ $(TARGET_CORE_IMAGE_default_no-pic_64) \
+ $(TARGET_CORE_IMAGE_default_no-pic_32) \
+ dexlayout \
+ dexdump2
+
# The dexlist test requires an image and the dexlist utility.
ART_GTEST_dexlist_test_HOST_DEPS := \
$(HOST_CORE_IMAGE_default_no-pic_64) \
@@ -170,6 +183,7 @@
RUNTIME_GTEST_COMMON_SRC_FILES := \
cmdline/cmdline_parser_test.cc \
dexdump/dexdump_test.cc \
+ dexlayout/dexlayout_test.cc \
dexlist/dexlist_test.cc \
dex2oat/dex2oat_test.cc \
imgdiag/imgdiag_test.cc \
@@ -521,6 +535,7 @@
(adb shell "$(GCOV_ENV) LD_LIBRARY_PATH=$(3) ANDROID_ROOT=$(ART_GTEST_TARGET_ANDROID_ROOT) \
valgrind --leak-check=full --error-exitcode=1 --workaround-gcc296-bugs=yes \
--suppressions=$(ART_TARGET_TEST_DIR)/valgrind-target-suppressions.txt \
+ --num-callers=50 \
$(ART_TARGET_NATIVETEST_DIR)/$(TARGET_$(2)ARCH)/$(1) && touch $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID" \
&& (adb pull $(ART_TARGET_TEST_DIR)/$(TARGET_$(2)ARCH)/$$@-$$$$PPID /tmp/ \
&& $$(call ART_TEST_PASSED,$$@)) \
@@ -577,7 +592,8 @@
$(hide) $$(call ART_TEST_SKIP,$$@) && \
VALGRIND_LIB=$(HOST_OUT)/lib64/valgrind \
$(HOST_OUT_EXECUTABLES)/valgrind --leak-check=full --error-exitcode=1 \
- --suppressions=art/test/valgrind-suppressions.txt $$< && \
+ --suppressions=art/test/valgrind-suppressions.txt --num-callers=50 \
+ $$< && \
$$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
ART_TEST_HOST_VALGRIND_GTEST$$($(2)ART_PHONY_TEST_HOST_SUFFIX)_RULES += valgrind-$$(gtest_rule)
diff --git a/compiler/Android.bp b/compiler/Android.bp
index 289adf8..0143268 100644
--- a/compiler/Android.bp
+++ b/compiler/Android.bp
@@ -245,8 +245,8 @@
art_cc_library {
name: "libartd-compiler",
defaults: [
- "libart-compiler-defaults",
"art_debug_defaults",
+ "libart-compiler-defaults",
],
codegen: {
arm: {
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index c942375..5239121 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -47,12 +47,12 @@
// Create an OatMethod based on pointers (for unit tests).
OatFile::OatMethod CreateOatMethod(const void* code);
- void MakeExecutable(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MakeExecutable(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
static void MakeExecutable(const void* code_start, size_t code_length);
void MakeExecutable(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
protected:
virtual void SetUp();
@@ -81,17 +81,17 @@
virtual void TearDown();
void CompileClass(mirror::ClassLoader* class_loader, const char* class_name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void CompileMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void CompileMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
const char* method_name, const char* signature)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateCompilerDriver(Compiler::Kind kind, InstructionSet isa, size_t number_of_threads = 2U);
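
The recurring SHARED_REQUIRES(Locks::mutator_lock_) -> REQUIRES_SHARED(Locks::mutator_lock_) substitution in this header and the ones that follow renames ART's shared-lock annotation macro to match the spelling used by Clang's thread safety analysis, where the underlying attribute is requires_shared_capability. A minimal, self-contained sketch of how such annotations are typically wired up; the macro definitions below follow the Clang documentation and are not ART's actual base/macros.h:

    // Sketch only: ART's real macros live in base/macros.h and base/mutex.h.
    #define CAPABILITY(x)        __attribute__((capability(x)))
    #define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
    #define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
    #define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))

    // A reader-writer lock modeled as a shared capability.
    struct CAPABILITY("mutex") ReaderWriterMutex {
      void SharedLock() ACQUIRE_SHARED();
      void SharedUnlock() RELEASE_SHARED();
    };

    ReaderWriterMutex mutator_lock;

    // With -Wthread-safety, Clang warns when a caller invokes this function
    // without holding mutator_lock for shared (read) access.
    void InspectHeapObject() REQUIRES_SHARED(mutator_lock);

With -Wthread-safety enabled, calling InspectHeapObject() outside a SharedLock()/SharedUnlock() pair triggers a compile-time warning, which is what annotating declarations like MakeExecutable() and CompileMethod() above buys for the mutator lock.
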
diff --git a/compiler/compiler.h b/compiler/compiler.h
index a955f3c..9e5fb83 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -69,12 +69,12 @@
jit::JitCodeCache* code_cache ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
bool osr ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return false;
}
virtual uintptr_t GetEntryPointOf(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
uint64_t GetMaximumCompilationTimeBeforeWarning() const {
return maximum_compilation_time_before_warning_;
diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h
index e8e278d..0a4f094 100644
--- a/compiler/debug/elf_debug_info_writer.h
+++ b/compiler/debug/elf_debug_info_writer.h
@@ -275,7 +275,7 @@
owner_->builder_->GetDebugInfo()->WriteFully(buffer.data(), buffer.size());
}
- void Write(const ArrayRef<mirror::Class*>& types) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Write(const ArrayRef<mirror::Class*>& types) REQUIRES_SHARED(Locks::mutator_lock_) {
using namespace dwarf; // NOLINT. For easy access to DWARF constants.
info_.StartTag(DW_TAG_compile_unit);
@@ -466,7 +466,7 @@
// Linkage name uniquely identifies type.
// It is used to determine the dynamic type of objects.
// We use the methods_ field of class since it is unique and it is not moved by the GC.
- void WriteLinkageName(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void WriteLinkageName(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
auto* methods_ptr = type->GetMethodsPtr();
if (methods_ptr == nullptr) {
// Some types might have no methods. Allocate empty array instead.
diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc
index b7e000a..5bfdd16 100644
--- a/compiler/debug/elf_debug_writer.cc
+++ b/compiler/debug/elf_debug_writer.cc
@@ -145,7 +145,7 @@
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<mirror::Class*>& types)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::vector<uint8_t> buffer;
buffer.reserve(KB);
VectorOutputStream out("Debug ELF file", &buffer);
diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h
index 6f52249..b0542c7 100644
--- a/compiler/debug/elf_debug_writer.h
+++ b/compiler/debug/elf_debug_writer.h
@@ -56,7 +56,7 @@
InstructionSet isa,
const InstructionSetFeatures* features,
const ArrayRef<mirror::Class*>& types)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::vector<MethodDebugInfo> MakeTrampolineInfos(const OatHeader& oat_header);
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index dbdfa24..43fc687 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -64,7 +64,7 @@
* @return true if the method is a candidate for inlining, false otherwise.
*/
bool AnalyseMethodCode(verifier::MethodVerifier* verifier)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
/**
* Check whether a particular method index corresponds to an intrinsic or special function.
diff --git a/compiler/dex/quick_compiler_callbacks.h b/compiler/dex/quick_compiler_callbacks.h
index 4f5ea76..1f69686 100644
--- a/compiler/dex/quick_compiler_callbacks.h
+++ b/compiler/dex/quick_compiler_callbacks.h
@@ -38,7 +38,7 @@
~QuickCompilerCallbacks() { }
void MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
void ClassRejected(ClassReference ref) OVERRIDE;
diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h
index 1af11a8..6afd1ab 100644
--- a/compiler/dex/verification_results.h
+++ b/compiler/dex/verification_results.h
@@ -43,7 +43,7 @@
~VerificationResults();
void ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!verified_methods_lock_);
const VerifiedMethod* GetVerifiedMethod(MethodReference ref)
diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h
index 495acf0..04331e5 100644
--- a/compiler/dex/verified_method.h
+++ b/compiler/dex/verified_method.h
@@ -44,7 +44,7 @@
typedef SafeMap<uint32_t, DexFileReference> DequickenMap;
static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
~VerifiedMethod() = default;
const DevirtualizationMap& GetDevirtMap() const {
@@ -100,15 +100,15 @@
// Generate devirtualization map into devirt_map_.
void GenerateDevirtMap(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Generate dequickening map into dequicken_map_. Returns false if there is an error.
bool GenerateDequickenMap(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Generate safe cast set into safe_cast_set_.
void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DevirtualizationMap devirt_map_;
// Dequicken map is required for compiling quickened byte codes. The quicken maps from
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 77ec4b7..b5bc2fb 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -492,7 +492,7 @@
static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel(
Thread* self, const CompilerDriver& driver, Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file, const DexFile::ClassDef& class_def)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* const runtime = Runtime::Current();
if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) {
// Verify at runtime shouldn't dex to dex since we didn't resolve or verify.
@@ -1026,7 +1026,7 @@
std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve)
: exceptions_to_resolve_(exceptions_to_resolve) {}
- virtual bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual bool operator()(mirror::Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
const auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& m : c->GetMethods(pointer_size)) {
ResolveExceptionsForMethod(&m, pointer_size);
@@ -1036,7 +1036,7 @@
private:
void ResolveExceptionsForMethod(ArtMethod* method_handle, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method_handle->GetCodeItem();
if (code_item == nullptr) {
return; // native or abstract method
@@ -1080,7 +1080,7 @@
explicit RecordImageClassesVisitor(std::unordered_set<std::string>* image_classes)
: image_classes_(image_classes) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
image_classes_->insert(klass->GetDescriptor(&temp));
return true;
@@ -1161,7 +1161,7 @@
static void MaybeAddToImageClasses(Handle<mirror::Class> c,
std::unordered_set<std::string>* image_classes)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
// Make a copy of the handle so that we don't clobber it doing Assign.
@@ -1216,7 +1216,7 @@
// Visitor for VisitReferences.
void operator()(mirror::Object* object, MemberOffset field_offset, bool /* is_static */) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = object->GetFieldObject<mirror::Object>(field_offset);
if (ref != nullptr) {
VisitClinitClassesObject(ref);
@@ -1232,7 +1232,7 @@
const {}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root ATTRIBUTE_UNUSED) const {}
- void Walk() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Walk() REQUIRES_SHARED(Locks::mutator_lock_) {
// Use the initial classes as roots for a search.
for (mirror::Class* klass_root : image_classes_) {
VisitClinitClassesObject(klass_root);
@@ -1244,7 +1244,7 @@
public:
explicit FindImageClassesVisitor(ClinitImageUpdate* data) : data_(data) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
const char* name = klass->GetDescriptor(&temp);
if (data_->image_class_descriptors_->find(name) != data_->image_class_descriptors_->end()) {
@@ -1265,7 +1265,7 @@
ClinitImageUpdate(std::unordered_set<std::string>* image_class_descriptors, Thread* self,
ClassLinker* linker)
- SHARED_REQUIRES(Locks::mutator_lock_) :
+ REQUIRES_SHARED(Locks::mutator_lock_) :
image_class_descriptors_(image_class_descriptors), self_(self) {
CHECK(linker != nullptr);
CHECK(image_class_descriptors != nullptr);
@@ -1284,7 +1284,7 @@
}
void VisitClinitClassesObject(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(object != nullptr);
if (marked_objects_.find(object) != marked_objects_.end()) {
// Already processed.
@@ -1983,7 +1983,7 @@
// A fast version of SkipClass above if the class pointer is available
// that avoids the expensive FindInClassPath search.
static bool SkipClass(jobject class_loader, const DexFile& dex_file, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
const DexFile& original_dex_file = *klass->GetDexCache()->GetDexFile();
if (&dex_file != &original_dex_file) {
@@ -1998,7 +1998,7 @@
}
static void CheckAndClearResolveException(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(self->IsExceptionPending());
mirror::Throwable* exception = self->GetException();
std::string temp;
@@ -2289,7 +2289,7 @@
&dex_file,
dex_cache,
class_loader,
- &class_def,
+ class_def,
Runtime::Current()->GetCompilerCallbacks(),
true /* allow soft failures */,
log_level_,
@@ -2529,7 +2529,7 @@
class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor {
public:
- virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
}
@@ -2546,7 +2546,7 @@
}
private:
- void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void FillIMTAndConflictTables(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
if (!klass->ShouldHaveImt()) {
return;
}
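
Many of the rewritten declarations above belong to small functor classes (RecordImageClassesVisitor, FindImageClassesVisitor, InitializeArrayClassesAndCreateConflictTablesVisitor) that implement ART's ClassVisitor interface: the class linker calls operator() once for every loaded class and stops as soon as it returns false. A stripped-down sketch of that pattern, with placeholder types standing in for ClassLinker and mirror::Class:

    #include <string>
    #include <unordered_set>
    #include <vector>

    // Hypothetical stand-ins for ART's mirror::Class and class table.
    struct Class { std::string descriptor; };

    class ClassVisitor {
     public:
      virtual ~ClassVisitor() {}
      // Return false to stop the iteration early.
      virtual bool operator()(Class* klass) = 0;
    };

    class ClassTable {
     public:
      void VisitClasses(ClassVisitor* visitor) {
        for (Class& c : classes_) {
          if (!(*visitor)(&c)) {
            return;
          }
        }
      }
      std::vector<Class> classes_;
    };

    // Analogous to RecordImageClassesVisitor: record every descriptor it sees.
    class RecordDescriptorsVisitor : public ClassVisitor {
     public:
      explicit RecordDescriptorsVisitor(std::unordered_set<std::string>* out) : out_(out) {}
      bool operator()(Class* klass) override {
        out_->insert(klass->descriptor);
        return true;  // Keep going.
      }
     private:
      std::unordered_set<std::string>* const out_;
    };
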
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 2dd4651..fbc1edd 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -125,7 +125,7 @@
// Compile a single Method.
void CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!compiled_methods_lock_, !compiled_classes_lock_, !dex_to_dex_references_lock_);
VerificationResults* GetVerificationResults() const {
@@ -199,7 +199,7 @@
bool CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> dex_cache,
uint32_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, uint32_t string_idx)
REQUIRES(!Locks::mutator_lock_);
@@ -208,7 +208,7 @@
bool CanAccessTypeWithoutChecks(uint32_t referrer_idx,
Handle<mirror::DexCache> dex_cache,
uint32_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Are runtime access and instantiable checks necessary in the code?
// out_is_finalizable is set to whether the type is finalizable.
@@ -216,7 +216,7 @@
Handle<mirror::DexCache> dex_cache,
uint32_t type_idx,
bool* out_is_finalizable)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_idx,
bool* is_type_initialized, bool* use_direct_type_ptr,
@@ -230,23 +230,23 @@
// Get the DexCache for the compilation unit.
mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa,
const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve compiling method's class. Returns null on failure.
mirror::Class* ResolveCompilingMethodsClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* ResolveClass(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, uint16_t type_index,
const DexCompilationUnit* mUnit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a field. Returns null on failure, including incompatible class change.
// NOTE: Unlike ClassLinker's ResolveField(), this method enforces is_static.
@@ -254,40 +254,40 @@
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t field_idx, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a field with a given dex file.
ArtField* ResolveFieldWithDexFile(
const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexFile* dex_file,
uint32_t field_idx, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get declaration location of a resolved field.
void GetResolvedFieldDexFileLocation(
ArtField* resolved_field, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsFieldVolatile(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
- MemberOffset GetFieldOffset(ArtField* field) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsFieldVolatile(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
+ MemberOffset GetFieldOffset(ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_);
// Find a dex cache for a dex file.
inline mirror::DexCache* FindDexCache(const DexFile* dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset.
std::pair<bool, bool> IsFastInstanceField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast-path an SGET/SPUT access to a static field? If yes, compute the type index
// of the declaring class in the referrer's dex file.
std::pair<bool, bool> IsFastStaticField(
mirror::DexCache* dex_cache, mirror::Class* referrer_class,
ArtField* resolved_field, uint16_t field_idx, uint32_t* storage_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return whether the declaring class of `resolved_method` is
// available to `referrer_class`. If this is true, compute the type
@@ -299,34 +299,34 @@
ArtMethod* resolved_method,
uint16_t method_idx,
uint32_t* storage_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Is the static field in the referrer's class?
bool IsStaticFieldInReferrerClass(mirror::Class* referrer_class, ArtField* resolved_field)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Is static field's class initialized?
bool IsStaticFieldsClassInitialized(mirror::Class* referrer_class,
ArtField* resolved_field)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a method. Returns null on failure, including incompatible class change.
ArtMethod* ResolveMethod(
ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit,
uint32_t method_idx, InvokeType invoke_type, bool check_incompatible_class_change = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get declaration location of a resolved method.
void GetResolvedMethodDexFileLocation(
ArtMethod* resolved_method, const DexFile** declaring_dex_file,
uint16_t* declaring_class_idx, uint16_t* declaring_method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the index in the vtable of the method.
uint16_t GetResolvedMethodVTableIndex(
ArtMethod* resolved_method, InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast-path an INVOKE? If no, returns 0. If yes, returns a non-zero opaque flags value
// for ProcessedInvoke() and computes the necessary lowering info.
@@ -336,13 +336,13 @@
mirror::Class* referrer_class, ArtMethod* resolved_method, InvokeType* invoke_type,
MethodReference* target_method, const MethodReference* devirt_target,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Is method's class initialized for an invoke?
// For static invokes, this determines whether we need to consider a potential call to <clinit>().
// For non-static invokes, assuming a non-null reference, the class is always initialized.
bool IsMethodsClassInitialized(mirror::Class* referrer_class, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the layout of dex cache arrays for a dex file. Returns invalid layout if the
// dex cache arrays don't have a fixed layout.
@@ -357,7 +357,7 @@
ArtField** resolved_field,
mirror::Class** referrer_class,
mirror::DexCache** dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast path instance field access? Computes field's offset and volatility.
bool ComputeInstanceFieldInfo(uint32_t field_idx, const DexCompilationUnit* mUnit, bool is_put,
@@ -368,7 +368,7 @@
const DexCompilationUnit* mUnit,
bool is_put,
const ScopedObjectAccess& soa)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast-path an interface, super class, or virtual method call? Computes method's vtable
@@ -467,7 +467,7 @@
// Can we assume that the klass is loaded?
bool CanAssumeClassIsLoaded(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool MayInline(const DexFile* inlined_from, const DexFile* inlined_into) const {
if (!kIsTargetBuild) {
@@ -497,7 +497,7 @@
ArtMember* resolved_member,
uint16_t member_idx,
uint32_t* storage_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can `referrer_class` access the resolved `member`?
// Dispatch call to mirror::Class::CanAccessResolvedField or
@@ -509,13 +509,13 @@
ArtMember* member,
mirror::DexCache* dex_cache,
uint32_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we assume that the klass is initialized?
bool CanAssumeClassIsInitialized(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CanReferrerAssumeClassIsInitialized(mirror::Class* referrer_class, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// These flags are internal to CompilerDriver for collecting INVOKE resolution statistics.
// The only external contract is that unresolved method has flags 0 and resolved non-0.
@@ -546,7 +546,7 @@
/*out*/int* stats_flags,
MethodReference* target_method,
uintptr_t* direct_code, uintptr_t* direct_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
void PreCompile(jobject class_loader,
@@ -605,7 +605,7 @@
void UpdateImageClasses(TimingLogger* timings) REQUIRES(!Locks::mutator_lock_);
static void FindClinitImageClassesCallback(mirror::Object* object, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index bb45999..7634510 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -111,7 +111,7 @@
}
static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* klass = obj->GetClass();
CHECK_NE(PrettyClass(klass), "com.android.dex.Dex");
}
@@ -687,7 +687,7 @@
class ComputeLazyFieldsForClassesVisitor : public ClassVisitor {
public:
- bool operator()(Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(Thread::Current());
mirror::Class::ComputeName(hs.NewHandle(c));
return true;
@@ -700,7 +700,7 @@
class_linker->VisitClassesWithoutClassesLock(&visitor);
}
-static bool IsBootClassLoaderClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+static bool IsBootClassLoaderClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
return klass->GetClassLoader() == nullptr;
}
@@ -828,7 +828,7 @@
public:
explicit NonImageClassesVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
- bool operator()(Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (!image_writer_->KeepClass(klass)) {
classes_to_prune_.insert(klass);
}
@@ -1603,7 +1603,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = image_writer_->GetImageAddress(*roots[i]);
}
@@ -1611,7 +1611,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr()));
}
@@ -1864,7 +1864,7 @@
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(),
image_writer_->GetImageAddress(ref->GetReferent()));
@@ -1888,7 +1888,7 @@
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
mirror::Reference* ref ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
}
};
@@ -1904,14 +1904,14 @@
}
template <typename T>
-std::string PrettyPrint(T* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string PrettyPrint(T* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream oss;
oss << ptr;
return oss.str();
}
template <>
-std::string PrettyPrint(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string PrettyPrint(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
return PrettyMethod(method);
}
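
PrettyPrint is a function template with a full specialization for ArtMethod*, so method pointers are rendered through PrettyMethod() while any other pointer type falls back to streaming the raw address. The same shape, reduced to a standalone example around a made-up Widget type:

    #include <iostream>
    #include <sstream>
    #include <string>

    struct Widget { std::string name; };

    // Generic case: stream the pointer value itself.
    template <typename T>
    std::string PrettyPrint(T* ptr) {
      std::ostringstream oss;
      oss << ptr;
      return oss.str();
    }

    // Full specialization: widgets know how to describe themselves.
    template <>
    std::string PrettyPrint(Widget* widget) {
      return widget != nullptr ? widget->name : "null widget";
    }

    int main() {
      int x = 42;
      Widget w{"left pane"};
      std::cout << PrettyPrint(&x) << "\n";  // Prints an address.
      std::cout << PrettyPrint(&w) << "\n";  // Prints "left pane".
    }
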
@@ -1945,7 +1945,7 @@
explicit NativeLocationVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
template <typename T>
- T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
return image_writer_->NativeLocationInImage(ptr);
}
@@ -2023,7 +2023,7 @@
explicit ImageAddressVisitor(ImageWriter* image_writer) : image_writer_(image_writer) {}
template <typename T>
- T* operator()(T* ptr) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* operator()(T* ptr) const REQUIRES_SHARED(Locks::mutator_lock_) {
return image_writer_->GetImageAddress(ptr);
}
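
NativeLocationVisitor and ImageAddressVisitor are small templated functors: handed any pointer into the runtime heap, they return where that object will live once the image is mapped at its target address. Conceptually this is an offset lookup rebased onto the image's base address; a simplified sketch with an invented class (ImageWriter's bin slots and per-oat-file bookkeeping are omitted):

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    class ImageAddressTranslator {
     public:
      explicit ImageAddressTranslator(uintptr_t image_begin) : image_begin_(image_begin) {}

      // Called while laying out the image: remember each object's offset.
      void AssignOffset(const void* obj, size_t offset) { offsets_[obj] = offset; }

      // Called while fixing up pointers: rebase the offset onto the target address.
      template <typename T>
      T* Translate(T* obj) const {
        if (obj == nullptr) {
          return nullptr;  // Null stays null, as in GetImageAddress().
        }
        auto it = offsets_.find(obj);
        if (it == offsets_.end()) {
          return obj;  // Not part of the image (cf. IsInBootImage() in the real writer).
        }
        return reinterpret_cast<T*>(image_begin_ + it->second);
      }

     private:
      const uintptr_t image_begin_;
      std::unordered_map<const void*, size_t> offsets_;
    };
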
diff --git a/compiler/image_writer.h b/compiler/image_writer.h
index 7d13656..76749cf 100644
--- a/compiler/image_writer.h
+++ b/compiler/image_writer.h
@@ -77,7 +77,7 @@
}
template <typename T>
- T* GetImageAddress(T* object) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* GetImageAddress(T* object) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (object == nullptr || IsInBootImage(object)) {
return object;
} else {
@@ -87,11 +87,11 @@
}
}
- ArtMethod* GetImageMethodAddress(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetImageMethodAddress(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename PtrType>
PtrType GetDexCacheArrayElementImageAddress(const DexFile* dex_file, uint32_t offset)
- const SHARED_REQUIRES(Locks::mutator_lock_) {
+ const REQUIRES_SHARED(Locks::mutator_lock_) {
auto oat_it = dex_file_oat_index_map_.find(dex_file);
DCHECK(oat_it != dex_file_oat_index_map_.end());
const ImageInfo& image_info = GetImageInfo(oat_it->second);
@@ -132,7 +132,7 @@
// Get the index of the oat file containing the dex file served by the dex cache.
size_t GetOatIndexForDexCache(mirror::DexCache* dex_cache) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update the oat layout for the given oat file.
// This will make the oat_offset for the next oat file valid.
@@ -147,7 +147,7 @@
bool AllocMemory();
// Mark the objects defined in this space in the given live bitmap.
- void RecordImageAllocations() SHARED_REQUIRES(Locks::mutator_lock_);
+ void RecordImageAllocations() REQUIRES_SHARED(Locks::mutator_lock_);
// Classify different kinds of bins that objects end up getting packed into during image writing.
// Ordered from dirtiest to cleanest (until ArtMethods).
@@ -311,34 +311,34 @@
// We use the lock word to store the offset of the object in the image.
void AssignImageOffset(mirror::Object* object, BinSlot bin_slot)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetImageOffset(mirror::Object* object, size_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsImageOffsetAssigned(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_);
- size_t GetImageOffset(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t GetImageOffset(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateImageOffset(mirror::Object* obj, uintptr_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void PrepareDexCacheArraySlots() SHARED_REQUIRES(Locks::mutator_lock_);
- void AssignImageBinSlot(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ void PrepareDexCacheArraySlots() REQUIRES_SHARED(Locks::mutator_lock_);
+ void AssignImageBinSlot(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
void SetImageBinSlot(mirror::Object* object, BinSlot bin_slot)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsImageBinSlotAssigned(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_);
- BinSlot GetImageBinSlot(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ BinSlot GetImageBinSlot(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
void AddDexCacheArrayRelocation(void* array, size_t offset, mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void AddMethodPointerArray(mirror::PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void AddMethodPointerArray(mirror::PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_);
static void* GetImageAddressCallback(void* writer, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ImageWriter*>(writer)->GetImageAddress(obj);
}
mirror::Object* GetLocalAddress(mirror::Object* object) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t offset = GetImageOffset(object);
size_t oat_index = GetOatIndex(object);
const ImageInfo& image_info = GetImageInfo(oat_index);
@@ -358,94 +358,94 @@
}
// Returns true if the class was in the original requested image classes list.
- bool KeepClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool KeepClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Debug aid that lists the requested image classes.
void DumpImageClasses();
// Preinitializes some otherwise lazy fields (such as Class name) to avoid runtime image dirtying.
void ComputeLazyFieldsForImageClasses()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Remove unwanted classes from various roots.
- void PruneNonImageClasses() SHARED_REQUIRES(Locks::mutator_lock_);
+ void PruneNonImageClasses() REQUIRES_SHARED(Locks::mutator_lock_);
// Verify unwanted classes removed.
- void CheckNonImageClassesRemoved() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckNonImageClassesRemoved() REQUIRES_SHARED(Locks::mutator_lock_);
static void CheckNonImageClassesRemovedCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Lays out where the image objects will be at runtime.
void CalculateNewObjectOffsets()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateHeader(size_t oat_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* CreateImageRoots(size_t oat_index) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CalculateObjectBinSlots(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UnbinObjectsIntoOffset(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void WalkInstanceFields(mirror::Object* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void WalkFieldsInOrder(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void WalkFieldsCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void UnbinObjectsIntoOffsetCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Creates the contiguous image in memory and adjusts pointers.
- void CopyAndFixupNativeData(size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupObjects() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CopyAndFixupNativeData(size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupObjects() REQUIRES_SHARED(Locks::mutator_lock_);
static void CopyAndFixupObjectsCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void CopyAndFixupImTable(ImTable* orig, ImTable* copy) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CopyAndFixupImTable(ImTable* orig, ImTable* copy) REQUIRES_SHARED(Locks::mutator_lock_);
void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupClass(mirror::Class* orig, mirror::Class* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupObject(mirror::Object* orig, mirror::Object* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCache(mirror::DexCache* orig_dex_cache, mirror::DexCache* copy_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupPointerArray(mirror::Object* dst,
mirror::PointerArray* arr,
mirror::Class* klass,
Bin array_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get quick code for non-resolution/imt_conflict/abstract method.
const uint8_t* GetQuickCode(ArtMethod* method,
const ImageInfo& image_info,
bool* quick_is_interpreted)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Calculate the sum total of the bin slot sizes in [0, up_to). Defaults to all bins.
size_t GetBinSizeSum(ImageInfo& image_info, Bin up_to = kBinSize) const;
// Return true if a method is likely to be dirtied at runtime.
- bool WillMethodBeDirty(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool WillMethodBeDirty(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_);
// Assign the offset for an ArtMethod.
void AssignMethodOffset(ArtMethod* method,
NativeObjectRelocationType type,
size_t oat_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void TryAssignImTableOffset(ImTable* imt, size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_);
+ void TryAssignImTableOffset(ImTable* imt, size_t oat_index) REQUIRES_SHARED(Locks::mutator_lock_);
// Assign the offset for an IMT conflict table. Does nothing if the table already has a native
// relocation.
void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if klass is loaded by the boot class loader but not in the boot image.
- bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsBootClassLoaderNonImageClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if klass depends on a boot class loader non image class. We want to prune these
// classes since we do not want any boot class loader classes in the image. This means that
@@ -453,25 +453,25 @@
// PruneAppImageClass also prunes if klass depends on a non-image class according to the compiler
// driver.
bool PruneAppImageClass(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// early_exit is true if we had a cyclic dependency anywhere down the chain.
bool PruneAppImageClassInternal(mirror::Class* klass,
bool* early_exit,
std::unordered_set<mirror::Class*>* visited)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Bin BinTypeForNativeRelocationType(NativeObjectRelocationType type);
- uintptr_t NativeOffsetInImage(void* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t NativeOffsetInImage(void* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Location of where the object will be when the image is loaded at runtime.
template <typename T>
- T* NativeLocationInImage(T* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ T* NativeLocationInImage(T* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Location of where the temporary copy of the object currently is.
template <typename T>
- T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
+ T* NativeCopyLocation(T* obj, mirror::DexCache* dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if obj is inside the boot image space. This may only return true if we are
// compiling an app image.
@@ -481,7 +481,7 @@
bool IsInBootOatFile(const void* ptr) const;
// Get the index of the oat file associated with the object.
- size_t GetOatIndex(mirror::Object* object) const SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetOatIndex(mirror::Object* object) const REQUIRES_SHARED(Locks::mutator_lock_);
// The oat index for shared data in multi-image and all data in single-image compilation.
size_t GetDefaultOatIndex() const {
@@ -498,7 +498,7 @@
// Find an already strong interned string in the other images or in the boot image. Used to
// remove duplicates in the multi image and app image case.
- mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::String* FindInternedString(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if there already exists a native allocation for an object.
bool NativeRelocationAssigned(void* ptr) const;
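
Several of the entry points above (GetImageAddressCallback, WalkFieldsCallback, UnbinObjectsIntoOffsetCallback, CopyAndFixupObjectsCallback) share one idiom: a C-style visitation API delivers a void* cookie, and a static trampoline casts it back to the ImageWriter before dispatching to a member function. A generic, self-contained sketch of that idiom; VisitAll and Summer are placeholder names:

    #include <cstdio>
    #include <vector>

    // C-style visitation API: calls `callback(element, arg)` for every element.
    using VisitCallback = void (*)(int* element, void* arg);

    void VisitAll(std::vector<int>& data, VisitCallback callback, void* arg) {
      for (int& value : data) {
        callback(&value, arg);
      }
    }

    class Summer {
     public:
      // Static trampoline: recover the object from the void* cookie, then dispatch.
      static void AddCallback(int* element, void* arg) {
        reinterpret_cast<Summer*>(arg)->Add(*element);
      }
      void Add(int value) { total_ += value; }
      int total() const { return total_; }
     private:
      int total_ = 0;
    };

    int main() {
      std::vector<int> data = {1, 2, 3};
      Summer summer;
      VisitAll(data, &Summer::AddCallback, &summer);
      std::printf("%d\n", summer.total());  // 6
    }
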
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 6f6a8f5..7246ace 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -58,14 +58,14 @@
extern "C" bool jit_compile_method(
void* handle, ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
return jit_compiler->CompileMethod(self, method, osr);
}
extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t count)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* jit_compiler = reinterpret_cast<JitCompiler*>(handle);
DCHECK(jit_compiler != nullptr);
if (jit_compiler->GetCompilerOptions()->GetGenerateDebugInfo()) {
diff --git a/compiler/jit/jit_compiler.h b/compiler/jit/jit_compiler.h
index 533dccf..18e3155 100644
--- a/compiler/jit/jit_compiler.h
+++ b/compiler/jit/jit_compiler.h
@@ -37,7 +37,7 @@
// Compilation entrypoint. Returns whether the compilation succeeded.
bool CompileMethod(Thread* self, ArtMethod* method, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
CompilerOptions* GetCompilerOptions() const {
return compiler_options_.get();
@@ -59,7 +59,7 @@
// This is in the compiler since the runtime doesn't have access to the compiled method
// structures.
bool AddToCodeCache(ArtMethod* method, const CompiledMethod* compiled_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(JitCompiler);
};
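
jit_compile_method and jit_types_loaded in jit_compiler.cc are exported with C linkage because the JIT compiler is built as a separate library that the runtime loads and then resolves these hooks by name. A generic sketch of that lookup pattern using dlopen/dlsym; the symbol name mirrors the diff, but the helper itself is illustrative and not ART's actual jit.cc:

    #include <dlfcn.h>
    #include <cstdio>

    class ArtMethod;  // Opaque forward declarations, sufficient for the sketch.
    class Thread;

    using JitCompileFn = bool (*)(void* handle, ArtMethod* method, Thread* self, bool osr);

    bool LoadJitCompiler(const char* library, void** out_lib, JitCompileFn* out_compile) {
      void* lib = dlopen(library, RTLD_NOW);
      if (lib == nullptr) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return false;
      }
      // C linkage keeps the symbol name unmangled, so a plain string lookup works.
      *out_compile = reinterpret_cast<JitCompileFn>(dlsym(lib, "jit_compile_method"));
      if (*out_compile == nullptr) {
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(lib);
        return false;
      }
      *out_lib = lib;
      return true;
    }
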
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index bf53bb2..b1e3811 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -60,7 +60,7 @@
void CheckMethod(ArtMethod* method,
const OatFile::OatMethod& oat_method,
const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const CompiledMethod* compiled_method =
compiler_driver_->GetCompiledMethod(MethodReference(&dex_file,
method->GetDexMethodIndex()));
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 8a80982..c9c5d24 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -226,7 +226,6 @@
return dex_file_location_data_;
}
- void ReserveTypeLookupTable(OatWriter* oat_writer);
void ReserveClassOffsets(OatWriter* oat_writer);
size_t SizeOf() const;
@@ -436,36 +435,35 @@
instruction_set_features,
dchecked_integral_cast<uint32_t>(oat_dex_files_.size()),
key_value_store);
- offset = InitOatDexFiles(offset);
- size_ = offset;
+ size_ = InitOatDexFiles(offset);
std::unique_ptr<MemMap> dex_files_map;
std::vector<std::unique_ptr<const DexFile>> dex_files;
- if (!WriteDexFiles(rodata, file)) {
+ if (!WriteDexFiles(rodata, file) ||
+ !OpenDexFiles(file, verify, &dex_files_map, &dex_files)) {
return false;
}
- // Reserve space for type lookup tables and update type_lookup_table_offset_.
- for (OatDexFile& oat_dex_file : oat_dex_files_) {
- oat_dex_file.ReserveTypeLookupTable(this);
+
+ // Do a bulk checksum update for Dex[]. Doing it piece by piece would be
+ // difficult because we're not using the OutputStream directly.
+ if (!oat_dex_files_.empty()) {
+ size_t size = size_ - oat_dex_files_[0].dex_file_offset_;
+ oat_header_->UpdateChecksum(dex_files_map->Begin(), size);
}
- size_t size_after_type_lookup_tables = size_;
+
+ ChecksumUpdatingOutputStream checksum_updating_rodata(rodata, oat_header_.get());
+
+ if (!WriteTypeLookupTables(&checksum_updating_rodata, dex_files)) {
+ return false;
+ }
+
// Reserve space for class offsets and update class_offsets_offset_.
for (OatDexFile& oat_dex_file : oat_dex_files_) {
oat_dex_file.ReserveClassOffsets(this);
}
- ChecksumUpdatingOutputStream checksum_updating_rodata(rodata, oat_header_.get());
- if (!WriteOatDexFiles(&checksum_updating_rodata) ||
- !ExtendForTypeLookupTables(rodata, file, size_after_type_lookup_tables) ||
- !OpenDexFiles(file, verify, &dex_files_map, &dex_files) ||
- !WriteTypeLookupTables(dex_files_map.get(), dex_files)) {
- return false;
- }
- // Do a bulk checksum update for Dex[] and TypeLookupTable[]. Doing it piece by
- // piece would be difficult because we're not using the OutpuStream directly.
- if (!oat_dex_files_.empty()) {
- size_t size = size_after_type_lookup_tables - oat_dex_files_[0].dex_file_offset_;
- oat_header_->UpdateChecksum(dex_files_map->Begin(), size);
+ if (!WriteOatDexFiles(&checksum_updating_rodata)) {
+ return false;
}
*opened_dex_files_map = std::move(dex_files_map);
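
The reordered Write() path above routes the type lookup tables through a ChecksumUpdatingOutputStream, so their bytes are folded into the OatHeader checksum as they are written instead of being patched into the file and checksummed in a later pass. The decorator idea can be sketched over an invented minimal stream interface (ART's real OutputStream and OatHeader::UpdateChecksum are not reproduced here); Adler-32 via zlib stands in as the example rolling checksum:

    #include <zlib.h>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical minimal stream interface, used only for this sketch.
    class Stream {
     public:
      virtual ~Stream() {}
      virtual bool WriteFully(const void* buffer, size_t byte_count) = 0;
    };

    // Decorator: forwards writes and folds every byte into a running checksum.
    class ChecksumUpdatingStream : public Stream {
     public:
      ChecksumUpdatingStream(Stream* out, uint32_t* checksum)
          : out_(out), checksum_(checksum) {}

      bool WriteFully(const void* buffer, size_t byte_count) override {
        *checksum_ = adler32(*checksum_,
                             reinterpret_cast<const Bytef*>(buffer),
                             static_cast<uInt>(byte_count));
        return out_->WriteFully(buffer, byte_count);
      }

     private:
      Stream* const out_;
      uint32_t* const checksum_;
    };
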
@@ -684,7 +682,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -845,7 +843,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -887,7 +885,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::TypeId& type_id =
dex_file_->GetTypeId(dex_file_->GetClassDef(class_def_index_).class_idx_);
const char* class_descriptor = dex_file_->GetTypeDescriptor(type_id);
@@ -974,7 +972,7 @@
}
bool StartClass(const DexFile* dex_file, size_t class_def_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatDexMethodVisitor::StartClass(dex_file, class_def_index);
if (dex_cache_ == nullptr || dex_cache_->GetDexFile() != dex_file) {
dex_cache_ = class_linker_->FindDexCache(Thread::Current(), *dex_file);
@@ -983,7 +981,7 @@
return true;
}
- bool EndClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool EndClass() REQUIRES_SHARED(Locks::mutator_lock_) {
bool result = OatDexMethodVisitor::EndClass();
if (oat_class_index_ == writer_->oat_classes_.size()) {
DCHECK(result); // OatDexMethodVisitor::EndClass() never fails.
@@ -997,7 +995,7 @@
}
bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OatClass* oat_class = &writer_->oat_classes_[oat_class_index_];
const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
@@ -1138,7 +1136,7 @@
}
ArtMethod* GetTargetMethod(const LinkerPatch& patch)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MethodReference ref = patch.TargetMethod();
mirror::DexCache* dex_cache =
(dex_file_ == ref.dex_file) ? dex_cache_ : class_linker_->FindDexCache(
@@ -1149,7 +1147,7 @@
return method;
}
- uint32_t GetTargetOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetTargetOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t target_offset = writer_->relative_patcher_->GetOffset(patch.TargetMethod());
// If there's no new compiled code, either we're compiling an app and the target method
// is in the boot image, or we need to point to the correct trampoline.
@@ -1175,20 +1173,20 @@
}
mirror::DexCache* GetDexCache(const DexFile* target_dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return (target_dex_file == dex_file_)
? dex_cache_
: class_linker_->FindDexCache(Thread::Current(), *target_dex_file);
}
- mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetTargetType(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile());
mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex());
CHECK(type != nullptr);
return type;
}
- mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::String* GetTargetString(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
StackHandleScope<1> hs(soa.Self());
ClassLinker* linker = Runtime::Current()->GetClassLinker();
@@ -1202,7 +1200,7 @@
return string;
}
- uint32_t GetDexCacheOffset(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetDexCacheOffset(const LinkerPatch& patch) REQUIRES_SHARED(Locks::mutator_lock_) {
if (writer_->HasBootImage()) {
uintptr_t element = writer_->image_writer_->GetDexCacheArrayElementImageAddress<uintptr_t>(
patch.TargetDexCacheDexFile(), patch.TargetDexCacheElementOffset());
@@ -1215,7 +1213,7 @@
}
}
- uint32_t GetTargetObjectOffset(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetTargetObjectOffset(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(writer_->HasBootImage());
object = writer_->image_writer_->GetImageAddress(object);
size_t oat_index = writer_->image_writer_->GetOatIndexForDexFile(dex_file_);
@@ -1225,7 +1223,7 @@
}
void PatchObjectAddress(std::vector<uint8_t>* code, uint32_t offset, mirror::Object* object)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (writer_->HasBootImage()) {
object = writer_->image_writer_->GetImageAddress(object);
} else {
@@ -1245,7 +1243,7 @@
}
void PatchMethodAddress(std::vector<uint8_t>* code, uint32_t offset, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (writer_->HasBootImage()) {
method = writer_->image_writer_->GetImageMethodAddress(method);
} else if (kIsDebugBuild) {
@@ -1273,7 +1271,7 @@
}
void PatchCodeAddress(std::vector<uint8_t>* code, uint32_t offset, uint32_t target_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t address = target_offset;
if (writer_->HasBootImage()) {
size_t oat_index = writer_->image_writer_->GetOatIndexForDexCache(dex_cache_);
@@ -1525,7 +1523,7 @@
off_t tables_end_offset = out->Seek(0, kSeekCurrent);
if (tables_end_offset == static_cast<off_t>(-1)) {
- LOG(ERROR) << "Failed to seek to oat code position in " << out->GetLocation();
+ LOG(ERROR) << "Failed to get oat code position in " << out->GetLocation();
return false;
}
size_t file_offset = oat_data_offset_;
@@ -2094,6 +2092,12 @@
bool OatWriter::WriteOatDexFiles(OutputStream* rodata) {
TimingLogger::ScopedTiming split("WriteOatDexFiles", timings_);
+ off_t initial_offset = rodata->Seek(0, kSeekCurrent);
+ if (initial_offset == static_cast<off_t>(-1)) {
+ LOG(ERROR) << "Failed to get current position in " << rodata->GetLocation();
+ return false;
+ }
+
// Seek to the start of OatDexFiles, i.e. to the end of the OatHeader. If there are
// no OatDexFiles, no data is actually written to .rodata before WriteHeader() and
// this Seek() ensures that we reserve the space for OatHeader in .rodata.
@@ -2119,30 +2123,13 @@
}
}
- return true;
-}
+ // Seek back to the initial position.
+ if (rodata->Seek(initial_offset, kSeekSet) != initial_offset) {
+ PLOG(ERROR) << "Failed to seek to initial position. Actual: " << actual_offset
+ << " Expected: " << initial_offset << " File: " << rodata->GetLocation();
+ return false;
+ }
-bool OatWriter::ExtendForTypeLookupTables(OutputStream* rodata, File* file, size_t offset) {
- TimingLogger::ScopedTiming split("ExtendForTypeLookupTables", timings_);
-
- int64_t new_length = oat_data_offset_ + dchecked_integral_cast<int64_t>(offset);
- if (file->SetLength(new_length) != 0) {
- PLOG(ERROR) << "Failed to extend file for type lookup tables. new_length: " << new_length
- << "File: " << file->GetPath();
- return false;
- }
- off_t actual_offset = rodata->Seek(new_length, kSeekSet);
- if (actual_offset != static_cast<off_t>(new_length)) {
- PLOG(ERROR) << "Failed to seek stream after extending file for type lookup tables."
- << " Actual: " << actual_offset << " Expected: " << new_length
- << " File: " << rodata->GetLocation();
- return false;
- }
- if (!rodata->Flush()) {
- PLOG(ERROR) << "Failed to flush stream after extending for type lookup tables."
- << " File: " << rodata->GetLocation();
- return false;
- }
return true;
}
@@ -2223,26 +2210,66 @@
}
bool OatWriter::WriteTypeLookupTables(
- MemMap* opened_dex_files_map,
+ OutputStream* rodata,
const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files) {
TimingLogger::ScopedTiming split("WriteTypeLookupTables", timings_);
DCHECK_EQ(opened_dex_files.size(), oat_dex_files_.size());
for (size_t i = 0, size = opened_dex_files.size(); i != size; ++i) {
OatDexFile* oat_dex_file = &oat_dex_files_[i];
- if (oat_dex_file->lookup_table_offset_ != 0u) {
- DCHECK(oat_dex_file->create_type_lookup_table_ == CreateTypeLookupTable::kCreate);
- DCHECK_NE(oat_dex_file->class_offsets_.size(), 0u);
- size_t map_offset = oat_dex_files_[0].dex_file_offset_;
- size_t lookup_table_offset = oat_dex_file->lookup_table_offset_;
- uint8_t* lookup_table = opened_dex_files_map->Begin() + (lookup_table_offset - map_offset);
- opened_dex_files[i]->CreateTypeLookupTable(lookup_table);
+ DCHECK_EQ(oat_dex_file->lookup_table_offset_, 0u);
+
+ if (oat_dex_file->create_type_lookup_table_ != CreateTypeLookupTable::kCreate ||
+ oat_dex_file->class_offsets_.empty()) {
+ continue;
}
+
+ size_t table_size = TypeLookupTable::RawDataLength(oat_dex_file->class_offsets_.size());
+ if (table_size == 0u) {
+ continue;
+ }
+
+ // Create the lookup table. When `nullptr` is given as the storage buffer,
+ // TypeLookupTable allocates its own and DexFile takes ownership.
+ opened_dex_files[i]->CreateTypeLookupTable(/* storage */ nullptr);
+ TypeLookupTable* table = opened_dex_files[i]->GetTypeLookupTable();
+
+ // Type tables are required to be 4 byte aligned.
+ size_t original_offset = size_;
+ size_t rodata_offset = RoundUp(original_offset, 4);
+ size_t padding_size = rodata_offset - original_offset;
+
+ if (padding_size != 0u) {
+ std::vector<uint8_t> buffer(padding_size, 0u);
+ if (!rodata->WriteFully(buffer.data(), padding_size)) {
+ PLOG(ERROR) << "Failed to write lookup table alignment padding."
+ << " File: " << oat_dex_file->GetLocation()
+ << " Output: " << rodata->GetLocation();
+ return false;
+ }
+ }
+
+ DCHECK_EQ(oat_data_offset_ + rodata_offset,
+ static_cast<size_t>(rodata->Seek(0u, kSeekCurrent)));
+ DCHECK_EQ(table_size, table->RawDataLength());
+
+ if (!rodata->WriteFully(table->RawData(), table_size)) {
+ PLOG(ERROR) << "Failed to write lookup table."
+ << " File: " << oat_dex_file->GetLocation()
+ << " Output: " << rodata->GetLocation();
+ return false;
+ }
+
+ oat_dex_file->lookup_table_offset_ = rodata_offset;
+
+ size_ += padding_size + table_size;
+ size_oat_lookup_table_ += table_size;
+ size_oat_lookup_table_alignment_ += padding_size;
}
- DCHECK_EQ(opened_dex_files_map == nullptr, opened_dex_files.empty());
- if (opened_dex_files_map != nullptr && !opened_dex_files_map->Sync()) {
- PLOG(ERROR) << "Failed to Sync() type lookup tables. Map: " << opened_dex_files_map->GetName();
+ if (!rodata->Flush()) {
+ PLOG(ERROR) << "Failed to flush stream after writing type lookup tables."
+ << " File: " << rodata->GetLocation();
return false;
}
@@ -2298,22 +2325,6 @@
+ sizeof(lookup_table_offset_);
}
-void OatWriter::OatDexFile::ReserveTypeLookupTable(OatWriter* oat_writer) {
- DCHECK_EQ(lookup_table_offset_, 0u);
- if (create_type_lookup_table_ == CreateTypeLookupTable::kCreate && !class_offsets_.empty()) {
- size_t table_size = TypeLookupTable::RawDataLength(class_offsets_.size());
- if (table_size != 0u) {
- // Type tables are required to be 4 byte aligned.
- size_t original_offset = oat_writer->size_;
- size_t offset = RoundUp(original_offset, 4);
- oat_writer->size_oat_lookup_table_alignment_ += offset - original_offset;
- lookup_table_offset_ = offset;
- oat_writer->size_ = offset + table_size;
- oat_writer->size_oat_lookup_table_ += table_size;
- }
- }
-}
-
void OatWriter::OatDexFile::ReserveClassOffsets(OatWriter* oat_writer) {
DCHECK_EQ(class_offsets_offset_, 0u);
if (!class_offsets_.empty()) {
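[Reviewer note] The alignment handling that moved from ReserveTypeLookupTable into WriteTypeLookupTables is the usual round-up-and-pad pattern. A minimal standalone sketch of that computation, with a local RoundUpTo helper standing in for ART's RoundUp utility (an assumption for illustration, not part of this change):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Round `offset` up to the next multiple of `alignment` (a power of two).
    constexpr size_t RoundUpTo(size_t offset, size_t alignment) {
      return (offset + alignment - 1) & ~(alignment - 1);
    }

    // Zero padding written before a 4-byte-aligned lookup table at `offset`.
    std::vector<uint8_t> AlignmentPadding(size_t offset) {
      size_t padded_offset = RoundUpTo(offset, 4u);
      return std::vector<uint8_t>(padded_offset - offset, 0u);
    }

    // Example: a table starting at offset 0x1006 needs 2 bytes of padding.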
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index decb7db..93e2e44 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -262,12 +262,11 @@
bool WriteDexFile(OutputStream* rodata, File* file, OatDexFile* oat_dex_file, File* dex_file);
bool WriteDexFile(OutputStream* rodata, OatDexFile* oat_dex_file, const uint8_t* dex_file);
bool WriteOatDexFiles(OutputStream* rodata);
- bool ExtendForTypeLookupTables(OutputStream* rodata, File* file, size_t offset);
bool OpenDexFiles(File* file,
bool verify,
/*out*/ std::unique_ptr<MemMap>* opened_dex_files_map,
/*out*/ std::vector<std::unique_ptr<const DexFile>>* opened_dex_files);
- bool WriteTypeLookupTables(MemMap* opened_dex_files_map,
+ bool WriteTypeLookupTables(OutputStream* rodata,
const std::vector<std::unique_ptr<const DexFile>>& opened_dex_files);
bool WriteCodeAlignment(OutputStream* out, uint32_t aligned_code_delta);
void SetMultiOatRelativePatcherAdjustment();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index fd396c4..78a8afb 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -23,8 +23,6 @@
#include "base/arena_object.h"
#include "base/bit_field.h"
#include "base/enums.h"
-#include "compiled_method.h"
-#include "driver/compiler_options.h"
#include "globals.h"
#include "graph_visualizer.h"
#include "locations.h"
@@ -54,6 +52,7 @@
class Assembler;
class CodeGenerator;
class CompilerDriver;
+class CompilerOptions;
class LinkerPatch;
class ParallelMoveResolver;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index a07dd6b..ac10e23 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -63,9 +63,9 @@
DISALLOW_COPY_AND_ASSIGN(InvokeRuntimeCallingConvention);
};
-static constexpr DRegister FromLowSToD(SRegister reg) {
- return DCHECK_CONSTEXPR(reg % 2 == 0, , D0)
- static_cast<DRegister>(reg / 2);
+constexpr DRegister FromLowSToD(SRegister reg) {
+ DCHECK_EQ(reg % 2, 0);
+ return static_cast<DRegister>(reg / 2);
}
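[Reviewer note] Dropping DCHECK_CONSTEXPR here works because C++14 relaxed constexpr allows ordinary statements inside a constexpr function body. A tiny standalone illustration of the same shape (hypothetical HalveEven, not ART code):

    #include <cassert>

    // Since C++14 a constexpr function may contain statements such as this
    // assert; pre-C++14 the body had to be a single return expression.
    constexpr int HalveEven(int reg) {
      assert(reg % 2 == 0);
      return reg / 2;
    }

    int main() {
      return HalveEven(6) == 3 ? 0 : 1;  // runtime use; debug builds check the precondition
    }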
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index cec3ca1..fe6069c 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -20,6 +20,7 @@
#include "arch/mips/instruction_set_features_mips.h"
#include "art_method.h"
#include "code_generator_utils.h"
+#include "compiled_method.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "gc/accounting/card_table.h"
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 0614945..5f39a49 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -47,6 +47,9 @@
private:
void VisitShift(HBinaryOperation* shift);
+ void VisitEqual(HEqual* instruction) OVERRIDE;
+ void VisitNotEqual(HNotEqual* instruction) OVERRIDE;
+
void VisitAbove(HAbove* instruction) OVERRIDE;
void VisitAboveOrEqual(HAboveOrEqual* instruction) OVERRIDE;
void VisitBelow(HBelow* instruction) OVERRIDE;
@@ -140,6 +143,30 @@
}
}
+void InstructionWithAbsorbingInputSimplifier::VisitEqual(HEqual* instruction) {
+ if ((instruction->GetLeft()->IsNullConstant() && !instruction->GetRight()->CanBeNull()) ||
+ (instruction->GetRight()->IsNullConstant() && !instruction->GetLeft()->CanBeNull())) {
+ // Replace code looking like
+ // EQUAL lhs, null
+ // where lhs cannot be null with
+ // CONSTANT false
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 0));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
+void InstructionWithAbsorbingInputSimplifier::VisitNotEqual(HNotEqual* instruction) {
+ if ((instruction->GetLeft()->IsNullConstant() && !instruction->GetRight()->CanBeNull()) ||
+ (instruction->GetRight()->IsNullConstant() && !instruction->GetLeft()->CanBeNull())) {
+ // Replace code looking like
+ // NOT_EQUAL lhs, null
+ // where lhs cannot be null with
+ // CONSTANT true
+ instruction->ReplaceWith(GetGraph()->GetConstant(Primitive::kPrimBoolean, 1));
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ }
+}
+
void InstructionWithAbsorbingInputSimplifier::VisitAbove(HAbove* instruction) {
if (instruction->GetLeft()->IsConstant() &&
instruction->GetLeft()->AsConstant()->IsArithmeticZero()) {
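[Reviewer note] A small sketch of the new absorbing-input rule outside the HGraph machinery, using a hypothetical Value struct rather than the ART API:

    #include <cstdio>

    struct Value {
      bool is_null_constant;
      bool can_be_null;
    };

    // Mirrors VisitEqual/VisitNotEqual above: comparing a provably non-null
    // value against the null constant folds to a boolean constant.
    // Returns 0 (false), 1 (true), or -1 when nothing can be folded.
    int FoldNullComparison(const Value& lhs, const Value& rhs, bool is_not_equal) {
      if ((lhs.is_null_constant && !rhs.can_be_null) ||
          (rhs.is_null_constant && !lhs.can_be_null)) {
        return is_not_equal ? 1 : 0;
      }
      return -1;
    }

    int main() {
      Value null_constant{true, true};
      Value new_instance{false, false};  // e.g. a NewInstance result, never null
      std::printf("%d\n", FoldNullComparison(new_instance, null_constant, false));  // prints 0
      return 0;
    }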
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 451aa38..1e5f0b6 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -109,7 +109,7 @@
}
static bool IsMethodOrDeclaringClassFinal(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return method->IsFinal() || method->GetDeclaringClass()->IsFinal();
}
@@ -119,7 +119,7 @@
* Return nullptr if the runtime target cannot be proven.
*/
static ArtMethod* FindVirtualOrInterfaceTarget(HInvoke* invoke, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsMethodOrDeclaringClassFinal(resolved_method)) {
// No need to lookup further, the resolved method will be the target.
return resolved_method;
@@ -189,7 +189,7 @@
static uint32_t FindMethodIndexIn(ArtMethod* method,
const DexFile& dex_file,
uint32_t name_and_signature_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsSameDexFile(*method->GetDexFile(), dex_file)) {
return method->GetDexMethodIndex();
} else {
@@ -200,7 +200,7 @@
static uint32_t FindClassIndexIn(mirror::Class* cls,
const DexFile& dex_file,
Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t index = DexFile::kDexNoIndex;
if (cls->GetDexCache() == nullptr) {
DCHECK(cls->IsArrayClass()) << PrettyClass(cls);
@@ -894,7 +894,7 @@
static HInstruction* GetInvokeInputForArgVRegIndex(HInvoke* invoke_instruction,
size_t arg_vreg_index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t input_index = 0;
for (size_t i = 0; i < arg_vreg_index; ++i, ++input_index) {
DCHECK_LT(input_index, invoke_instruction->GetNumberOfArguments());
@@ -1030,7 +1030,7 @@
HInstanceFieldGet* HInliner::CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
uint32_t field_index,
HInstruction* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -1058,7 +1058,7 @@
uint32_t field_index,
HInstruction* obj,
HInstruction* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
PointerSize pointer_size = InstructionSetPointerSize(codegen_->GetInstructionSet());
ArtField* resolved_field = dex_cache->GetResolvedField(field_index, pointer_size);
DCHECK(resolved_field != nullptr);
@@ -1374,7 +1374,7 @@
static bool IsReferenceTypeRefinement(ReferenceTypeInfo declared_rti,
bool declared_can_be_null,
HInstruction* actual_obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (declared_can_be_null && !actual_obj->CanBeNull()) {
return true;
}
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index 02d3a5f..486626b 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -64,12 +64,12 @@
// reference type propagation can run after the inlining. If the inlining is successful, this
// method will replace and remove the `invoke_instruction`.
bool TryInlineAndReplace(HInvoke* invoke_instruction, ArtMethod* resolved_method, bool do_rtp)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInline(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
HInstruction** return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryBuildAndInlineHelper(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
@@ -86,7 +86,7 @@
bool TryPatternSubstitution(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
HInstruction** return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a new HInstanceFieldGet.
HInstanceFieldGet* CreateInstanceFieldGet(Handle<mirror::DexCache> dex_cache,
@@ -105,38 +105,38 @@
bool TryInlineMonomorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to inline targets of a polymorphic call.
bool TryInlinePolymorphicCall(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction,
ArtMethod* resolved_method,
const InlineCache& ic)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
HInstanceFieldGet* BuildGetReceiverClass(ClassLinker* class_linker,
HInstruction* receiver,
uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixUpReturnReferenceType(ArtMethod* resolved_method, HInstruction* return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Creates an instance of ReferenceTypeInfo from `klass` if `klass` is
// admissible (see ReferenceTypePropagation::IsAdmissible for details).
// Otherwise returns inexact Object RTI.
- ReferenceTypeInfo GetClassRTI(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ ReferenceTypeInfo GetClassRTI(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
bool ArgumentTypesMoreSpecific(HInvoke* invoke_instruction, ArtMethod* resolved_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool ReturnTypeMoreSpecific(HInvoke* invoke_instruction, HInstruction* return_replacement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add a type guard on the given `receiver`. This will add to the graph:
// i0 = HFieldGet(receiver, klass)
@@ -154,7 +154,7 @@
bool is_referrer,
HInstruction* invoke_instruction,
bool with_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Ad-hoc implementation for implementing a diamond pattern in the graph for
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index e5dab56..453068b 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -957,7 +957,7 @@
}
static bool IsSubClass(mirror::Class* to_test, mirror::Class* super_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return to_test != nullptr && !to_test->IsInterface() && to_test->IsSubClass(super_class);
}
@@ -1607,7 +1607,7 @@
}
static TypeCheckKind ComputeTypeCheckKind(Handle<mirror::Class> cls)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (cls.Get() == nullptr) {
return TypeCheckKind::kUnresolvedCheck;
} else if (cls->IsInterface()) {
diff --git a/compiler/optimizing/instruction_builder.h b/compiler/optimizing/instruction_builder.h
index 517cf76..aa34ddd 100644
--- a/compiler/optimizing/instruction_builder.h
+++ b/compiler/optimizing/instruction_builder.h
@@ -103,7 +103,7 @@
bool NeedsAccessCheck(uint32_t type_index,
Handle<mirror::DexCache> dex_cache,
/*out*/bool* finalizable) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool NeedsAccessCheck(uint32_t type_index, /*out*/bool* finalizable) const;
template<typename T>
@@ -255,14 +255,14 @@
ArtMethod* method,
uint32_t method_idx,
HInvokeStaticOrDirect::ClinitCheckRequirement* clinit_check_requirement)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Build a HNewInstance instruction.
bool BuildNewInstance(uint16_t type_index, uint32_t dex_pc);
// Return whether the compiler can assume `cls` is initialized.
bool IsInitialized(Handle<mirror::Class> cls) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to resolve a method using the class linker. Return null if a method could
// not be resolved.
diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index 99ad898..5239f8f 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -634,7 +634,7 @@
// For 64-bit quantities, this algorithm gets executed twice, (once
// for in_lo, and again for in_hi), but saves a few instructions
// because the mask values only have to be loaded once. Using this
- // algorithm the count for a 64-bit operand can be performed in 33
+ // algorithm the count for a 64-bit operand can be performed in 29
// instructions compared to a loop-based algorithm which required 47
// instructions.
@@ -687,37 +687,36 @@
__ Srl(tmp_lo, tmp_lo, 2);
__ And(tmp_lo, tmp_lo, AT);
__ Addu(tmp_lo, out_lo, tmp_lo);
- __ Srl(out_lo, tmp_lo, 4);
- __ Addu(out_lo, out_lo, tmp_lo);
__ And(out_hi, tmp_hi, AT);
__ Srl(tmp_hi, tmp_hi, 2);
__ And(tmp_hi, tmp_hi, AT);
__ Addu(tmp_hi, out_hi, tmp_hi);
- __ Srl(out_hi, tmp_hi, 4);
- __ Addu(out_hi, out_hi, tmp_hi);
+ // Here we deviate from the original algorithm a bit. We've reached
+ // the stage where the bitfields holding the subtotals are large
+    // enough to hold the combined subtotals for both the low and the high
+    // word. This means that we can add the subtotals for the high and low
+    // words into a single word, and compute the final result for both words
+    // using fewer instructions.
__ LoadConst32(AT, 0x0F0F0F0F);
- __ And(out_lo, out_lo, AT);
- __ And(out_hi, out_hi, AT);
+ __ Addu(TMP, tmp_hi, tmp_lo);
+
+ __ Srl(out, TMP, 4);
+ __ And(out, out, AT);
+ __ And(TMP, TMP, AT);
+ __ Addu(out, out, TMP);
__ LoadConst32(AT, 0x01010101);
if (isR6) {
- __ MulR6(out_lo, out_lo, AT);
-
- __ MulR6(out_hi, out_hi, AT);
+ __ MulR6(out, out, AT);
} else {
- __ MulR2(out_lo, out_lo, AT);
-
- __ MulR2(out_hi, out_hi, AT);
+ __ MulR2(out, out, AT);
}
- __ Srl(out_lo, out_lo, 24);
- __ Srl(out_hi, out_hi, 24);
-
- __ Addu(out, out_hi, out_lo);
+ __ Srl(out, out, 24);
}
}
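[Reviewer note] A standalone C++ rendering of the combined high/low trick described in the comment above. This is just the arithmetic the emitted MIPS sequence implements, not the generated code itself:

    #include <cstdint>
    #include <cstdio>

    uint32_t PopCount64Via32(uint32_t lo, uint32_t hi) {
      auto narrow = [](uint32_t x) {
        x = x - ((x >> 1) & 0x55555555u);                   // 2-bit subtotals
        x = (x & 0x33333333u) + ((x >> 2) & 0x33333333u);   // 4-bit subtotals, each <= 4
        return x;
      };
      // Each 4-bit field holds at most 4 + 4 = 8, so both halves fit in one word.
      uint32_t combined = narrow(lo) + narrow(hi);
      combined = ((combined >> 4) & 0x0F0F0F0Fu) + (combined & 0x0F0F0F0Fu);  // 8-bit subtotals
      return (combined * 0x01010101u) >> 24;                // sum the four bytes
    }

    int main() {
      std::printf("%u\n", PopCount64Via32(0xFFFFFFFFu, 0x1u));  // prints 33
      return 0;
    }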
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 2808e1b..8f37236 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2242,7 +2242,7 @@
}
static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (rti.IsValid()) {
DCHECK(upper_bound_rti.IsSupertypeOf(rti))
<< " upper_bound_rti: " << upper_bound_rti
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 94913fc..19e499b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -171,7 +171,7 @@
static ReferenceTypeInfo Create(TypeHandle type_handle, bool is_exact);
- static ReferenceTypeInfo Create(TypeHandle type_handle) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ReferenceTypeInfo Create(TypeHandle type_handle) REQUIRES_SHARED(Locks::mutator_lock_) {
return Create(type_handle, type_handle->CannotBeAssignedFromOtherTypes());
}
@@ -191,49 +191,49 @@
bool IsExact() const { return is_exact_; }
- bool IsObjectClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsObjectClass();
}
- bool IsStringClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStringClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsStringClass();
}
- bool IsObjectArray() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectArray() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return IsArrayClass() && GetTypeHandle()->GetComponentType()->IsObjectClass();
}
- bool IsInterface() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInterface() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsInterface();
}
- bool IsArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass();
}
- bool IsPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsPrimitiveArray();
}
- bool IsNonPrimitiveArrayClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsNonPrimitiveArrayClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
return GetTypeHandle()->IsArrayClass() && !GetTypeHandle()->IsPrimitiveArray();
}
- bool CanArrayHold(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanArrayHold(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
return GetTypeHandle()->GetComponentType()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
- bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanArrayHoldValuesOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
if (!IsExact()) return false;
if (!IsArrayClass()) return false;
@@ -244,13 +244,13 @@
Handle<mirror::Class> GetTypeHandle() const { return type_handle_; }
- bool IsSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
DCHECK(rti.IsValid());
return GetTypeHandle()->IsAssignableFrom(rti.GetTypeHandle().Get());
}
- bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStrictSupertypeOf(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsValid());
DCHECK(rti.IsValid());
return GetTypeHandle().Get() != rti.GetTypeHandle().Get() &&
@@ -260,7 +260,7 @@
// Returns true if the type information provide the same amount of details.
// Note that it does not mean that the instructions have the same actual type
// (because the type can be the result of a merge).
- bool IsEqual(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsEqual(ReferenceTypeInfo rti) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsValid() && !rti.IsValid()) {
// Invalid types are equal.
return true;
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index 8c0231e..a1e923b 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -19,6 +19,7 @@
#include "arch/instruction_set.h"
#include "cfi_test.h"
+#include "driver/compiler_options.h"
#include "gtest/gtest.h"
#include "optimizing/code_generator.h"
#include "optimizing/optimizing_unit_test.h"
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 6e98b4d..c5d7611 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -294,7 +294,7 @@
}
uintptr_t GetEntryPointOf(ArtMethod* method) const OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
}
@@ -311,7 +311,7 @@
bool JitCompile(Thread* self, jit::JitCodeCache* code_cache, ArtMethod* method, bool osr)
OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
void RunOptimizations(HGraph* graph,
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index e96ab19..4289cf7 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -27,7 +27,7 @@
static inline mirror::DexCache* FindDexCacheWithHint(Thread* self,
const DexFile& dex_file,
Handle<mirror::DexCache> hint_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(hint_dex_cache->GetDexFile() == &dex_file)) {
return hint_dex_cache.Get();
} else {
@@ -85,7 +85,7 @@
void VisitParameterValue(HParameterValue* instr) OVERRIDE;
void UpdateFieldAccessTypeInfo(HInstruction* instr, const FieldInfo& info);
void SetClassAsTypeInfo(HInstruction* instr, mirror::Class* klass, bool is_exact)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitInstanceFieldGet(HInstanceFieldGet* instr) OVERRIDE;
void VisitStaticFieldGet(HStaticFieldGet* instr) OVERRIDE;
void VisitUnresolvedInstanceFieldGet(HUnresolvedInstanceFieldGet* instr) OVERRIDE;
@@ -194,7 +194,7 @@
ReferenceTypeInfo upper_bound,
HInstruction* dominator_instr,
HBasicBlock* dominator_block)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// If the position where we should insert the bound type is not already a
// a bound type then we need to create one.
if (position == nullptr || !position->IsBoundType()) {
@@ -487,7 +487,7 @@
const DexFile& dex_file,
uint16_t type_idx,
Handle<mirror::DexCache> hint_dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::DexCache* dex_cache = FindDexCacheWithHint(self, dex_file, hint_dex_cache);
// Get type from dex cache assuming it was populated by the verifier.
return dex_cache->GetResolvedType(type_idx);
diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h
index edd83bf..1fa6624 100644
--- a/compiler/optimizing/reference_type_propagation.h
+++ b/compiler/optimizing/reference_type_propagation.h
@@ -44,7 +44,7 @@
// Returns true if klass is admissible to the propagation: non-null and resolved.
// For an array type, we also check if the component type is admissible.
- static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool IsAdmissible(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
return klass != nullptr &&
klass->IsResolved() &&
(!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType()));
@@ -58,7 +58,7 @@
explicit HandleCache(StackHandleScopeCollection* handles) : handles_(handles) { }
template <typename T>
- MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+ MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
return handles_->NewHandle(object);
}
@@ -80,8 +80,8 @@
void VisitPhi(HPhi* phi);
void VisitBasicBlock(HBasicBlock* block);
- void UpdateBoundType(HBoundType* bound_type) SHARED_REQUIRES(Locks::mutator_lock_);
- void UpdatePhi(HPhi* phi) SHARED_REQUIRES(Locks::mutator_lock_);
+ void UpdateBoundType(HBoundType* bound_type) REQUIRES_SHARED(Locks::mutator_lock_);
+ void UpdatePhi(HPhi* phi) REQUIRES_SHARED(Locks::mutator_lock_);
void BoundTypeForIfNotNull(HBasicBlock* block);
void BoundTypeForIfInstanceOf(HBasicBlock* block);
void ProcessWorklist();
@@ -92,10 +92,10 @@
bool UpdateReferenceTypeInfo(HInstruction* instr);
static void UpdateArrayGet(HArrayGet* instr, HandleCache* handle_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a, const ReferenceTypeInfo& b)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ValidateTypes();
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 7649b50..75a4eac 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -46,7 +46,7 @@
// Relay method to merge type in reference type propagation.
ReferenceTypeInfo MergeTypes(const ReferenceTypeInfo& a,
- const ReferenceTypeInfo& b) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ReferenceTypeInfo& b) REQUIRES_SHARED(Locks::mutator_lock_) {
return propagation_->MergeTypes(a, b);
}
@@ -56,12 +56,12 @@
}
// Helper method to construct the Object type.
- ReferenceTypeInfo ObjectType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo ObjectType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetObjectClassHandle(), is_exact);
}
// Helper method to construct the String type.
- ReferenceTypeInfo StringType(bool is_exact = true) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ReferenceTypeInfo StringType(bool is_exact = true) REQUIRES_SHARED(Locks::mutator_lock_) {
return ReferenceTypeInfo::Create(propagation_->handle_cache_.GetStringClassHandle(), is_exact);
}
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 40fff8a..81163e2 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -20,6 +20,7 @@
#include "base/enums.h"
#include "class_linker.h"
#include "code_generator.h"
+#include "driver/compiler_options.h"
#include "driver/dex_compilation_unit.h"
#include "utils/dex_cache_arrays_layout-inl.h"
#include "driver/compiler_driver.h"
@@ -295,15 +296,7 @@
DCHECK(!runtime->UseJitCompilation());
mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache);
CHECK(string != nullptr);
- if (compiler_driver_->GetSupportBootImageFixup()) {
- DCHECK(ContainsElement(compiler_driver_->GetDexFilesForOatFile(), &dex_file));
- desired_load_kind = codegen_->GetCompilerOptions().GetCompilePic()
- ? HLoadString::LoadKind::kBootImageLinkTimePcRelative
- : HLoadString::LoadKind::kBootImageLinkTimeAddress;
- } else {
- // MIPS64 or compiler_driver_test. Do not sharpen.
- DCHECK_EQ(desired_load_kind, HLoadString::LoadKind::kDexCacheViaMethod);
- }
+ // TODO: In follow up CL, add PcRelative and Address back in.
} else if (runtime->UseJitCompilation()) {
// TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus.
// DCHECK(!codegen_->GetCompilerOptions().GetCompilePic());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index 5a574d9..f7dc112 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -303,7 +303,7 @@
}
static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ReferenceTypeInfo array_type = array->GetReferenceTypeInfo();
DCHECK(array_type.IsPrimitiveArrayClass());
return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index a01e107..a4d52d7 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -59,6 +59,38 @@
worklist->insert(insert_pos.base(), block);
}
+static bool IsLinearOrderWellFormed(const HGraph& graph) {
+ for (HBasicBlock* header : graph.GetBlocks()) {
+ if (header == nullptr || !header->IsLoopHeader()) {
+ continue;
+ }
+
+ HLoopInformation* loop = header->GetLoopInformation();
+ size_t num_blocks = loop->GetBlocks().NumSetBits();
+ size_t found_blocks = 0u;
+
+ for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
+ HBasicBlock* current = it.Current();
+ if (loop->Contains(*current)) {
+ found_blocks++;
+ if (found_blocks == 1u && current != header) {
+ // First block is not the header.
+ return false;
+ } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
+ // Last block is not a back edge.
+ return false;
+ }
+ } else if (found_blocks != 0u && found_blocks != num_blocks) {
+ // Blocks are not adjacent.
+ return false;
+ }
+ }
+ DCHECK_EQ(found_blocks, num_blocks);
+ }
+
+ return true;
+}
+
void SsaLivenessAnalysis::LinearizeGraph() {
// Create a reverse post ordering with the following properties:
// - Blocks in a loop are consecutive,
@@ -100,6 +132,8 @@
forward_predecessors[block_id] = number_of_remaining_predecessors - 1;
}
} while (!worklist.empty());
+
+ DCHECK(graph_->HasIrreducibleLoops() || IsLinearOrderWellFormed(*graph_));
}
void SsaLivenessAnalysis::NumberInstructions() {
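[Reviewer note] A toy model of just the contiguity part of the property the moved check enforces; the header-first and back-edge-last conditions are handled separately in IsLinearOrderWellFormed above. The block labels and helper are illustrative only:

    #include <vector>

    // Given the linear order as loop-membership flags, the loop blocks must
    // form exactly one contiguous run.
    bool LoopBlocksAreContiguous(const std::vector<bool>& in_loop) {
      int runs = 0;
      bool previous = false;
      for (bool current : in_loop) {
        if (current && !previous) {
          ++runs;
        }
        previous = current;
      }
      return runs <= 1;
    }

    // LoopBlocksAreContiguous({false, true, true, false}) -> true
    // LoopBlocksAreContiguous({true, false, true})        -> false (loop body is split)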
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 92788fe..9f94c83 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -983,38 +983,6 @@
return false;
}
- bool IsLinearOrderWellFormed(const HGraph& graph) {
- for (HBasicBlock* header : graph.GetBlocks()) {
- if (header == nullptr || !header->IsLoopHeader()) {
- continue;
- }
-
- HLoopInformation* loop = header->GetLoopInformation();
- size_t num_blocks = loop->GetBlocks().NumSetBits();
- size_t found_blocks = 0u;
-
- for (HLinearOrderIterator it(graph); !it.Done(); it.Advance()) {
- HBasicBlock* current = it.Current();
- if (loop->Contains(*current)) {
- found_blocks++;
- if (found_blocks == 1u && current != header) {
- // First block is not the header.
- return false;
- } else if (found_blocks == num_blocks && !loop->IsBackEdge(*current)) {
- // Last block is not a back edge.
- return false;
- }
- } else if (found_blocks != 0u && found_blocks != num_blocks) {
- // Blocks are not adjacent.
- return false;
- }
- }
- DCHECK_EQ(found_blocks, num_blocks);
- }
-
- return true;
- }
-
void AddBackEdgeUses(const HBasicBlock& block_at_use) {
DCHECK(block_at_use.IsInLoop());
if (block_at_use.GetGraph()->HasIrreducibleLoops()) {
@@ -1024,8 +992,6 @@
return;
}
- DCHECK(IsLinearOrderWellFormed(*block_at_use.GetGraph()));
-
// Add synthesized uses at the back edge of loops to help the register allocator.
// Note that this method is called in decreasing liveness order, to faciliate adding
// uses at the head of the `first_use_` linked list. Because below
diff --git a/compiler/optimizing/x86_memory_gen.cc b/compiler/optimizing/x86_memory_gen.cc
index 8aa315a..4e25683 100644
--- a/compiler/optimizing/x86_memory_gen.cc
+++ b/compiler/optimizing/x86_memory_gen.cc
@@ -16,6 +16,7 @@
#include "x86_memory_gen.h"
#include "code_generator.h"
+#include "driver/compiler_options.h"
namespace art {
namespace x86 {
diff --git a/dexlayout/Android.mk b/dexlayout/Android.mk
new file mode 100755
index 0000000..3095866
--- /dev/null
+++ b/dexlayout/Android.mk
@@ -0,0 +1,50 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# TODO(sehr): Art-i-fy this makefile
+
+LOCAL_PATH:= $(call my-dir)
+
+dexlayout_src_files := dexlayout_main.cc dexlayout.cc dex_ir.cc
+dexlayout_c_includes := art/runtime
+dexlayout_libraries := libart
+
+##
+## Build the device command line tool dexlayout.
+##
+
+ifneq ($(SDK_ONLY),true) # SDK_only doesn't need device version
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := cc
+LOCAL_SRC_FILES := $(dexlayout_src_files)
+LOCAL_C_INCLUDES := $(dexlayout_c_includes)
+LOCAL_CFLAGS += -Wall
+LOCAL_SHARED_LIBRARIES += $(dexlayout_libraries)
+LOCAL_MODULE := dexlayout
+include $(BUILD_EXECUTABLE)
+endif # !SDK_ONLY
+
+##
+## Build the host command line tool dexlayout.
+##
+
+include $(CLEAR_VARS)
+LOCAL_CPP_EXTENSION := cc
+LOCAL_SRC_FILES := $(dexlayout_src_files)
+LOCAL_C_INCLUDES := $(dexlayout_c_includes)
+LOCAL_CFLAGS += -Wall
+LOCAL_SHARED_LIBRARIES += $(dexlayout_libraries)
+LOCAL_MODULE := dexlayout
+LOCAL_MULTILIB := $(ART_MULTILIB_OVERRIDE_host)
+include $(BUILD_HOST_EXECUTABLE)
diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc
new file mode 100644
index 0000000..0ed040e
--- /dev/null
+++ b/dexlayout/dex_ir.cc
@@ -0,0 +1,390 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file of the dex file intermediate representation.
+ *
+ * Utilities for reading dex files into an internal representation,
+ * manipulating them, and writing them out.
+ */
+
+#include "dex_ir.h"
+
+#include <map>
+#include <vector>
+
+#include "dex_file.h"
+#include "dex_file-inl.h"
+#include "utils.h"
+
+namespace art {
+namespace dex_ir {
+
+namespace {
+static uint64_t ReadVarWidth(const uint8_t** data, uint8_t length, bool sign_extend) {
+ uint64_t value = 0;
+ for (uint32_t i = 0; i <= length; i++) {
+ value |= static_cast<uint64_t>(*(*data)++) << (i * 8);
+ }
+ if (sign_extend) {
+ int shift = (7 - length) * 8;
+ return (static_cast<int64_t>(value) << shift) >> shift;
+ }
+ return value;
+}
+
+static bool GetPositionsCb(void* context, const DexFile::PositionInfo& entry) {
+ DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
+ std::vector<std::unique_ptr<PositionInfo>>& positions = debug_info->GetPositionInfo();
+ positions.push_back(std::unique_ptr<PositionInfo>(new PositionInfo(entry.address_, entry.line_)));
+ return false;
+}
+
+static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) {
+ DebugInfoItem* debug_info = reinterpret_cast<DebugInfoItem*>(context);
+ std::vector<std::unique_ptr<LocalInfo>>& locals = debug_info->GetLocalInfo();
+ const char* name = entry.name_ != nullptr ? entry.name_ : "(null)";
+ const char* signature = entry.signature_ != nullptr ? entry.signature_ : "";
+ locals.push_back(std::unique_ptr<LocalInfo>(
+ new LocalInfo(name, entry.descriptor_, signature, entry.start_address_,
+ entry.end_address_, entry.reg_)));
+}
+} // namespace
+
+Header::Header(const DexFile& dex_file) : dex_file_(dex_file) {
+ const DexFile::Header& disk_header = dex_file.GetHeader();
+ memcpy(magic_, disk_header.magic_, sizeof(magic_));
+ checksum_ = disk_header.checksum_;
+ // TODO(sehr): clearly the signature will need to be recomputed before dumping.
+ memcpy(signature_, disk_header.signature_, sizeof(signature_));
+ endian_tag_ = disk_header.endian_tag_;
+ file_size_ = disk_header.file_size_;
+ header_size_ = disk_header.header_size_;
+ link_size_ = disk_header.link_size_;
+ link_offset_ = disk_header.link_off_;
+ data_size_ = disk_header.data_size_;
+ data_offset_ = disk_header.data_off_;
+ // Walk the rest of the header fields.
+ string_ids_.SetOffset(disk_header.string_ids_off_);
+ for (uint32_t i = 0; i < dex_file_.NumStringIds(); ++i) {
+ string_ids_.AddWithPosition(i, new StringId(dex_file_.GetStringId(i), *this));
+ }
+ type_ids_.SetOffset(disk_header.type_ids_off_);
+ for (uint32_t i = 0; i < dex_file_.NumTypeIds(); ++i) {
+ type_ids_.AddWithPosition(i, new TypeId(dex_file_.GetTypeId(i), *this));
+ }
+ proto_ids_.SetOffset(disk_header.proto_ids_off_);
+ for (uint32_t i = 0; i < dex_file_.NumProtoIds(); ++i) {
+ proto_ids_.AddWithPosition(i, new ProtoId(dex_file_.GetProtoId(i), *this));
+ }
+ field_ids_.SetOffset(disk_header.field_ids_off_);
+ for (uint32_t i = 0; i < dex_file_.NumFieldIds(); ++i) {
+ field_ids_.AddWithPosition(i, new FieldId(dex_file_.GetFieldId(i), *this));
+ }
+ method_ids_.SetOffset(disk_header.method_ids_off_);
+ for (uint32_t i = 0; i < dex_file_.NumMethodIds(); ++i) {
+ method_ids_.AddWithPosition(i, new MethodId(dex_file_.GetMethodId(i), *this));
+ }
+ class_defs_.SetOffset(disk_header.class_defs_off_);
+ for (uint32_t i = 0; i < dex_file_.NumClassDefs(); ++i) {
+ class_defs_.AddWithPosition(i, new ClassDef(dex_file_.GetClassDef(i), *this));
+ }
+}
+
+ArrayItem::ArrayItem(Header& header, const uint8_t** data, uint8_t type, uint8_t length) {
+ Read(header, data, type, length);
+}
+
+ArrayItem::ArrayItem(Header& header, const uint8_t** data) {
+ const uint8_t encoded_value = *(*data)++;
+ Read(header, data, encoded_value & 0x1f, encoded_value >> 5);
+}
+
+void ArrayItem::Read(Header& header, const uint8_t** data, uint8_t type, uint8_t length) {
+ type_ = type;
+ switch (type_) {
+ case DexFile::kDexAnnotationByte:
+ item_.byte_val_ = static_cast<int8_t>(ReadVarWidth(data, length, false));
+ break;
+ case DexFile::kDexAnnotationShort:
+ item_.short_val_ = static_cast<int16_t>(ReadVarWidth(data, length, true));
+ break;
+ case DexFile::kDexAnnotationChar:
+ item_.char_val_ = static_cast<uint16_t>(ReadVarWidth(data, length, false));
+ break;
+ case DexFile::kDexAnnotationInt:
+ item_.int_val_ = static_cast<int32_t>(ReadVarWidth(data, length, true));
+ break;
+ case DexFile::kDexAnnotationLong:
+ item_.long_val_ = static_cast<int64_t>(ReadVarWidth(data, length, true));
+ break;
+ case DexFile::kDexAnnotationFloat: {
+ // Fill on right.
+ union {
+ float f;
+ uint32_t data;
+ } conv;
+ conv.data = static_cast<uint32_t>(ReadVarWidth(data, length, false)) << (3 - length) * 8;
+ item_.float_val_ = conv.f;
+ break;
+ }
+ case DexFile::kDexAnnotationDouble: {
+ // Fill on right.
+ union {
+ double d;
+ uint64_t data;
+ } conv;
+ conv.data = ReadVarWidth(data, length, false) << (7 - length) * 8;
+ item_.double_val_ = conv.d;
+ break;
+ }
+ case DexFile::kDexAnnotationString: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item_.string_val_ = header.StringIds()[string_index].get();
+ break;
+ }
+ case DexFile::kDexAnnotationType: {
+ const uint32_t string_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item_.string_val_ = header.TypeIds()[string_index]->GetStringId();
+ break;
+ }
+ case DexFile::kDexAnnotationField:
+ case DexFile::kDexAnnotationEnum: {
+ const uint32_t field_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item_.field_val_ = header.FieldIds()[field_index].get();
+ break;
+ }
+ case DexFile::kDexAnnotationMethod: {
+ const uint32_t method_index = static_cast<uint32_t>(ReadVarWidth(data, length, false));
+ item_.method_val_ = header.MethodIds()[method_index].get();
+ break;
+ }
+ case DexFile::kDexAnnotationArray: {
+ item_.annotation_array_val_ = new std::vector<std::unique_ptr<ArrayItem>>();
+ // Decode all elements.
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ for (uint32_t i = 0; i < size; i++) {
+ item_.annotation_array_val_->push_back(
+ std::unique_ptr<ArrayItem>(new ArrayItem(header, data)));
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationAnnotation: {
+ const uint32_t type_idx = DecodeUnsignedLeb128(data);
+ item_.annotation_annotation_val_.string_ = header.TypeIds()[type_idx]->GetStringId();
+ item_.annotation_annotation_val_.array_ = new std::vector<std::unique_ptr<NameValuePair>>();
+ // Decode all name=value pairs.
+ const uint32_t size = DecodeUnsignedLeb128(data);
+ for (uint32_t i = 0; i < size; i++) {
+ const uint32_t name_index = DecodeUnsignedLeb128(data);
+ item_.annotation_annotation_val_.array_->push_back(std::unique_ptr<NameValuePair>(
+ new NameValuePair(header.StringIds()[name_index].get(), new ArrayItem(header, data))));
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationNull:
+ break;
+ case DexFile::kDexAnnotationBoolean:
+ item_.bool_val_ = (length != 0);
+ break;
+ default:
+ break;
+ }
+}
+
+ClassDef::ClassDef(const DexFile::ClassDef& disk_class_def, Header& header) {
+ class_type_ = header.TypeIds()[disk_class_def.class_idx_].get();
+ access_flags_ = disk_class_def.access_flags_;
+ superclass_ = header.GetTypeIdOrNullPtr(disk_class_def.superclass_idx_);
+
+ const DexFile::TypeList* type_list = header.GetDexFile().GetInterfacesList(disk_class_def);
+ interfaces_offset_ = disk_class_def.interfaces_off_;
+ if (type_list != nullptr) {
+ for (uint32_t index = 0; index < type_list->Size(); ++index) {
+ interfaces_.push_back(header.TypeIds()[type_list->GetTypeItem(index).type_idx_].get());
+ }
+ }
+ source_file_ = header.GetStringIdOrNullPtr(disk_class_def.source_file_idx_);
+ // Annotations.
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_directory_item =
+ header.GetDexFile().GetAnnotationsDirectory(disk_class_def);
+ if (disk_annotations_directory_item == nullptr) {
+ annotations_.reset(nullptr);
+ } else {
+ annotations_.reset(new AnnotationsDirectoryItem(disk_annotations_directory_item, header));
+ annotations_->SetOffset(disk_class_def.annotations_off_);
+ }
+ // Static field initializers.
+ static_values_ = nullptr;
+ const uint8_t* static_data = header.GetDexFile().GetEncodedStaticFieldValuesArray(disk_class_def);
+ if (static_data != nullptr) {
+ uint32_t static_value_count = static_data == nullptr ? 0 : DecodeUnsignedLeb128(&static_data);
+ if (static_value_count > 0) {
+ static_values_ = new std::vector<std::unique_ptr<ArrayItem>>();
+ for (uint32_t i = 0; i < static_value_count; ++i) {
+ static_values_->push_back(std::unique_ptr<ArrayItem>(new ArrayItem(header, &static_data)));
+ }
+ }
+ }
+ // Read the fields and methods defined by the class, resolving the circular reference from those
+ // to classes by setting class at the same time.
+ const uint8_t* encoded_data = header.GetDexFile().GetClassData(disk_class_def);
+ class_data_.SetOffset(disk_class_def.class_data_off_);
+ if (encoded_data != nullptr) {
+ ClassDataItemIterator cdii(header.GetDexFile(), encoded_data);
+ // Static fields.
+ for (uint32_t i = 0; cdii.HasNextStaticField(); i++, cdii.Next()) {
+ FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get();
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ class_data_.StaticFields().push_back(
+ std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
+ }
+ // Instance fields.
+ for (uint32_t i = 0; cdii.HasNextInstanceField(); i++, cdii.Next()) {
+ FieldId* field_item = header.FieldIds()[cdii.GetMemberIndex()].get();
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ class_data_.InstanceFields().push_back(
+ std::unique_ptr<FieldItem>(new FieldItem(access_flags, field_item)));
+ }
+ // Direct methods.
+ for (uint32_t i = 0; cdii.HasNextDirectMethod(); i++, cdii.Next()) {
+ class_data_.DirectMethods().push_back(
+ std::unique_ptr<MethodItem>(GenerateMethodItem(header, cdii)));
+ }
+ // Virtual methods.
+ for (uint32_t i = 0; cdii.HasNextVirtualMethod(); i++, cdii.Next()) {
+ class_data_.VirtualMethods().push_back(
+ std::unique_ptr<MethodItem>(GenerateMethodItem(header, cdii)));
+ }
+ }
+}
+
+MethodItem* ClassDef::GenerateMethodItem(Header& header, ClassDataItemIterator& cdii) {
+ MethodId* method_item = header.MethodIds()[cdii.GetMemberIndex()].get();
+ uint32_t access_flags = cdii.GetRawMemberAccessFlags();
+ const DexFile::CodeItem* disk_code_item = cdii.GetMethodCodeItem();
+ CodeItem* code_item = nullptr;
+ DebugInfoItem* debug_info = nullptr;
+ if (disk_code_item != nullptr) {
+ code_item = new CodeItem(*disk_code_item, header);
+ code_item->SetOffset(cdii.GetMethodCodeItemOffset());
+ debug_info = code_item->DebugInfo();
+ }
+ if (debug_info != nullptr) {
+ bool is_static = (access_flags & kAccStatic) != 0;
+ header.GetDexFile().DecodeDebugLocalInfo(
+ disk_code_item, is_static, cdii.GetMemberIndex(), GetLocalsCb, debug_info);
+ header.GetDexFile().DecodeDebugPositionInfo(disk_code_item, GetPositionsCb, debug_info);
+ }
+ return new MethodItem(access_flags, method_item, code_item);
+}
+
+CodeItem::CodeItem(const DexFile::CodeItem& disk_code_item, Header& header) {
+ registers_size_ = disk_code_item.registers_size_;
+ ins_size_ = disk_code_item.ins_size_;
+ outs_size_ = disk_code_item.outs_size_;
+ tries_size_ = disk_code_item.tries_size_;
+
+ const uint8_t* debug_info_stream = header.GetDexFile().GetDebugInfoStream(&disk_code_item);
+ if (debug_info_stream != nullptr) {
+ debug_info_.reset(new DebugInfoItem());
+ } else {
+ debug_info_.reset(nullptr);
+ }
+
+ insns_size_ = disk_code_item.insns_size_in_code_units_;
+ insns_.reset(new uint16_t[insns_size_]);
+ memcpy(insns_.get(), disk_code_item.insns_, insns_size_ * sizeof(uint16_t));
+
+ if (tries_size_ > 0) {
+ tries_ = new std::vector<std::unique_ptr<const TryItem>>();
+ for (uint32_t i = 0; i < tries_size_; ++i) {
+ const DexFile::TryItem* disk_try_item = header.GetDexFile().GetTryItems(disk_code_item, i);
+ tries_->push_back(std::unique_ptr<const TryItem>(
+ new TryItem(*disk_try_item, disk_code_item, header)));
+ }
+ } else {
+ tries_ = nullptr;
+ }
+}
+
+AnnotationSetItem::AnnotationSetItem(const DexFile::AnnotationSetItem& disk_annotations_item,
+ Header& header) {
+ if (disk_annotations_item.size_ == 0) {
+ return;
+ }
+ for (uint32_t i = 0; i < disk_annotations_item.size_; ++i) {
+ const DexFile::AnnotationItem* annotation =
+ header.GetDexFile().GetAnnotationItem(&disk_annotations_item, i);
+ if (annotation == nullptr) {
+ continue;
+ }
+ uint8_t visibility = annotation->visibility_;
+ const uint8_t* annotation_data = annotation->annotation_;
+ ArrayItem* array_item =
+ new ArrayItem(header, &annotation_data, DexFile::kDexAnnotationAnnotation, 0);
+ items_.push_back(std::unique_ptr<AnnotationItem>(new AnnotationItem(visibility, array_item)));
+ }
+}
+
+AnnotationsDirectoryItem::AnnotationsDirectoryItem(
+ const DexFile::AnnotationsDirectoryItem* disk_annotations_item, Header& header) {
+ const DexFile::AnnotationSetItem* class_set_item =
+ header.GetDexFile().GetClassAnnotationSet(disk_annotations_item);
+ if (class_set_item == nullptr) {
+ class_annotation_.reset(nullptr);
+ } else {
+ class_annotation_.reset(new AnnotationSetItem(*class_set_item, header));
+ }
+ const DexFile::FieldAnnotationsItem* fields =
+ header.GetDexFile().GetFieldAnnotations(disk_annotations_item);
+ if (fields != nullptr) {
+ for (uint32_t i = 0; i < disk_annotations_item->fields_size_; ++i) {
+ FieldId* field_id = header.FieldIds()[fields[i].field_idx_].get();
+ const DexFile::AnnotationSetItem* field_set_item =
+ header.GetDexFile().GetFieldAnnotationSetItem(fields[i]);
+ dex_ir::AnnotationSetItem* annotation_set_item =
+ new AnnotationSetItem(*field_set_item, header);
+ field_annotations_.push_back(std::unique_ptr<FieldAnnotation>(
+ new FieldAnnotation(field_id, annotation_set_item)));
+ }
+ }
+ const DexFile::MethodAnnotationsItem* methods =
+ header.GetDexFile().GetMethodAnnotations(disk_annotations_item);
+ if (methods != nullptr) {
+ for (uint32_t i = 0; i < disk_annotations_item->methods_size_; ++i) {
+ MethodId* method_id = header.MethodIds()[methods[i].method_idx_].get();
+ const DexFile::AnnotationSetItem* method_set_item =
+ header.GetDexFile().GetMethodAnnotationSetItem(methods[i]);
+ dex_ir::AnnotationSetItem* annotation_set_item =
+ new AnnotationSetItem(*method_set_item, header);
+ method_annotations_.push_back(std::unique_ptr<MethodAnnotation>(
+ new MethodAnnotation(method_id, annotation_set_item)));
+ }
+ }
+ const DexFile::ParameterAnnotationsItem* parameters =
+ header.GetDexFile().GetParameterAnnotations(disk_annotations_item);
+ if (parameters != nullptr) {
+ for (uint32_t i = 0; i < disk_annotations_item->parameters_size_; ++i) {
+ MethodId* method_id = header.MethodIds()[parameters[i].method_idx_].get();
+ const DexFile::AnnotationSetRefList* list =
+        header.GetDexFile().GetParameterAnnotationSetRefList(&parameters[i]);
+ parameter_annotations_.push_back(std::unique_ptr<ParameterAnnotation>(
+ new ParameterAnnotation(method_id, list, header)));
+ }
+ }
+}
+
+} // namespace dex_ir
+} // namespace art
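[Reviewer note] The sign extension in ReadVarWidth works by shifting the decoded value up to the top of the 64-bit register and arithmetically shifting it back down. A worked trace of the same expression for a two-byte value (standalone sketch, not part of the change):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Two little-endian bytes 0xFE 0xFF decode to the raw value 0xFFFE.
      uint64_t value = 0xFFFEu;
      uint8_t length = 1;                     // length + 1 = 2 bytes were read
      int shift = (7 - length) * 8;           // 48: the bits above the read bytes
      int64_t extended = (static_cast<int64_t>(value) << shift) >> shift;
      std::printf("%lld\n", static_cast<long long>(extended));  // prints -2
      return 0;
    }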
diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h
new file mode 100644
index 0000000..fcd3ab0
--- /dev/null
+++ b/dexlayout/dex_ir.h
@@ -0,0 +1,693 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of an in-memory representation of DEX files.
+ */
+
+#ifndef ART_DEXLAYOUT_DEX_IR_H_
+#define ART_DEXLAYOUT_DEX_IR_H_
+
+#include <iostream>
+#include <map>
+#include <vector>
+#include <stdint.h>
+
+#include "dex_file.h"
+
+namespace art {
+namespace dex_ir {
+
+// Forward declarations for classes used in containers or pointed to.
+class AnnotationsDirectoryItem;
+class AnnotationSetItem;
+class ArrayItem;
+class ClassData;
+class ClassDef;
+class CodeItem;
+class DebugInfoItem;
+class FieldId;
+class FieldItem;
+class Header;
+class MapList;
+class MapItem;
+class MethodId;
+class MethodItem;
+class ProtoId;
+class StringId;
+class TryItem;
+class TypeId;
+
+// Visitor support
+class AbstractDispatcher {
+ public:
+ AbstractDispatcher() = default;
+ virtual ~AbstractDispatcher() { }
+
+ virtual void Dispatch(Header* header) = 0;
+ virtual void Dispatch(const StringId* string_id) = 0;
+ virtual void Dispatch(const TypeId* type_id) = 0;
+ virtual void Dispatch(const ProtoId* proto_id) = 0;
+ virtual void Dispatch(const FieldId* field_id) = 0;
+ virtual void Dispatch(const MethodId* method_id) = 0;
+ virtual void Dispatch(ClassData* class_data) = 0;
+ virtual void Dispatch(ClassDef* class_def) = 0;
+ virtual void Dispatch(FieldItem* field_item) = 0;
+ virtual void Dispatch(MethodItem* method_item) = 0;
+ virtual void Dispatch(ArrayItem* array_item) = 0;
+ virtual void Dispatch(CodeItem* code_item) = 0;
+ virtual void Dispatch(TryItem* try_item) = 0;
+ virtual void Dispatch(DebugInfoItem* debug_info_item) = 0;
+ virtual void Dispatch(AnnotationSetItem* annotation_set_item) = 0;
+ virtual void Dispatch(AnnotationsDirectoryItem* annotations_directory_item) = 0;
+ virtual void Dispatch(MapList* map_list) = 0;
+ virtual void Dispatch(MapItem* map_item) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AbstractDispatcher);
+};
+
+// Collections become owners of the objects added by moving them into unique pointers.
+template<class T> class CollectionWithOffset {
+ public:
+ CollectionWithOffset() = default;
+ std::vector<std::unique_ptr<T>>& Collection() { return collection_; }
+ // Read-time support methods
+ void AddWithPosition(uint32_t position, T* object) {
+ collection_.push_back(std::unique_ptr<T>(object));
+ collection_.back()->SetOffset(position);
+ }
+ // Ordinary object insertion into collection.
+ void Insert(T object ATTRIBUTE_UNUSED) {
+ // TODO(sehr): add ordered insertion support.
+ UNIMPLEMENTED(FATAL) << "Insertion not ready";
+ }
+ uint32_t GetOffset() const { return offset_; }
+ void SetOffset(uint32_t new_offset) { offset_ = new_offset; }
+ uint32_t Size() const { return collection_.size(); }
+
+ private:
+ std::vector<std::unique_ptr<T>> collection_;
+ uint32_t offset_ = 0;
+ DISALLOW_COPY_AND_ASSIGN(CollectionWithOffset);
+};
+
+class Item {
+ public:
+ virtual ~Item() { }
+ uint32_t GetOffset() const { return offset_; }
+ void SetOffset(uint32_t offset) { offset_ = offset; }
+ protected:
+ uint32_t offset_ = 0;
+};
+
+class Header : public Item {
+ public:
+ explicit Header(const DexFile& dex_file);
+ ~Header() OVERRIDE { }
+
+ const DexFile& GetDexFile() const { return dex_file_; }
+
+ const uint8_t* Magic() const { return magic_; }
+ uint32_t Checksum() const { return checksum_; }
+ const uint8_t* Signature() const { return signature_; }
+ uint32_t EndianTag() const { return endian_tag_; }
+ uint32_t FileSize() const { return file_size_; }
+ uint32_t HeaderSize() const { return header_size_; }
+ uint32_t LinkSize() const { return link_size_; }
+ uint32_t LinkOffset() const { return link_offset_; }
+ uint32_t DataSize() const { return data_size_; }
+ uint32_t DataOffset() const { return data_offset_; }
+
+ void SetChecksum(uint32_t new_checksum) { checksum_ = new_checksum; }
+ void SetSignature(const uint8_t* new_signature) {
+ memcpy(signature_, new_signature, sizeof(signature_));
+ }
+ void SetFileSize(uint32_t new_file_size) { file_size_ = new_file_size; }
+ void SetHeaderSize(uint32_t new_header_size) { header_size_ = new_header_size; }
+ void SetLinkSize(uint32_t new_link_size) { link_size_ = new_link_size; }
+ void SetLinkOffset(uint32_t new_link_offset) { link_offset_ = new_link_offset; }
+ void SetDataSize(uint32_t new_data_size) { data_size_ = new_data_size; }
+ void SetDataOffset(uint32_t new_data_offset) { data_offset_ = new_data_offset; }
+
+ // Collections.
+ std::vector<std::unique_ptr<StringId>>& StringIds() { return string_ids_.Collection(); }
+ std::vector<std::unique_ptr<TypeId>>& TypeIds() { return type_ids_.Collection(); }
+ std::vector<std::unique_ptr<ProtoId>>& ProtoIds() { return proto_ids_.Collection(); }
+ std::vector<std::unique_ptr<FieldId>>& FieldIds() { return field_ids_.Collection(); }
+ std::vector<std::unique_ptr<MethodId>>& MethodIds() { return method_ids_.Collection(); }
+ std::vector<std::unique_ptr<ClassDef>>& ClassDefs() { return class_defs_.Collection(); }
+ uint32_t StringIdsOffset() const { return string_ids_.GetOffset(); }
+ uint32_t TypeIdsOffset() const { return type_ids_.GetOffset(); }
+ uint32_t ProtoIdsOffset() const { return proto_ids_.GetOffset(); }
+ uint32_t FieldIdsOffset() const { return field_ids_.GetOffset(); }
+ uint32_t MethodIdsOffset() const { return method_ids_.GetOffset(); }
+ uint32_t ClassDefsOffset() const { return class_defs_.GetOffset(); }
+ void SetStringIdsOffset(uint32_t new_offset) { string_ids_.SetOffset(new_offset); }
+ void SetTypeIdsOffset(uint32_t new_offset) { type_ids_.SetOffset(new_offset); }
+ void SetProtoIdsOffset(uint32_t new_offset) { proto_ids_.SetOffset(new_offset); }
+ void SetFieldIdsOffset(uint32_t new_offset) { field_ids_.SetOffset(new_offset); }
+ void SetMethodIdsOffset(uint32_t new_offset) { method_ids_.SetOffset(new_offset); }
+ void SetClassDefsOffset(uint32_t new_offset) { class_defs_.SetOffset(new_offset); }
+ uint32_t StringIdsSize() const { return string_ids_.Size(); }
+ uint32_t TypeIdsSize() const { return type_ids_.Size(); }
+ uint32_t ProtoIdsSize() const { return proto_ids_.Size(); }
+ uint32_t FieldIdsSize() const { return field_ids_.Size(); }
+ uint32_t MethodIdsSize() const { return method_ids_.Size(); }
+ uint32_t ClassDefsSize() const { return class_defs_.Size(); }
+
+ TypeId* GetTypeIdOrNullPtr(uint16_t index) {
+ return index == DexFile::kDexNoIndex16 ? nullptr : TypeIds()[index].get();
+ }
+
+ StringId* GetStringIdOrNullPtr(uint32_t index) {
+ return index == DexFile::kDexNoIndex ? nullptr : StringIds()[index].get();
+ }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ const DexFile& dex_file_;
+ uint8_t magic_[8];
+ uint32_t checksum_;
+ uint8_t signature_[DexFile::kSha1DigestSize];
+ uint32_t endian_tag_;
+ uint32_t file_size_;
+ uint32_t header_size_;
+ uint32_t link_size_;
+ uint32_t link_offset_;
+ uint32_t data_size_;
+ uint32_t data_offset_;
+
+ CollectionWithOffset<StringId> string_ids_;
+ CollectionWithOffset<TypeId> type_ids_;
+ CollectionWithOffset<ProtoId> proto_ids_;
+ CollectionWithOffset<FieldId> field_ids_;
+ CollectionWithOffset<MethodId> method_ids_;
+ CollectionWithOffset<ClassDef> class_defs_;
+ DISALLOW_COPY_AND_ASSIGN(Header);
+};
+
+class StringId : public Item {
+ public:
+ StringId(const DexFile::StringId& disk_string_id, Header& header) :
+ data_(strdup(header.GetDexFile().GetStringData(disk_string_id))) {
+ }
+ ~StringId() OVERRIDE { }
+
+ const char* Data() const { return data_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ std::unique_ptr<const char> data_;
+ DISALLOW_COPY_AND_ASSIGN(StringId);
+};
+
+class TypeId : public Item {
+ public:
+ TypeId(const DexFile::TypeId& disk_type_id, Header& header) :
+ string_id_(header.StringIds()[disk_type_id.descriptor_idx_].get()) {
+ }
+ ~TypeId() OVERRIDE { }
+
+ StringId* GetStringId() const { return string_id_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ StringId* string_id_;
+ DISALLOW_COPY_AND_ASSIGN(TypeId);
+};
+
+class ProtoId : public Item {
+ public:
+ ProtoId(const DexFile::ProtoId& disk_proto_id, Header& header) {
+ shorty_ = header.StringIds()[disk_proto_id.shorty_idx_].get();
+ return_type_ = header.TypeIds()[disk_proto_id.return_type_idx_].get();
+ DexFileParameterIterator dfpi(header.GetDexFile(), disk_proto_id);
+ while (dfpi.HasNext()) {
+ parameters_.push_back(header.TypeIds()[dfpi.GetTypeIdx()].get());
+ dfpi.Next();
+ }
+ }
+ ~ProtoId() OVERRIDE { }
+
+ const StringId* Shorty() const { return shorty_; }
+ const TypeId* ReturnType() const { return return_type_; }
+ const std::vector<const TypeId*>& Parameters() const { return parameters_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ const StringId* shorty_;
+ const TypeId* return_type_;
+ std::vector<const TypeId*> parameters_;
+ DISALLOW_COPY_AND_ASSIGN(ProtoId);
+};
+
+class FieldId : public Item {
+ public:
+ FieldId(const DexFile::FieldId& disk_field_id, Header& header) {
+ class_ = header.TypeIds()[disk_field_id.class_idx_].get();
+ type_ = header.TypeIds()[disk_field_id.type_idx_].get();
+ name_ = header.StringIds()[disk_field_id.name_idx_].get();
+ }
+ ~FieldId() OVERRIDE { }
+
+ const TypeId* Class() const { return class_; }
+ const TypeId* Type() const { return type_; }
+ const StringId* Name() const { return name_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ const TypeId* class_;
+ const TypeId* type_;
+ const StringId* name_;
+ DISALLOW_COPY_AND_ASSIGN(FieldId);
+};
+
+class MethodId : public Item {
+ public:
+ MethodId(const DexFile::MethodId& disk_method_id, Header& header) {
+ class_ = header.TypeIds()[disk_method_id.class_idx_].get();
+ proto_ = header.ProtoIds()[disk_method_id.proto_idx_].get();
+ name_ = header.StringIds()[disk_method_id.name_idx_].get();
+ }
+ ~MethodId() OVERRIDE { }
+
+ const TypeId* Class() const { return class_; }
+ const ProtoId* Proto() const { return proto_; }
+ const StringId* Name() const { return name_; }
+
+ void Accept(AbstractDispatcher* dispatch) const { dispatch->Dispatch(this); }
+
+ private:
+ const TypeId* class_;
+ const ProtoId* proto_;
+ const StringId* name_;
+ DISALLOW_COPY_AND_ASSIGN(MethodId);
+};
+
+class FieldItem : public Item {
+ public:
+ FieldItem(uint32_t access_flags, const FieldId* field_id) :
+ access_flags_(access_flags), field_id_(field_id) { }
+ ~FieldItem() OVERRIDE { }
+
+ uint32_t GetAccessFlags() const { return access_flags_; }
+ const FieldId* GetFieldId() const { return field_id_; }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint32_t access_flags_;
+ const FieldId* field_id_;
+ DISALLOW_COPY_AND_ASSIGN(FieldItem);
+};
+
+class MethodItem : public Item {
+ public:
+ MethodItem(uint32_t access_flags, const MethodId* method_id, const CodeItem* code) :
+ access_flags_(access_flags), method_id_(method_id), code_(code) { }
+ ~MethodItem() OVERRIDE { }
+
+ uint32_t GetAccessFlags() const { return access_flags_; }
+ const MethodId* GetMethodId() const { return method_id_; }
+ const CodeItem* GetCodeItem() const { return code_.get(); }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint32_t access_flags_;
+ const MethodId* method_id_;
+ std::unique_ptr<const CodeItem> code_;
+ DISALLOW_COPY_AND_ASSIGN(MethodItem);
+};
+
+class ArrayItem : public Item {
+ public:
+ class NameValuePair {
+ public:
+ NameValuePair(StringId* name, ArrayItem* value) :
+ name_(name), value_(value) { }
+
+ StringId* Name() const { return name_; }
+ ArrayItem* Value() const { return value_.get(); }
+
+ private:
+ StringId* name_;
+ std::unique_ptr<ArrayItem> value_;
+ DISALLOW_COPY_AND_ASSIGN(NameValuePair);
+ };
+
+ ArrayItem(Header& header, const uint8_t** data, uint8_t type, uint8_t length);
+ ArrayItem(Header& header, const uint8_t** data);
+ ~ArrayItem() OVERRIDE { }
+
+ int8_t Type() const { return type_; }
+ bool GetBoolean() const { return item_.bool_val_; }
+ int8_t GetByte() const { return item_.byte_val_; }
+ int16_t GetShort() const { return item_.short_val_; }
+ uint16_t GetChar() const { return item_.char_val_; }
+ int32_t GetInt() const { return item_.int_val_; }
+ int64_t GetLong() const { return item_.long_val_; }
+ float GetFloat() const { return item_.float_val_; }
+ double GetDouble() const { return item_.double_val_; }
+ StringId* GetStringId() const { return item_.string_val_; }
+ FieldId* GetFieldId() const { return item_.field_val_; }
+ MethodId* GetMethodId() const { return item_.method_val_; }
+ std::vector<std::unique_ptr<ArrayItem>>* GetAnnotationArray() const {
+ return item_.annotation_array_val_;
+ }
+ StringId* GetAnnotationAnnotationString() const {
+ return item_.annotation_annotation_val_.string_;
+ }
+ std::vector<std::unique_ptr<NameValuePair>>* GetAnnotationAnnotationNameValuePairArray() const {
+ return item_.annotation_annotation_val_.array_;
+ }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ void Read(Header& header, const uint8_t** data, uint8_t type, uint8_t length);
+ uint8_t type_;
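+ // Only the union member selected by type_ (one of the DexFile::kDexAnnotation* constants) is valid.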
+ union {
+ bool bool_val_;
+ int8_t byte_val_;
+ int16_t short_val_;
+ uint16_t char_val_;
+ int32_t int_val_;
+ int64_t long_val_;
+ float float_val_;
+ double double_val_;
+ StringId* string_val_;
+ FieldId* field_val_;
+ MethodId* method_val_;
+ std::vector<std::unique_ptr<ArrayItem>>* annotation_array_val_;
+ struct {
+ StringId* string_;
+ std::vector<std::unique_ptr<NameValuePair>>* array_;
+ } annotation_annotation_val_;
+ } item_;
+ DISALLOW_COPY_AND_ASSIGN(ArrayItem);
+};
+
+class ClassData : public Item {
+ public:
+ ClassData() = default;
+ ~ClassData() OVERRIDE = default;
+ std::vector<std::unique_ptr<FieldItem>>& StaticFields() { return static_fields_; }
+ std::vector<std::unique_ptr<FieldItem>>& InstanceFields() { return instance_fields_; }
+ std::vector<std::unique_ptr<MethodItem>>& DirectMethods() { return direct_methods_; }
+ std::vector<std::unique_ptr<MethodItem>>& VirtualMethods() { return virtual_methods_; }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ std::vector<std::unique_ptr<FieldItem>> static_fields_;
+ std::vector<std::unique_ptr<FieldItem>> instance_fields_;
+ std::vector<std::unique_ptr<MethodItem>> direct_methods_;
+ std::vector<std::unique_ptr<MethodItem>> virtual_methods_;
+ DISALLOW_COPY_AND_ASSIGN(ClassData);
+};
+
+class ClassDef : public Item {
+ public:
+ ClassDef(const DexFile::ClassDef& disk_class_def, Header& header);
+ ~ClassDef() OVERRIDE { }
+
+ const TypeId* ClassType() const { return class_type_; }
+ uint32_t GetAccessFlags() const { return access_flags_; }
+ const TypeId* Superclass() const { return superclass_; }
+ std::vector<TypeId*>* Interfaces() { return &interfaces_; }
+ uint32_t InterfacesOffset() const { return interfaces_offset_; }
+ void SetInterfacesOffset(uint32_t new_offset) { interfaces_offset_ = new_offset; }
+ const StringId* SourceFile() const { return source_file_; }
+ AnnotationsDirectoryItem* Annotations() const { return annotations_.get(); }
+ std::vector<std::unique_ptr<ArrayItem>>* StaticValues() { return static_values_; }
+ ClassData* GetClassData() { return &class_data_; }
+
+ MethodItem* GenerateMethodItem(Header& header, ClassDataItemIterator& cdii);
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ const TypeId* class_type_;
+ uint32_t access_flags_;
+ const TypeId* superclass_;
+ std::vector<TypeId*> interfaces_;
+ uint32_t interfaces_offset_;
+ const StringId* source_file_;
+ std::unique_ptr<AnnotationsDirectoryItem> annotations_;
+ std::vector<std::unique_ptr<ArrayItem>>* static_values_;
+ ClassData class_data_;
+ DISALLOW_COPY_AND_ASSIGN(ClassDef);
+};
+
+class CodeItem : public Item {
+ public:
+ CodeItem(const DexFile::CodeItem& disk_code_item, Header& header);
+ ~CodeItem() OVERRIDE { }
+
+ uint16_t RegistersSize() const { return registers_size_; }
+ uint16_t InsSize() const { return ins_size_; }
+ uint16_t OutsSize() const { return outs_size_; }
+ uint16_t TriesSize() const { return tries_size_; }
+ DebugInfoItem* DebugInfo() const { return debug_info_.get(); }
+ uint32_t InsnsSize() const { return insns_size_; }
+ uint16_t* Insns() const { return insns_.get(); }
+ std::vector<std::unique_ptr<const TryItem>>* Tries() const { return tries_; }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint16_t registers_size_;
+ uint16_t ins_size_;
+ uint16_t outs_size_;
+ uint16_t tries_size_;
+ std::unique_ptr<DebugInfoItem> debug_info_;
+ uint32_t insns_size_;
+ std::unique_ptr<uint16_t[]> insns_;
+ std::vector<std::unique_ptr<const TryItem>>* tries_;
+ DISALLOW_COPY_AND_ASSIGN(CodeItem);
+};
+
+class TryItem : public Item {
+ public:
+ class CatchHandler {
+ public:
+ CatchHandler(const TypeId* type_id, uint32_t address) : type_id_(type_id), address_(address) { }
+
+ const TypeId* GetTypeId() const { return type_id_; }
+ uint32_t GetAddress() const { return address_; }
+
+ private:
+ const TypeId* type_id_;
+ uint32_t address_;
+ DISALLOW_COPY_AND_ASSIGN(CatchHandler);
+ };
+
+ TryItem(const DexFile::TryItem& disk_try_item,
+ const DexFile::CodeItem& disk_code_item,
+ Header& header) {
+ start_addr_ = disk_try_item.start_addr_;
+ insn_count_ = disk_try_item.insn_count_;
+ for (CatchHandlerIterator it(disk_code_item, disk_try_item); it.HasNext(); it.Next()) {
+ const uint16_t type_index = it.GetHandlerTypeIndex();
+ const TypeId* type_id = header.GetTypeIdOrNullPtr(type_index);
+ handlers_.push_back(std::unique_ptr<const CatchHandler>(
+ new CatchHandler(type_id, it.GetHandlerAddress())));
+ }
+ }
+ ~TryItem() OVERRIDE { }
+
+ uint32_t StartAddr() const { return start_addr_; }
+ uint16_t InsnCount() const { return insn_count_; }
+ const std::vector<std::unique_ptr<const CatchHandler>>& GetHandlers() const { return handlers_; }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ uint32_t start_addr_;
+ uint16_t insn_count_;
+ std::vector<std::unique_ptr<const CatchHandler>> handlers_;
+ DISALLOW_COPY_AND_ASSIGN(TryItem);
+};
+
+
+struct PositionInfo {
+ PositionInfo(uint32_t address, uint32_t line) : address_(address), line_(line) { }
+
+ uint32_t address_;
+ uint32_t line_;
+};
+
+struct LocalInfo {
+ LocalInfo(const char* name, const char* descriptor, const char* signature, uint32_t start_address,
+ uint32_t end_address, uint16_t reg) :
+ name_(name), descriptor_(descriptor), signature_(signature), start_address_(start_address),
+ end_address_(end_address), reg_(reg) { }
+
+ std::string name_;
+ std::string descriptor_;
+ std::string signature_;
+ uint32_t start_address_;
+ uint32_t end_address_;
+ uint16_t reg_;
+};
+
+class DebugInfoItem : public Item {
+ public:
+ DebugInfoItem() = default;
+
+ std::vector<std::unique_ptr<PositionInfo>>& GetPositionInfo() { return positions_; }
+ std::vector<std::unique_ptr<LocalInfo>>& GetLocalInfo() { return locals_; }
+
+ private:
+ std::vector<std::unique_ptr<PositionInfo>> positions_;
+ std::vector<std::unique_ptr<LocalInfo>> locals_;
+ DISALLOW_COPY_AND_ASSIGN(DebugInfoItem);
+};
+
+class AnnotationSetItem : public Item {
+ public:
+ class AnnotationItem {
+ public:
+ AnnotationItem(uint8_t visibility, ArrayItem* item) :
+ visibility_(visibility), item_(item) { }
+
+ uint8_t GetVisibility() const { return visibility_; }
+ ArrayItem* GetItem() const { return item_.get(); }
+
+ private:
+ uint8_t visibility_;
+ std::unique_ptr<ArrayItem> item_;
+ DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
+ };
+
+ AnnotationSetItem(const DexFile::AnnotationSetItem& disk_annotations_item, Header& header);
+ ~AnnotationSetItem() OVERRIDE { }
+
+ std::vector<std::unique_ptr<AnnotationItem>>& GetItems() { return items_; }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ std::vector<std::unique_ptr<AnnotationItem>> items_;
+ DISALLOW_COPY_AND_ASSIGN(AnnotationSetItem);
+};
+
+class AnnotationsDirectoryItem : public Item {
+ public:
+ class FieldAnnotation {
+ public:
+ FieldAnnotation(FieldId* field_id, AnnotationSetItem* annotation_set_item) :
+ field_id_(field_id), annotation_set_item_(annotation_set_item) { }
+
+ FieldId* GetFieldId() const { return field_id_; }
+ AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); }
+
+ private:
+ FieldId* field_id_;
+ std::unique_ptr<AnnotationSetItem> annotation_set_item_;
+ DISALLOW_COPY_AND_ASSIGN(FieldAnnotation);
+ };
+
+ class MethodAnnotation {
+ public:
+ MethodAnnotation(MethodId* method_id, AnnotationSetItem* annotation_set_item) :
+ method_id_(method_id), annotation_set_item_(annotation_set_item) { }
+
+ MethodId* GetMethodId() const { return method_id_; }
+ AnnotationSetItem* GetAnnotationSetItem() const { return annotation_set_item_.get(); }
+
+ private:
+ MethodId* method_id_;
+ std::unique_ptr<AnnotationSetItem> annotation_set_item_;
+ DISALLOW_COPY_AND_ASSIGN(MethodAnnotation);
+ };
+
+ class ParameterAnnotation {
+ public:
+ ParameterAnnotation(MethodId* method_id,
+ const DexFile::AnnotationSetRefList* annotation_set_ref_list,
+ Header& header) :
+ method_id_(method_id) {
+ for (uint32_t i = 0; i < annotation_set_ref_list->size_; ++i) {
+ const DexFile::AnnotationSetItem* annotation_set_item =
+ header.GetDexFile().GetSetRefItemItem(&annotation_set_ref_list->list_[i]);
+ annotations_.push_back(std::unique_ptr<AnnotationSetItem>(
+ new AnnotationSetItem(*annotation_set_item, header)));
+ }
+ }
+
+ MethodId* GetMethodId() const { return method_id_; }
+ std::vector<std::unique_ptr<AnnotationSetItem>>& GetAnnotations() { return annotations_; }
+
+ private:
+ MethodId* method_id_;
+ std::vector<std::unique_ptr<AnnotationSetItem>> annotations_;
+ DISALLOW_COPY_AND_ASSIGN(ParameterAnnotation);
+ };
+
+ AnnotationsDirectoryItem(const DexFile::AnnotationsDirectoryItem* disk_annotations_item,
+ Header& header);
+
+ AnnotationSetItem* GetClassAnnotation() const { return class_annotation_.get(); }
+
+ std::vector<std::unique_ptr<FieldAnnotation>>& GetFieldAnnotations() {
+ return field_annotations_;
+ }
+
+ std::vector<std::unique_ptr<MethodAnnotation>>& GetMethodAnnotations() {
+ return method_annotations_;
+ }
+
+ std::vector<std::unique_ptr<ParameterAnnotation>>& GetParameterAnnotations() {
+ return parameter_annotations_;
+ }
+
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ std::unique_ptr<AnnotationSetItem> class_annotation_;
+ std::vector<std::unique_ptr<FieldAnnotation>> field_annotations_;
+ std::vector<std::unique_ptr<MethodAnnotation>> method_annotations_;
+ std::vector<std::unique_ptr<ParameterAnnotation>> parameter_annotations_;
+ DISALLOW_COPY_AND_ASSIGN(AnnotationsDirectoryItem);
+};
+
+// TODO(sehr): implement MapList.
+class MapList : public Item {
+ public:
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MapList);
+};
+
+class MapItem : public Item {
+ public:
+ void Accept(AbstractDispatcher* dispatch) { dispatch->Dispatch(this); }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MapItem);
+};
+
+} // namespace dex_ir
+} // namespace art
+
+#endif // ART_DEXLAYOUT_DEX_IR_H_
diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc
new file mode 100644
index 0000000..0b31614
--- /dev/null
+++ b/dexlayout/dexlayout.cc
@@ -0,0 +1,1521 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Implementation file of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#include "dexlayout.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <vector>
+
+#include "dex_ir.h"
+#include "dex_file-inl.h"
+#include "dex_instruction-inl.h"
+#include "utils.h"
+
+namespace art {
+
+/*
+ * Options parsed in main driver.
+ */
+struct Options options_;
+
+/*
+ * Output file. Defaults to stdout.
+ */
+FILE* out_file_ = stdout;
+
+/*
+ * Flags for use with createAccessFlagStr().
+ */
+enum AccessFor {
+ kAccessForClass = 0, kAccessForMethod = 1, kAccessForField = 2, kAccessForMAX
+};
+const int kNumFlags = 18;
+
+/*
+ * Gets 2 little-endian bytes.
+ */
+static inline uint16_t Get2LE(unsigned char const* src) {
+ return src[0] | (src[1] << 8);
+}
+
+/*
+ * Converts a type descriptor to human-readable "dotted" form. For
+ * example, "Ljava/lang/String;" becomes "java.lang.String", and
+ * "[I" becomes "int[]". Also converts '$' to '.', which means this
+ * form can't be converted back to a descriptor.
+ */
+static std::string DescriptorToDotWrapper(const char* descriptor) {
+ std::string result = DescriptorToDot(descriptor);
+ size_t found = result.find('$');
+ while (found != std::string::npos) {
+ result[found] = '.';
+ found = result.find('$', found);
+ }
+ return result;
+}
+
+/*
+ * Converts the class name portion of a type descriptor to human-readable
+ * "dotted" form. For example, "Ljava/lang/String;" becomes "String".
+ */
+static std::string DescriptorClassToDot(const char* str) {
+ std::string descriptor(str);
+ // Reduce to just the class name prefix.
+ size_t last_slash = descriptor.rfind('/');
+ if (last_slash == std::string::npos) {
+ last_slash = 0;
+ }
+ // Start past the '/' or 'L'.
+ last_slash++;
+
+ // Copy class name over, trimming trailing ';'.
+ size_t size = descriptor.size() - 1 - last_slash;
+ std::string result(descriptor.substr(last_slash, size));
+
+ // Replace '$' with '.'.
+ size_t dollar_sign = result.find('$');
+ while (dollar_sign != std::string::npos) {
+ result[dollar_sign] = '.';
+ dollar_sign = result.find('$', dollar_sign);
+ }
+
+ return result;
+}
+
+/*
+ * Returns string representing the boolean value.
+ */
+static const char* StrBool(bool val) {
+ return val ? "true" : "false";
+}
+
+/*
+ * Returns a quoted string representing the boolean value.
+ */
+static const char* QuotedBool(bool val) {
+ return val ? "\"true\"" : "\"false\"";
+}
+
+/*
+ * Returns a quoted string representing the access flags.
+ */
+static const char* QuotedVisibility(uint32_t access_flags) {
+ if (access_flags & kAccPublic) {
+ return "\"public\"";
+ } else if (access_flags & kAccProtected) {
+ return "\"protected\"";
+ } else if (access_flags & kAccPrivate) {
+ return "\"private\"";
+ } else {
+ return "\"package\"";
+ }
+}
+
+/*
+ * Counts the number of '1' bits in a word.
+ */
+static int CountOnes(uint32_t val) {
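+ // Standard parallel (SWAR) bit count: sum adjacent bit pairs, then nibbles,
+ // then use the 0x01010101 multiply to accumulate the per-byte counts into the top byte.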
+ val = val - ((val >> 1) & 0x55555555);
+ val = (val & 0x33333333) + ((val >> 2) & 0x33333333);
+ return (((val + (val >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
+}
+
+/*
+ * Creates a new string with human-readable access flags.
+ *
+ * In the base language the access_flags fields are type uint16_t; in Dalvik they're uint32_t.
+ */
+static char* CreateAccessFlagStr(uint32_t flags, AccessFor for_what) {
+ static const char* kAccessStrings[kAccessForMAX][kNumFlags] = {
+ {
+ "PUBLIC", /* 0x00001 */
+ "PRIVATE", /* 0x00002 */
+ "PROTECTED", /* 0x00004 */
+ "STATIC", /* 0x00008 */
+ "FINAL", /* 0x00010 */
+ "?", /* 0x00020 */
+ "?", /* 0x00040 */
+ "?", /* 0x00080 */
+ "?", /* 0x00100 */
+ "INTERFACE", /* 0x00200 */
+ "ABSTRACT", /* 0x00400 */
+ "?", /* 0x00800 */
+ "SYNTHETIC", /* 0x01000 */
+ "ANNOTATION", /* 0x02000 */
+ "ENUM", /* 0x04000 */
+ "?", /* 0x08000 */
+ "VERIFIED", /* 0x10000 */
+ "OPTIMIZED", /* 0x20000 */
+ }, {
+ "PUBLIC", /* 0x00001 */
+ "PRIVATE", /* 0x00002 */
+ "PROTECTED", /* 0x00004 */
+ "STATIC", /* 0x00008 */
+ "FINAL", /* 0x00010 */
+ "SYNCHRONIZED", /* 0x00020 */
+ "BRIDGE", /* 0x00040 */
+ "VARARGS", /* 0x00080 */
+ "NATIVE", /* 0x00100 */
+ "?", /* 0x00200 */
+ "ABSTRACT", /* 0x00400 */
+ "STRICT", /* 0x00800 */
+ "SYNTHETIC", /* 0x01000 */
+ "?", /* 0x02000 */
+ "?", /* 0x04000 */
+ "MIRANDA", /* 0x08000 */
+ "CONSTRUCTOR", /* 0x10000 */
+ "DECLARED_SYNCHRONIZED", /* 0x20000 */
+ }, {
+ "PUBLIC", /* 0x00001 */
+ "PRIVATE", /* 0x00002 */
+ "PROTECTED", /* 0x00004 */
+ "STATIC", /* 0x00008 */
+ "FINAL", /* 0x00010 */
+ "?", /* 0x00020 */
+ "VOLATILE", /* 0x00040 */
+ "TRANSIENT", /* 0x00080 */
+ "?", /* 0x00100 */
+ "?", /* 0x00200 */
+ "?", /* 0x00400 */
+ "?", /* 0x00800 */
+ "SYNTHETIC", /* 0x01000 */
+ "?", /* 0x02000 */
+ "ENUM", /* 0x04000 */
+ "?", /* 0x08000 */
+ "?", /* 0x10000 */
+ "?", /* 0x20000 */
+ },
+ };
+
+ // Allocate enough storage to hold the expected number of strings,
+ // plus a space between each. We over-allocate, using the longest
+ // string above as the base metric.
+ const int kLongest = 21; // The strlen of longest string above.
+ const int count = CountOnes(flags);
+ char* str;
+ char* cp;
+ cp = str = reinterpret_cast<char*>(malloc(count * (kLongest + 1) + 1));
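+ // Each flag takes at most kLongest chars plus a separating space; the final +1 is for the '\0'.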
+
+ for (int i = 0; i < kNumFlags; i++) {
+ if (flags & 0x01) {
+ const char* accessStr = kAccessStrings[for_what][i];
+ const int len = strlen(accessStr);
+ if (cp != str) {
+ *cp++ = ' ';
+ }
+ memcpy(cp, accessStr, len);
+ cp += len;
+ }
+ flags >>= 1;
+ } // for
+
+ *cp = '\0';
+ return str;
+}
+
+static std::string GetSignatureForProtoId(const dex_ir::ProtoId* proto) {
+ if (proto == nullptr) {
+ return "<no signature>";
+ }
+
+ const std::vector<const dex_ir::TypeId*>& params = proto->Parameters();
+ std::string result("(");
+ for (uint32_t i = 0; i < params.size(); ++i) {
+ result += params[i]->GetStringId()->Data();
+ }
+ result += ")";
+ result += proto->ReturnType()->GetStringId()->Data();
+ return result;
+}
+
+/*
+ * Copies character data from "data" to "out", converting non-ASCII values
+ * to fprintf format chars or an ASCII filler ('.' or '?').
+ *
+ * The output buffer must be able to hold (2*len)+1 bytes. The result is
+ * NULL-terminated.
+ */
+static void Asciify(char* out, const unsigned char* data, size_t len) {
+ while (len--) {
+ if (*data < 0x20) {
+ // Could do more here, but we don't need them yet.
+ switch (*data) {
+ case '\0':
+ *out++ = '\\';
+ *out++ = '0';
+ break;
+ case '\n':
+ *out++ = '\\';
+ *out++ = 'n';
+ break;
+ default:
+ *out++ = '.';
+ break;
+ } // switch
+ } else if (*data >= 0x80) {
+ *out++ = '?';
+ } else {
+ *out++ = *data;
+ }
+ data++;
+ } // while
+ *out = '\0';
+}
+
+/*
+ * Dumps a string value with some escape characters.
+ */
+static void DumpEscapedString(const char* p) {
+ fputs("\"", out_file_);
+ for (; *p; p++) {
+ switch (*p) {
+ case '\\':
+ fputs("\\\\", out_file_);
+ break;
+ case '\"':
+ fputs("\\\"", out_file_);
+ break;
+ case '\t':
+ fputs("\\t", out_file_);
+ break;
+ case '\n':
+ fputs("\\n", out_file_);
+ break;
+ case '\r':
+ fputs("\\r", out_file_);
+ break;
+ default:
+ putc(*p, out_file_);
+ } // switch
+ } // for
+ fputs("\"", out_file_);
+}
+
+/*
+ * Dumps a string as an XML attribute value.
+ */
+static void DumpXmlAttribute(const char* p) {
+ for (; *p; p++) {
+ switch (*p) {
+ case '&':
+ fputs("&amp;", out_file_);
+ break;
+ case '<':
+ fputs("&lt;", out_file_);
+ break;
+ case '>':
+ fputs("&gt;", out_file_);
+ break;
+ case '"':
+ fputs("&quot;", out_file_);
+ break;
+ case '\t':
+ fputs("&#x9;", out_file_);
+ break;
+ case '\n':
+ fputs("&#xA;", out_file_);
+ break;
+ case '\r':
+ fputs("&#xD;", out_file_);
+ break;
+ default:
+ putc(*p, out_file_);
+ } // switch
+ } // for
+}
+
+/*
+ * Dumps encoded value.
+ */
+static void DumpEncodedValue(const dex_ir::ArrayItem* data) {
+ switch (data->Type()) {
+ case DexFile::kDexAnnotationByte:
+ fprintf(out_file_, "%" PRId8, data->GetByte());
+ break;
+ case DexFile::kDexAnnotationShort:
+ fprintf(out_file_, "%" PRId16, data->GetShort());
+ break;
+ case DexFile::kDexAnnotationChar:
+ fprintf(out_file_, "%" PRIu16, data->GetChar());
+ break;
+ case DexFile::kDexAnnotationInt:
+ fprintf(out_file_, "%" PRId32, data->GetInt());
+ break;
+ case DexFile::kDexAnnotationLong:
+ fprintf(out_file_, "%" PRId64, data->GetLong());
+ break;
+ case DexFile::kDexAnnotationFloat: {
+ fprintf(out_file_, "%g", data->GetFloat());
+ break;
+ }
+ case DexFile::kDexAnnotationDouble: {
+ fprintf(out_file_, "%g", data->GetDouble());
+ break;
+ }
+ case DexFile::kDexAnnotationString: {
+ dex_ir::StringId* string_id = data->GetStringId();
+ if (options_.output_format_ == kOutputPlain) {
+ DumpEscapedString(string_id->Data());
+ } else {
+ DumpXmlAttribute(string_id->Data());
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationType: {
+ dex_ir::StringId* string_id = data->GetStringId();
+ fputs(string_id->Data(), out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationField:
+ case DexFile::kDexAnnotationEnum: {
+ dex_ir::FieldId* field_id = data->GetFieldId();
+ fputs(field_id->Name()->Data(), out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationMethod: {
+ dex_ir::MethodId* method_id = data->GetMethodId();
+ fputs(method_id->Name()->Data(), out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationArray: {
+ fputc('{', out_file_);
+ // Display all elements.
+ for (auto& array : *data->GetAnnotationArray()) {
+ fputc(' ', out_file_);
+ DumpEncodedValue(array.get());
+ }
+ fputs(" }", out_file_);
+ break;
+ }
+ case DexFile::kDexAnnotationAnnotation: {
+ fputs(data->GetAnnotationAnnotationString()->Data(), out_file_);
+ // Display all name=value pairs.
+ for (auto& subannotation : *data->GetAnnotationAnnotationNameValuePairArray()) {
+ fputc(' ', out_file_);
+ fputs(subannotation->Name()->Data(), out_file_);
+ fputc('=', out_file_);
+ DumpEncodedValue(subannotation->Value());
+ }
+ break;
+ }
+ case DexFile::kDexAnnotationNull:
+ fputs("null", out_file_);
+ break;
+ case DexFile::kDexAnnotationBoolean:
+ fputs(StrBool(data->GetBoolean()), out_file_);
+ break;
+ default:
+ fputs("????", out_file_);
+ break;
+ } // switch
+}
+
+/*
+ * Dumps the file header.
+ */
+static void DumpFileHeader(const dex_ir::Header* header) {
+ char sanitized[8 * 2 + 1];
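+ // Asciify() can expand each of the 8 magic bytes to two chars, plus a terminating '\0'.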
+ fprintf(out_file_, "DEX file header:\n");
+ Asciify(sanitized, header->Magic(), 8);
+ fprintf(out_file_, "magic : '%s'\n", sanitized);
+ fprintf(out_file_, "checksum : %08x\n", header->Checksum());
+ fprintf(out_file_, "signature : %02x%02x...%02x%02x\n",
+ header->Signature()[0], header->Signature()[1],
+ header->Signature()[DexFile::kSha1DigestSize - 2],
+ header->Signature()[DexFile::kSha1DigestSize - 1]);
+ fprintf(out_file_, "file_size : %d\n", header->FileSize());
+ fprintf(out_file_, "header_size : %d\n", header->HeaderSize());
+ fprintf(out_file_, "link_size : %d\n", header->LinkSize());
+ fprintf(out_file_, "link_off : %d (0x%06x)\n",
+ header->LinkOffset(), header->LinkOffset());
+ fprintf(out_file_, "string_ids_size : %d\n", header->StringIdsSize());
+ fprintf(out_file_, "string_ids_off : %d (0x%06x)\n",
+ header->StringIdsOffset(), header->StringIdsOffset());
+ fprintf(out_file_, "type_ids_size : %d\n", header->TypeIdsSize());
+ fprintf(out_file_, "type_ids_off : %d (0x%06x)\n",
+ header->TypeIdsOffset(), header->TypeIdsOffset());
+ fprintf(out_file_, "proto_ids_size : %d\n", header->ProtoIdsSize());
+ fprintf(out_file_, "proto_ids_off : %d (0x%06x)\n",
+ header->ProtoIdsOffset(), header->ProtoIdsOffset());
+ fprintf(out_file_, "field_ids_size : %d\n", header->FieldIdsSize());
+ fprintf(out_file_, "field_ids_off : %d (0x%06x)\n",
+ header->FieldIdsOffset(), header->FieldIdsOffset());
+ fprintf(out_file_, "method_ids_size : %d\n", header->MethodIdsSize());
+ fprintf(out_file_, "method_ids_off : %d (0x%06x)\n",
+ header->MethodIdsOffset(), header->MethodIdsOffset());
+ fprintf(out_file_, "class_defs_size : %d\n", header->ClassDefsSize());
+ fprintf(out_file_, "class_defs_off : %d (0x%06x)\n",
+ header->ClassDefsOffset(), header->ClassDefsOffset());
+ fprintf(out_file_, "data_size : %d\n", header->DataSize());
+ fprintf(out_file_, "data_off : %d (0x%06x)\n\n",
+ header->DataOffset(), header->DataOffset());
+}
+
+/*
+ * Dumps a class_def_item.
+ */
+static void DumpClassDef(dex_ir::Header* header, int idx) {
+ // General class information.
+ dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ fprintf(out_file_, "Class #%d header:\n", idx);
+ fprintf(out_file_, "class_idx : %d\n", class_def->ClassType()->GetOffset());
+ fprintf(out_file_, "access_flags : %d (0x%04x)\n",
+ class_def->GetAccessFlags(), class_def->GetAccessFlags());
+ uint32_t superclass_idx = class_def->Superclass() == nullptr ?
+ DexFile::kDexNoIndex16 : class_def->Superclass()->GetOffset();
+ fprintf(out_file_, "superclass_idx : %d\n", superclass_idx);
+ fprintf(out_file_, "interfaces_off : %d (0x%06x)\n",
+ class_def->InterfacesOffset(), class_def->InterfacesOffset());
+ uint32_t source_file_offset = 0xffffffffU;
+ if (class_def->SourceFile() != nullptr) {
+ source_file_offset = class_def->SourceFile()->GetOffset();
+ }
+ fprintf(out_file_, "source_file_idx : %d\n", source_file_offset);
+ uint32_t annotations_offset = 0;
+ if (class_def->Annotations() != nullptr) {
+ annotations_offset = class_def->Annotations()->GetOffset();
+ }
+ fprintf(out_file_, "annotations_off : %d (0x%06x)\n",
+ annotations_offset, annotations_offset);
+ fprintf(out_file_, "class_data_off : %d (0x%06x)\n",
+ class_def->GetClassData()->GetOffset(), class_def->GetClassData()->GetOffset());
+
+ // Fields and methods.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ if (class_data != nullptr) {
+ fprintf(out_file_, "static_fields_size : %zu\n", class_data->StaticFields().size());
+ fprintf(out_file_, "instance_fields_size: %zu\n", class_data->InstanceFields().size());
+ fprintf(out_file_, "direct_methods_size : %zu\n", class_data->DirectMethods().size());
+ fprintf(out_file_, "virtual_methods_size: %zu\n", class_data->VirtualMethods().size());
+ } else {
+ fprintf(out_file_, "static_fields_size : 0\n");
+ fprintf(out_file_, "instance_fields_size: 0\n");
+ fprintf(out_file_, "direct_methods_size : 0\n");
+ fprintf(out_file_, "virtual_methods_size: 0\n");
+ }
+ fprintf(out_file_, "\n");
+}
+
+/**
+ * Dumps an annotation set item.
+ */
+static void DumpAnnotationSetItem(dex_ir::AnnotationSetItem* set_item) {
+ if (set_item == nullptr || set_item->GetItems().size() == 0) {
+ fputs(" empty-annotation-set\n", out_file_);
+ return;
+ }
+ for (std::unique_ptr<dex_ir::AnnotationSetItem::AnnotationItem>& annotation :
+ set_item->GetItems()) {
+ if (annotation == nullptr) {
+ continue;
+ }
+ fputs(" ", out_file_);
+ switch (annotation->GetVisibility()) {
+ case DexFile::kDexVisibilityBuild: fputs("VISIBILITY_BUILD ", out_file_); break;
+ case DexFile::kDexVisibilityRuntime: fputs("VISIBILITY_RUNTIME ", out_file_); break;
+ case DexFile::kDexVisibilitySystem: fputs("VISIBILITY_SYSTEM ", out_file_); break;
+ default: fputs("VISIBILITY_UNKNOWN ", out_file_); break;
+ } // switch
+ // Decode raw bytes in annotation.
+ // const uint8_t* rData = annotation->annotation_;
+ dex_ir::ArrayItem* data = annotation->GetItem();
+ DumpEncodedValue(data);
+ fputc('\n', out_file_);
+ }
+}
+
+/*
+ * Dumps class annotations.
+ */
+static void DumpClassAnnotations(dex_ir::Header* header, int idx) {
+ dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ dex_ir::AnnotationsDirectoryItem* annotations_directory = class_def->Annotations();
+ if (annotations_directory == nullptr) {
+ return; // none
+ }
+
+ fprintf(out_file_, "Class #%d annotations:\n", idx);
+
+ dex_ir::AnnotationSetItem* class_set_item = annotations_directory->GetClassAnnotation();
+ std::vector<std::unique_ptr<dex_ir::AnnotationsDirectoryItem::FieldAnnotation>>& fields =
+ annotations_directory->GetFieldAnnotations();
+ std::vector<std::unique_ptr<dex_ir::AnnotationsDirectoryItem::MethodAnnotation>>& methods =
+ annotations_directory->GetMethodAnnotations();
+ std::vector<std::unique_ptr<dex_ir::AnnotationsDirectoryItem::ParameterAnnotation>>& parameters =
+ annotations_directory->GetParameterAnnotations();
+
+ // Annotations on the class itself.
+ if (class_set_item != nullptr) {
+ fprintf(out_file_, "Annotations on class\n");
+ DumpAnnotationSetItem(class_set_item);
+ }
+
+ // Annotations on fields.
+ for (auto& field : fields) {
+ const dex_ir::FieldId* field_id = field->GetFieldId();
+ const uint32_t field_idx = field_id->GetOffset();
+ const char* field_name = field_id->Name()->Data();
+ fprintf(out_file_, "Annotations on field #%u '%s'\n", field_idx, field_name);
+ DumpAnnotationSetItem(field->GetAnnotationSetItem());
+ }
+
+ // Annotations on methods.
+ for (auto& method : methods) {
+ const dex_ir::MethodId* method_id = method->GetMethodId();
+ const uint32_t method_idx = method_id->GetOffset();
+ const char* method_name = method_id->Name()->Data();
+ fprintf(out_file_, "Annotations on method #%u '%s'\n", method_idx, method_name);
+ DumpAnnotationSetItem(method->GetAnnotationSetItem());
+ }
+
+ // Annotations on method parameters.
+ for (auto& parameter : parameters) {
+ const dex_ir::MethodId* method_id = parameter->GetMethodId();
+ const uint32_t method_idx = method_id->GetOffset();
+ const char* method_name = method_id->Name()->Data();
+ fprintf(out_file_, "Annotations on method #%u '%s' parameters\n", method_idx, method_name);
+ uint32_t j = 0;
+ for (auto& annotation : parameter->GetAnnotations()) {
+ fprintf(out_file_, "#%u\n", j);
+ DumpAnnotationSetItem(annotation.get());
+ ++j;
+ }
+ }
+
+ fputc('\n', out_file_);
+}
+
+/*
+ * Dumps an interface that a class declares to implement.
+ */
+static void DumpInterface(dex_ir::TypeId* type_item, int i) {
+ const char* interface_name = type_item->GetStringId()->Data();
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " #%d : '%s'\n", i, interface_name);
+ } else {
+ std::string dot(DescriptorToDotWrapper(interface_name));
+ fprintf(out_file_, "<implements name=\"%s\">\n</implements>\n", dot.c_str());
+ }
+}
+
+/*
+ * Dumps the catches table associated with the code.
+ */
+static void DumpCatches(const dex_ir::CodeItem* code) {
+ const uint16_t tries_size = code->TriesSize();
+
+ // No catch table.
+ if (tries_size == 0) {
+ fprintf(out_file_, " catches : (none)\n");
+ return;
+ }
+
+ // Dump all table entries.
+ fprintf(out_file_, " catches : %d\n", tries_size);
+ std::vector<std::unique_ptr<const dex_ir::TryItem>>* tries = code->Tries();
+ for (uint32_t i = 0; i < tries_size; i++) {
+ const dex_ir::TryItem* try_item = (*tries)[i].get();
+ const uint32_t start = try_item->StartAddr();
+ const uint32_t end = start + try_item->InsnCount();
+ fprintf(out_file_, " 0x%04x - 0x%04x\n", start, end);
+ for (auto& handler : try_item->GetHandlers()) {
+ const dex_ir::TypeId* type_id = handler->GetTypeId();
+ const char* descriptor = (type_id == nullptr) ? "<any>" : type_id->GetStringId()->Data();
+ fprintf(out_file_, " %s -> 0x%04x\n", descriptor, handler->GetAddress());
+ } // for
+ } // for
+}
+
+/*
+ * Dumps all positions table entries associated with the code.
+ */
+static void DumpPositionInfo(const dex_ir::CodeItem* code) {
+ dex_ir::DebugInfoItem* debug_info = code->DebugInfo();
+ if (debug_info == nullptr) {
+ return;
+ }
+ std::vector<std::unique_ptr<dex_ir::PositionInfo>>& positions = debug_info->GetPositionInfo();
+ for (size_t i = 0; i < positions.size(); ++i) {
+ fprintf(out_file_, " 0x%04x line=%d\n", positions[i]->address_, positions[i]->line_);
+ }
+}
+
+/*
+ * Dumps all locals table entries associated with the code.
+ */
+static void DumpLocalInfo(const dex_ir::CodeItem* code) {
+ dex_ir::DebugInfoItem* debug_info = code->DebugInfo();
+ if (debug_info == nullptr) {
+ return;
+ }
+ std::vector<std::unique_ptr<dex_ir::LocalInfo>>& locals = debug_info->GetLocalInfo();
+ for (size_t i = 0; i < locals.size(); ++i) {
+ dex_ir::LocalInfo* entry = locals[i].get();
+ fprintf(out_file_, " 0x%04x - 0x%04x reg=%d %s %s %s\n",
+ entry->start_address_, entry->end_address_, entry->reg_,
+ entry->name_.c_str(), entry->descriptor_.c_str(), entry->signature_.c_str());
+ }
+}
+
+/*
+ * Helper for DumpInstruction(), which builds the string
+ * representation for the index in the given instruction.
+ * Returns a pointer to a buffer of sufficient size.
+ */
+static std::unique_ptr<char[]> IndexString(dex_ir::Header* header,
+ const Instruction* dec_insn,
+ size_t buf_size) {
+ std::unique_ptr<char[]> buf(new char[buf_size]);
+ // Determine index and width of the string.
+ uint32_t index = 0;
+ uint32_t width = 4;
+ switch (Instruction::FormatOf(dec_insn->Opcode())) {
+ // SOME NOT SUPPORTED:
+ // case Instruction::k20bc:
+ case Instruction::k21c:
+ case Instruction::k35c:
+ // case Instruction::k35ms:
+ case Instruction::k3rc:
+ // case Instruction::k3rms:
+ // case Instruction::k35mi:
+ // case Instruction::k3rmi:
+ index = dec_insn->VRegB();
+ width = 4;
+ break;
+ case Instruction::k31c:
+ index = dec_insn->VRegB();
+ width = 8;
+ break;
+ case Instruction::k22c:
+ // case Instruction::k22cs:
+ index = dec_insn->VRegC();
+ width = 4;
+ break;
+ default:
+ break;
+ } // switch
+
+ // Determine index type.
+ size_t outSize = 0;
+ switch (Instruction::IndexTypeOf(dec_insn->Opcode())) {
+ case Instruction::kIndexUnknown:
+ // This function should never get called for this type, but do
+ // something sensible here, just to help with debugging.
+ outSize = snprintf(buf.get(), buf_size, "<unknown-index>");
+ break;
+ case Instruction::kIndexNone:
+ // This function should never get called for this type, but do
+ // something sensible here, just to help with debugging.
+ outSize = snprintf(buf.get(), buf_size, "<no-index>");
+ break;
+ case Instruction::kIndexTypeRef:
+ if (index < header->TypeIdsSize()) {
+ const char* tp = header->TypeIds()[index]->GetStringId()->Data();
+ outSize = snprintf(buf.get(), buf_size, "%s // type@%0*x", tp, width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<type?> // type@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexStringRef:
+ if (index < header->StringIdsSize()) {
+ const char* st = header->StringIds()[index]->Data();
+ outSize = snprintf(buf.get(), buf_size, "\"%s\" // string@%0*x", st, width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<string?> // string@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexMethodRef:
+ if (index < header->MethodIdsSize()) {
+ dex_ir::MethodId* method_id = header->MethodIds()[index].get();
+ const char* name = method_id->Name()->Data();
+ char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str());
+ const char* back_descriptor = method_id->Class()->GetStringId()->Data();
+ outSize = snprintf(buf.get(), buf_size, "%s.%s:%s // method@%0*x",
+ back_descriptor, name, type_descriptor, width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<method?> // method@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexFieldRef:
+ if (index < header->FieldIdsSize()) {
+ dex_ir::FieldId* field_id = header->FieldIds()[index].get();
+ const char* name = field_id->Name()->Data();
+ const char* type_descriptor = field_id->Type()->GetStringId()->Data();
+ const char* back_descriptor = field_id->Class()->GetStringId()->Data();
+ outSize = snprintf(buf.get(), buf_size, "%s.%s:%s // field@%0*x",
+ back_descriptor, name, type_descriptor, width, index);
+ } else {
+ outSize = snprintf(buf.get(), buf_size, "<field?> // field@%0*x", width, index);
+ }
+ break;
+ case Instruction::kIndexVtableOffset:
+ outSize = snprintf(buf.get(), buf_size, "[%0*x] // vtable #%0*x",
+ width, index, width, index);
+ break;
+ case Instruction::kIndexFieldOffset:
+ outSize = snprintf(buf.get(), buf_size, "[obj+%0*x]", width, index);
+ break;
+ // SOME NOT SUPPORTED:
+ // case Instruction::kIndexVaries:
+ // case Instruction::kIndexInlineMethod:
+ default:
+ outSize = snprintf(buf.get(), buf_size, "<?>");
+ break;
+ } // switch
+
+ // Determine success of string construction.
+ if (outSize >= buf_size) {
+ // The buffer wasn't big enough; retry with computed size. Note: snprintf()
+ // doesn't count the '\0' as part of its returned size, so we add explicit
+ // space for it here.
+ return IndexString(header, dec_insn, outSize + 1);
+ }
+ return buf;
+}
+
+/*
+ * Dumps a single instruction.
+ */
+static void DumpInstruction(dex_ir::Header* header, const dex_ir::CodeItem* code,
+ uint32_t code_offset, uint32_t insn_idx, uint32_t insn_width,
+ const Instruction* dec_insn) {
+ // Address of instruction (expressed as byte offset).
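+ // The 0x10 skips the 16-byte code_item header that precedes the insns array.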
+ fprintf(out_file_, "%06x:", code_offset + 0x10 + insn_idx * 2);
+
+ // Dump (part of) raw bytes.
+ const uint16_t* insns = code->Insns();
+ for (uint32_t i = 0; i < 8; i++) {
+ if (i < insn_width) {
+ if (i == 7) {
+ fprintf(out_file_, " ... ");
+ } else {
+ // Print 16-bit value in little-endian order.
+ const uint8_t* bytePtr = (const uint8_t*) &insns[insn_idx + i];
+ fprintf(out_file_, " %02x%02x", bytePtr[0], bytePtr[1]);
+ }
+ } else {
+ fputs(" ", out_file_);
+ }
+ } // for
+
+ // Dump pseudo-instruction or opcode.
+ if (dec_insn->Opcode() == Instruction::NOP) {
+ const uint16_t instr = Get2LE((const uint8_t*) &insns[insn_idx]);
+ if (instr == Instruction::kPackedSwitchSignature) {
+ fprintf(out_file_, "|%04x: packed-switch-data (%d units)", insn_idx, insn_width);
+ } else if (instr == Instruction::kSparseSwitchSignature) {
+ fprintf(out_file_, "|%04x: sparse-switch-data (%d units)", insn_idx, insn_width);
+ } else if (instr == Instruction::kArrayDataSignature) {
+ fprintf(out_file_, "|%04x: array-data (%d units)", insn_idx, insn_width);
+ } else {
+ fprintf(out_file_, "|%04x: nop // spacer", insn_idx);
+ }
+ } else {
+ fprintf(out_file_, "|%04x: %s", insn_idx, dec_insn->Name());
+ }
+
+ // Set up additional argument.
+ std::unique_ptr<char[]> index_buf;
+ if (Instruction::IndexTypeOf(dec_insn->Opcode()) != Instruction::kIndexNone) {
+ index_buf = IndexString(header, dec_insn, 200);
+ }
+
+ // Dump the instruction.
+ //
+ // NOTE: pDecInsn->DumpString(pDexFile) differs too much from original.
+ //
+ switch (Instruction::FormatOf(dec_insn->Opcode())) {
+ case Instruction::k10x: // op
+ break;
+ case Instruction::k12x: // op vA, vB
+ fprintf(out_file_, " v%d, v%d", dec_insn->VRegA(), dec_insn->VRegB());
+ break;
+ case Instruction::k11n: // op vA, #+B
+ fprintf(out_file_, " v%d, #int %d // #%x",
+ dec_insn->VRegA(), (int32_t) dec_insn->VRegB(), (uint8_t)dec_insn->VRegB());
+ break;
+ case Instruction::k11x: // op vAA
+ fprintf(out_file_, " v%d", dec_insn->VRegA());
+ break;
+ case Instruction::k10t: // op +AA
+ case Instruction::k20t: { // op +AAAA
+ const int32_t targ = (int32_t) dec_insn->VRegA();
+ fprintf(out_file_, " %04x // %c%04x",
+ insn_idx + targ,
+ (targ < 0) ? '-' : '+',
+ (targ < 0) ? -targ : targ);
+ break;
+ }
+ case Instruction::k22x: // op vAA, vBBBB
+ fprintf(out_file_, " v%d, v%d", dec_insn->VRegA(), dec_insn->VRegB());
+ break;
+ case Instruction::k21t: { // op vAA, +BBBB
+ const int32_t targ = (int32_t) dec_insn->VRegB();
+ fprintf(out_file_, " v%d, %04x // %c%04x", dec_insn->VRegA(),
+ insn_idx + targ,
+ (targ < 0) ? '-' : '+',
+ (targ < 0) ? -targ : targ);
+ break;
+ }
+ case Instruction::k21s: // op vAA, #+BBBB
+ fprintf(out_file_, " v%d, #int %d // #%x",
+ dec_insn->VRegA(), (int32_t) dec_insn->VRegB(), (uint16_t)dec_insn->VRegB());
+ break;
+ case Instruction::k21h: // op vAA, #+BBBB0000[00000000]
+ // The printed format varies a bit based on the actual opcode.
+ if (dec_insn->Opcode() == Instruction::CONST_HIGH16) {
+ const int32_t value = dec_insn->VRegB() << 16;
+ fprintf(out_file_, " v%d, #int %d // #%x",
+ dec_insn->VRegA(), value, (uint16_t) dec_insn->VRegB());
+ } else {
+ const int64_t value = ((int64_t) dec_insn->VRegB()) << 48;
+ fprintf(out_file_, " v%d, #long %" PRId64 " // #%x",
+ dec_insn->VRegA(), value, (uint16_t) dec_insn->VRegB());
+ }
+ break;
+ case Instruction::k21c: // op vAA, thing@BBBB
+ case Instruction::k31c: // op vAA, thing@BBBBBBBB
+ fprintf(out_file_, " v%d, %s", dec_insn->VRegA(), index_buf.get());
+ break;
+ case Instruction::k23x: // op vAA, vBB, vCC
+ fprintf(out_file_, " v%d, v%d, v%d",
+ dec_insn->VRegA(), dec_insn->VRegB(), dec_insn->VRegC());
+ break;
+ case Instruction::k22b: // op vAA, vBB, #+CC
+ fprintf(out_file_, " v%d, v%d, #int %d // #%02x",
+ dec_insn->VRegA(), dec_insn->VRegB(),
+ (int32_t) dec_insn->VRegC(), (uint8_t) dec_insn->VRegC());
+ break;
+ case Instruction::k22t: { // op vA, vB, +CCCC
+ const int32_t targ = (int32_t) dec_insn->VRegC();
+ fprintf(out_file_, " v%d, v%d, %04x // %c%04x",
+ dec_insn->VRegA(), dec_insn->VRegB(),
+ insn_idx + targ,
+ (targ < 0) ? '-' : '+',
+ (targ < 0) ? -targ : targ);
+ break;
+ }
+ case Instruction::k22s: // op vA, vB, #+CCCC
+ fprintf(out_file_, " v%d, v%d, #int %d // #%04x",
+ dec_insn->VRegA(), dec_insn->VRegB(),
+ (int32_t) dec_insn->VRegC(), (uint16_t) dec_insn->VRegC());
+ break;
+ case Instruction::k22c: // op vA, vB, thing@CCCC
+ // NOT SUPPORTED:
+ // case Instruction::k22cs: // [opt] op vA, vB, field offset CCCC
+ fprintf(out_file_, " v%d, v%d, %s",
+ dec_insn->VRegA(), dec_insn->VRegB(), index_buf.get());
+ break;
+ case Instruction::k30t:
+ fprintf(out_file_, " #%08x", dec_insn->VRegA());
+ break;
+ case Instruction::k31i: { // op vAA, #+BBBBBBBB
+ // This is often, but not always, a float.
+ union {
+ float f;
+ uint32_t i;
+ } conv;
+ conv.i = dec_insn->VRegB();
+ fprintf(out_file_, " v%d, #float %g // #%08x",
+ dec_insn->VRegA(), conv.f, dec_insn->VRegB());
+ break;
+ }
+ case Instruction::k31t: // op vAA, offset +BBBBBBBB
+ fprintf(out_file_, " v%d, %08x // +%08x",
+ dec_insn->VRegA(), insn_idx + dec_insn->VRegB(), dec_insn->VRegB());
+ break;
+ case Instruction::k32x: // op vAAAA, vBBBB
+ fprintf(out_file_, " v%d, v%d", dec_insn->VRegA(), dec_insn->VRegB());
+ break;
+ case Instruction::k35c: { // op {vC, vD, vE, vF, vG}, thing@BBBB
+ // NOT SUPPORTED:
+ // case Instruction::k35ms: // [opt] invoke-virtual+super
+ // case Instruction::k35mi: // [opt] inline invoke
+ uint32_t arg[Instruction::kMaxVarArgRegs];
+ dec_insn->GetVarArgs(arg);
+ fputs(" {", out_file_);
+ for (int i = 0, n = dec_insn->VRegA(); i < n; i++) {
+ if (i == 0) {
+ fprintf(out_file_, "v%d", arg[i]);
+ } else {
+ fprintf(out_file_, ", v%d", arg[i]);
+ }
+ } // for
+ fprintf(out_file_, "}, %s", index_buf.get());
+ break;
+ }
+ case Instruction::k3rc: // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB
+ // NOT SUPPORTED:
+ // case Instruction::k3rms: // [opt] invoke-virtual+super/range
+ // case Instruction::k3rmi: // [opt] execute-inline/range
+ {
+ // This doesn't match the "dx" output when some of the args are
+ // 64-bit values -- dx only shows the first register.
+ fputs(" {", out_file_);
+ for (int i = 0, n = dec_insn->VRegA(); i < n; i++) {
+ if (i == 0) {
+ fprintf(out_file_, "v%d", dec_insn->VRegC() + i);
+ } else {
+ fprintf(out_file_, ", v%d", dec_insn->VRegC() + i);
+ }
+ } // for
+ fprintf(out_file_, "}, %s", index_buf.get());
+ }
+ break;
+ case Instruction::k51l: { // op vAA, #+BBBBBBBBBBBBBBBB
+ // This is often, but not always, a double.
+ union {
+ double d;
+ uint64_t j;
+ } conv;
+ conv.j = dec_insn->WideVRegB();
+ fprintf(out_file_, " v%d, #double %g // #%016" PRIx64,
+ dec_insn->VRegA(), conv.d, dec_insn->WideVRegB());
+ break;
+ }
+ // NOT SUPPORTED:
+ // case Instruction::k00x: // unknown op or breakpoint
+ // break;
+ default:
+ fprintf(out_file_, " ???");
+ break;
+ } // switch
+
+ fputc('\n', out_file_);
+}
+
+/*
+ * Dumps a bytecode disassembly.
+ */
+static void DumpBytecodes(dex_ir::Header* header, uint32_t idx,
+ const dex_ir::CodeItem* code, uint32_t code_offset) {
+ dex_ir::MethodId* method_id = header->MethodIds()[idx].get();
+ const char* name = method_id->Name()->Data();
+ const char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str());
+ const char* back_descriptor = method_id->Class()->GetStringId()->Data();
+
+ // Generate header.
+ std::string dot(DescriptorToDotWrapper(back_descriptor));
+ fprintf(out_file_, "%06x: |[%06x] %s.%s:%s\n",
+ code_offset, code_offset, dot.c_str(), name, type_descriptor);
+
+ // Iterate over all instructions.
+ const uint16_t* insns = code->Insns();
+ for (uint32_t insn_idx = 0; insn_idx < code->InsnsSize();) {
+ const Instruction* instruction = Instruction::At(&insns[insn_idx]);
+ const uint32_t insn_width = instruction->SizeInCodeUnits();
+ if (insn_width == 0) {
+ fprintf(stderr, "GLITCH: zero-width instruction at idx=0x%04x\n", insn_idx);
+ break;
+ }
+ DumpInstruction(header, code, code_offset, insn_idx, insn_width, instruction);
+ insn_idx += insn_width;
+ } // for
+}
+
+/*
+ * Dumps code of a method.
+ */
+static void DumpCode(dex_ir::Header* header, uint32_t idx, const dex_ir::CodeItem* code,
+ uint32_t code_offset) {
+ fprintf(out_file_, " registers : %d\n", code->RegistersSize());
+ fprintf(out_file_, " ins : %d\n", code->InsSize());
+ fprintf(out_file_, " outs : %d\n", code->OutsSize());
+ fprintf(out_file_, " insns size : %d 16-bit code units\n",
+ code->InsnsSize());
+
+ // Bytecode disassembly, if requested.
+ if (options_.disassemble_) {
+ DumpBytecodes(header, idx, code, code_offset);
+ }
+
+ // Try-catch blocks.
+ DumpCatches(code);
+
+ // Positions and locals table in the debug info.
+ fprintf(out_file_, " positions : \n");
+ DumpPositionInfo(code);
+ fprintf(out_file_, " locals : \n");
+ DumpLocalInfo(code);
+}
+
+/*
+ * Dumps a method.
+ */
+static void DumpMethod(dex_ir::Header* header, uint32_t idx, uint32_t flags,
+ const dex_ir::CodeItem* code, int i) {
+ // Bail for anything private if export only requested.
+ if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) {
+ return;
+ }
+
+ dex_ir::MethodId* method_id = header->MethodIds()[idx].get();
+ const char* name = method_id->Name()->Data();
+ char* type_descriptor = strdup(GetSignatureForProtoId(method_id->Proto()).c_str());
+ const char* back_descriptor = method_id->Class()->GetStringId()->Data();
+ char* access_str = CreateAccessFlagStr(flags, kAccessForMethod);
+
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " #%d : (in %s)\n", i, back_descriptor);
+ fprintf(out_file_, " name : '%s'\n", name);
+ fprintf(out_file_, " type : '%s'\n", type_descriptor);
+ fprintf(out_file_, " access : 0x%04x (%s)\n", flags, access_str);
+ if (code == nullptr) {
+ fprintf(out_file_, " code : (none)\n");
+ } else {
+ fprintf(out_file_, " code -\n");
+ DumpCode(header, idx, code, code->GetOffset());
+ }
+ if (options_.disassemble_) {
+ fputc('\n', out_file_);
+ }
+ } else if (options_.output_format_ == kOutputXml) {
+ const bool constructor = (name[0] == '<');
+
+ // Method name and prototype.
+ if (constructor) {
+ std::string dot(DescriptorClassToDot(back_descriptor));
+ fprintf(out_file_, "<constructor name=\"%s\"\n", dot.c_str());
+ dot = DescriptorToDotWrapper(back_descriptor);
+ fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
+ } else {
+ fprintf(out_file_, "<method name=\"%s\"\n", name);
+ const char* return_type = strrchr(type_descriptor, ')');
+ if (return_type == nullptr) {
+ fprintf(stderr, "bad method type descriptor '%s'\n", type_descriptor);
+ goto bail;
+ }
+ std::string dot(DescriptorToDotWrapper(return_type + 1));
+ fprintf(out_file_, " return=\"%s\"\n", dot.c_str());
+ fprintf(out_file_, " abstract=%s\n", QuotedBool((flags & kAccAbstract) != 0));
+ fprintf(out_file_, " native=%s\n", QuotedBool((flags & kAccNative) != 0));
+ fprintf(out_file_, " synchronized=%s\n", QuotedBool(
+ (flags & (kAccSynchronized | kAccDeclaredSynchronized)) != 0));
+ }
+
+ // Additional method flags.
+ fprintf(out_file_, " static=%s\n", QuotedBool((flags & kAccStatic) != 0));
+ fprintf(out_file_, " final=%s\n", QuotedBool((flags & kAccFinal) != 0));
+ // The "deprecated=" not knowable w/o parsing annotations.
+ fprintf(out_file_, " visibility=%s\n>\n", QuotedVisibility(flags));
+
+ // Parameters.
+ if (type_descriptor[0] != '(') {
+ fprintf(stderr, "ERROR: bad descriptor '%s'\n", type_descriptor);
+ goto bail;
+ }
+ char* tmp_buf = reinterpret_cast<char*>(malloc(strlen(type_descriptor) + 1));
+ const char* base = type_descriptor + 1;
+ int arg_num = 0;
+ while (*base != ')') {
+ char* cp = tmp_buf;
+ while (*base == '[') {
+ *cp++ = *base++;
+ }
+ if (*base == 'L') {
+ // Copy through ';'.
+ do {
+ *cp = *base++;
+ } while (*cp++ != ';');
+ } else {
+ // Primitive char, copy it.
+ if (strchr("ZBCSIFJD", *base) == nullptr) {
+ fprintf(stderr, "ERROR: bad method signature '%s'\n", base);
+ break; // while
+ }
+ *cp++ = *base++;
+ }
+ // Null terminate and display.
+ *cp++ = '\0';
+ std::string dot(DescriptorToDotWrapper(tmp_buf));
+ fprintf(out_file_, "<parameter name=\"arg%d\" type=\"%s\">\n"
+ "</parameter>\n", arg_num++, dot.c_str());
+ } // while
+ free(tmp_buf);
+ if (constructor) {
+ fprintf(out_file_, "</constructor>\n");
+ } else {
+ fprintf(out_file_, "</method>\n");
+ }
+ }
+
+ bail:
+ free(type_descriptor);
+ free(access_str);
+}
+
+/*
+ * Dumps a static (class) field.
+ */
+static void DumpSField(dex_ir::Header* header, uint32_t idx, uint32_t flags,
+ int i, dex_ir::ArrayItem* init) {
+ // Bail for anything private if export only requested.
+ if (options_.exports_only_ && (flags & (kAccPublic | kAccProtected)) == 0) {
+ return;
+ }
+
+ dex_ir::FieldId* field_id = header->FieldIds()[idx].get();
+ const char* name = field_id->Name()->Data();
+ const char* type_descriptor = field_id->Type()->GetStringId()->Data();
+ const char* back_descriptor = field_id->Class()->GetStringId()->Data();
+ char* access_str = CreateAccessFlagStr(flags, kAccessForField);
+
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " #%d : (in %s)\n", i, back_descriptor);
+ fprintf(out_file_, " name : '%s'\n", name);
+ fprintf(out_file_, " type : '%s'\n", type_descriptor);
+ fprintf(out_file_, " access : 0x%04x (%s)\n", flags, access_str);
+ if (init != nullptr) {
+ fputs(" value : ", out_file_);
+ DumpEncodedValue(init);
+ fputs("\n", out_file_);
+ }
+ } else if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "<field name=\"%s\"\n", name);
+ std::string dot(DescriptorToDotWrapper(type_descriptor));
+ fprintf(out_file_, " type=\"%s\"\n", dot.c_str());
+ fprintf(out_file_, " transient=%s\n", QuotedBool((flags & kAccTransient) != 0));
+ fprintf(out_file_, " volatile=%s\n", QuotedBool((flags & kAccVolatile) != 0));
+ // The "value=" is not knowable w/o parsing annotations.
+ fprintf(out_file_, " static=%s\n", QuotedBool((flags & kAccStatic) != 0));
+ fprintf(out_file_, " final=%s\n", QuotedBool((flags & kAccFinal) != 0));
+ // The "deprecated=" is not knowable w/o parsing annotations.
+ fprintf(out_file_, " visibility=%s\n", QuotedVisibility(flags));
+ if (init != nullptr) {
+ fputs(" value=\"", out_file_);
+ DumpEncodedValue(init);
+ fputs("\"\n", out_file_);
+ }
+ fputs(">\n</field>\n", out_file_);
+ }
+
+ free(access_str);
+}
+
+/*
+ * Dumps an instance field.
+ */
+static void DumpIField(dex_ir::Header* header, uint32_t idx, uint32_t flags, int i) {
+ DumpSField(header, idx, flags, i, nullptr);
+}
+
+/*
+ * Dumping a CFG. Note that this will do duplicate work. utils.h doesn't expose the code-item
+ * version, so the DumpMethodCFG code will have to iterate again to find it. But dexdump is a
+ * tool, so this is not performance-critical.
+ */
+
+static void DumpCFG(const DexFile* dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code) {
+ if (code != nullptr) {
+ std::ostringstream oss;
+ DumpMethodCFG(dex_file, dex_method_idx, oss);
+ fprintf(out_file_, "%s", oss.str().c_str());
+ }
+}
+
+static void DumpCFG(const DexFile* dex_file, int idx) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(idx);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+ if (class_data == nullptr) { // empty class such as a marker interface?
+ return;
+ }
+ ClassDataItemIterator it(*dex_file, class_data);
+ while (it.HasNextStaticField()) {
+ it.Next();
+ }
+ while (it.HasNextInstanceField()) {
+ it.Next();
+ }
+ while (it.HasNextDirectMethod()) {
+ DumpCFG(dex_file,
+ it.GetMemberIndex(),
+ it.GetMethodCodeItem());
+ it.Next();
+ }
+ while (it.HasNextVirtualMethod()) {
+ DumpCFG(dex_file,
+ it.GetMemberIndex(),
+ it.GetMethodCodeItem());
+ it.Next();
+ }
+}
+
+/*
+ * Dumps the class.
+ *
+ * Note "idx" is a DexClassDef index, not a DexTypeId index.
+ *
+ * If "*last_package" is nullptr or does not match the current class' package,
+ * the value will be replaced with a newly-allocated string.
+ */
+static void DumpClass(dex_ir::Header* header, int idx, char** last_package) {
+ dex_ir::ClassDef* class_def = header->ClassDefs()[idx].get();
+ // Omitting non-public class.
+ if (options_.exports_only_ && (class_def->GetAccessFlags() & kAccPublic) == 0) {
+ return;
+ }
+
+ if (options_.show_section_headers_) {
+ DumpClassDef(header, idx);
+ }
+
+ if (options_.show_annotations_) {
+ DumpClassAnnotations(header, idx);
+ }
+
+ if (options_.show_cfg_) {
+ DumpCFG(&header->GetDexFile(), idx);
+ return;
+ }
+
+ // For the XML output, show the package name. Ideally we'd gather
+ // up the classes, sort them, and dump them alphabetically so the
+ // package name wouldn't jump around, but that's not a great plan
+ // for something that needs to run on the device.
+ const char* class_descriptor = header->ClassDefs()[idx]->ClassType()->GetStringId()->Data();
+ if (!(class_descriptor[0] == 'L' &&
+ class_descriptor[strlen(class_descriptor)-1] == ';')) {
+ // Arrays and primitives should not be defined explicitly. Keep going?
+ fprintf(stderr, "Malformed class name '%s'\n", class_descriptor);
+ } else if (options_.output_format_ == kOutputXml) {
+ char* mangle = strdup(class_descriptor + 1);
+ mangle[strlen(mangle)-1] = '\0';
+
+ // Reduce to just the package name.
+ char* last_slash = strrchr(mangle, '/');
+ if (last_slash != nullptr) {
+ *last_slash = '\0';
+ } else {
+ *mangle = '\0';
+ }
+
+ for (char* cp = mangle; *cp != '\0'; cp++) {
+ if (*cp == '/') {
+ *cp = '.';
+ }
+ } // for
+
+ if (*last_package == nullptr || strcmp(mangle, *last_package) != 0) {
+ // Start of a new package.
+ if (*last_package != nullptr) {
+ fprintf(out_file_, "</package>\n");
+ }
+ fprintf(out_file_, "<package name=\"%s\"\n>\n", mangle);
+ free(*last_package);
+ *last_package = mangle;
+ } else {
+ free(mangle);
+ }
+ }
+
+ // General class information.
+ char* access_str = CreateAccessFlagStr(class_def->GetAccessFlags(), kAccessForClass);
+ const char* superclass_descriptor = nullptr;
+ if (class_def->Superclass() != nullptr) {
+ superclass_descriptor = class_def->Superclass()->GetStringId()->Data();
+ }
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, "Class #%d -\n", idx);
+ fprintf(out_file_, " Class descriptor : '%s'\n", class_descriptor);
+ fprintf(out_file_, " Access flags : 0x%04x (%s)\n",
+ class_def->GetAccessFlags(), access_str);
+ if (superclass_descriptor != nullptr) {
+ fprintf(out_file_, " Superclass : '%s'\n", superclass_descriptor);
+ }
+ fprintf(out_file_, " Interfaces -\n");
+ } else {
+ std::string dot(DescriptorClassToDot(class_descriptor));
+ fprintf(out_file_, "<class name=\"%s\"\n", dot.c_str());
+ if (superclass_descriptor != nullptr) {
+ dot = DescriptorToDotWrapper(superclass_descriptor);
+ fprintf(out_file_, " extends=\"%s\"\n", dot.c_str());
+ }
+ fprintf(out_file_, " interface=%s\n",
+ QuotedBool((class_def->GetAccessFlags() & kAccInterface) != 0));
+ fprintf(out_file_, " abstract=%s\n",
+ QuotedBool((class_def->GetAccessFlags() & kAccAbstract) != 0));
+ fprintf(out_file_, " static=%s\n", QuotedBool((class_def->GetAccessFlags() & kAccStatic) != 0));
+ fprintf(out_file_, " final=%s\n", QuotedBool((class_def->GetAccessFlags() & kAccFinal) != 0));
+ // The "deprecated=" not knowable w/o parsing annotations.
+ fprintf(out_file_, " visibility=%s\n", QuotedVisibility(class_def->GetAccessFlags()));
+ fprintf(out_file_, ">\n");
+ }
+
+ // Interfaces.
+ std::vector<dex_ir::TypeId*>* interfaces = class_def->Interfaces();
+ for (uint32_t i = 0; i < interfaces->size(); i++) {
+ DumpInterface((*interfaces)[i], i);
+ } // for
+
+ // Fields and methods.
+ dex_ir::ClassData* class_data = class_def->GetClassData();
+ // Prepare data for static fields.
+ std::vector<std::unique_ptr<dex_ir::ArrayItem>>* static_values = class_def->StaticValues();
+ const uint32_t static_values_size = (static_values == nullptr) ? 0 : static_values->size();
+
+ // Static fields.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Static fields -\n");
+ }
+ std::vector<std::unique_ptr<dex_ir::FieldItem>>& static_fields = class_data->StaticFields();
+ for (uint32_t i = 0; i < static_fields.size(); i++) {
+ DumpSField(header,
+ static_fields[i]->GetFieldId()->GetOffset(),
+ static_fields[i]->GetAccessFlags(),
+ i,
+ i < static_values_size ? (*static_values)[i].get() : nullptr);
+ } // for
+
+ // Instance fields.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Instance fields -\n");
+ }
+ std::vector<std::unique_ptr<dex_ir::FieldItem>>& instance_fields = class_data->InstanceFields();
+ for (uint32_t i = 0; i < instance_fields.size(); i++) {
+ DumpIField(header,
+ instance_fields[i]->GetFieldId()->GetOffset(),
+ instance_fields[i]->GetAccessFlags(),
+ i);
+ } // for
+
+ // Direct methods.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Direct methods -\n");
+ }
+ std::vector<std::unique_ptr<dex_ir::MethodItem>>& direct_methods = class_data->DirectMethods();
+ for (uint32_t i = 0; i < direct_methods.size(); i++) {
+ DumpMethod(header,
+ direct_methods[i]->GetMethodId()->GetOffset(),
+ direct_methods[i]->GetAccessFlags(),
+ direct_methods[i]->GetCodeItem(),
+ i);
+ } // for
+
+ // Virtual methods.
+ if (options_.output_format_ == kOutputPlain) {
+ fprintf(out_file_, " Virtual methods -\n");
+ }
+ std::vector<std::unique_ptr<dex_ir::MethodItem>>& virtual_methods = class_data->VirtualMethods();
+ for (uint32_t i = 0; i < virtual_methods.size(); i++) {
+ DumpMethod(header,
+ virtual_methods[i]->GetMethodId()->GetOffset(),
+ virtual_methods[i]->GetAccessFlags(),
+ virtual_methods[i]->GetCodeItem(),
+ i);
+ } // for
+
+ // End of class.
+ if (options_.output_format_ == kOutputPlain) {
+ const char* file_name = "unknown";
+ if (class_def->SourceFile() != nullptr) {
+ file_name = class_def->SourceFile()->Data();
+ }
+ const dex_ir::StringId* source_file = class_def->SourceFile();
+ fprintf(out_file_, " source_file_idx : %d (%s)\n\n",
+ source_file == nullptr ? 0xffffffffU : source_file->GetOffset(), file_name);
+ } else if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "</class>\n");
+ }
+
+ free(access_str);
+}
+
+/*
+ * Dumps the requested sections of the file.
+ */
+static void ProcessDexFile(const char* file_name, const DexFile* dex_file) {
+ if (options_.verbose_) {
+ fprintf(out_file_, "Opened '%s', DEX version '%.3s'\n",
+ file_name, dex_file->GetHeader().magic_ + 4);
+ }
+ dex_ir::Header header(*dex_file);
+
+ // Headers.
+ if (options_.show_file_headers_) {
+ DumpFileHeader(&header);
+ }
+
+ // Open XML context.
+ if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "<api>\n");
+ }
+
+ // Iterate over all classes.
+ char* package = nullptr;
+ const uint32_t class_defs_size = header.ClassDefsSize();
+ for (uint32_t i = 0; i < class_defs_size; i++) {
+ DumpClass(&header, i, &package);
+ } // for
+
+ // Free the last package allocated.
+ if (package != nullptr) {
+ fprintf(out_file_, "</package>\n");
+ free(package);
+ }
+
+ // Close XML context.
+ if (options_.output_format_ == kOutputXml) {
+ fprintf(out_file_, "</api>\n");
+ }
+}
+
+/*
+ * Processes a single file (either direct .dex or indirect .zip/.jar/.apk).
+ */
+int ProcessFile(const char* file_name) {
+ if (options_.verbose_) {
+ fprintf(out_file_, "Processing '%s'...\n", file_name);
+ }
+
+ // If the file is not a .dex file, the function tries .zip/.jar/.apk files,
+ // all of which are Zip archives with "classes.dex" inside.
+ const bool verify_checksum = !options_.ignore_bad_checksum_;
+ std::string error_msg;
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ if (!DexFile::Open(file_name, file_name, verify_checksum, &error_msg, &dex_files)) {
+ // Display returned error message to user. Note that this error behavior
+ // differs from the error messages shown by the original Dalvik dexdump.
+ fputs(error_msg.c_str(), stderr);
+ fputc('\n', stderr);
+ return -1;
+ }
+
+ // Success. Either report checksum verification or process
+ // all dex files found in given file.
+ if (options_.checksum_only_) {
+ fprintf(out_file_, "Checksum verified\n");
+ } else {
+ for (size_t i = 0; i < dex_files.size(); i++) {
+ ProcessDexFile(file_name, dex_files[i].get());
+ }
+ }
+ return 0;
+}
+
+} // namespace art
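
The XML branch of DumpMethod above splits a method's type descriptor (for example "(I[Ljava/lang/String;D)V") into one descriptor per parameter: array prefixes ('[') are copied through, reference types are copied up to the terminating ';', and single-character primitive codes are validated against "ZBCSIFJD". The following is a minimal standalone restatement of that splitting logic using std::string instead of the malloc'd scratch buffer; the function name and example descriptor are illustrative, not taken from the patch.

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Splits the parameter list of a method descriptor such as
// "(I[Ljava/lang/String;D)V" into one descriptor per parameter.
// Returns an empty vector on malformed input.
static std::vector<std::string> SplitParameters(const char* descriptor) {
  std::vector<std::string> params;
  if (descriptor == nullptr || descriptor[0] != '(') {
    return params;
  }
  const char* p = descriptor + 1;
  while (*p != ')' && *p != '\0') {
    std::string one;
    while (*p == '[') {        // Array dimensions prefix the element type.
      one += *p++;
    }
    if (*p == 'L') {           // Reference type: copy through the ';'.
      do {
        one += *p;
      } while (*p++ != ';');
    } else {                   // Single-character primitive code.
      if (std::strchr("ZBCSIFJD", *p) == nullptr) {
        return std::vector<std::string>();  // Malformed signature.
      }
      one += *p++;
    }
    params.push_back(one);
  }
  return params;
}

int main() {
  for (const std::string& s : SplitParameters("(I[Ljava/lang/String;D)V")) {
    printf("param: %s\n", s.c_str());
  }
  return 0;
}
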
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
new file mode 100644
index 0000000..bae587d
--- /dev/null
+++ b/dexlayout/dexlayout.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Header file of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#ifndef ART_DEXLAYOUT_DEXLAYOUT_H_
+#define ART_DEXLAYOUT_DEXLAYOUT_H_
+
+#include <stdint.h>
+#include <stdio.h>
+
+namespace art {
+
+/* Supported output formats. */
+enum OutputFormat {
+ kOutputPlain = 0, // default
+ kOutputXml, // XML-style
+};
+
+/* Command-line options. */
+struct Options {
+ bool build_dex_ir_;
+ bool checksum_only_;
+ bool disassemble_;
+ bool exports_only_;
+ bool ignore_bad_checksum_;
+ bool show_annotations_;
+ bool show_cfg_;
+ bool show_file_headers_;
+ bool show_section_headers_;
+ bool verbose_;
+ OutputFormat output_format_;
+ const char* output_file_name_;
+};
+
+/* Prototypes. */
+extern struct Options options_;
+extern FILE* out_file_;
+int ProcessFile(const char* file_name);
+
+} // namespace art
+
+#endif // ART_DEXLAYOUT_DEXLAYOUT_H_
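
For orientation, a minimal sketch of how a caller would drive the interface declared in this header: populate the globally-declared options_ and out_file_, then hand each file name to ProcessFile(). The flag values below are illustrative, and the sketch assumes the runtime plumbing (InitLogging, MemMap::Init) has already run, exactly as the real driver in dexlayout_main.cc arranges.

// Hypothetical caller of the dexlayout interface; not part of the patch.
#include "dexlayout.h"

int DumpOneDexPlain(const char* dex_path) {
  art::options_ = art::Options();             // Value-initialize: clear all flags.
  art::options_.verbose_ = true;
  art::options_.disassemble_ = true;          // -d
  art::options_.show_file_headers_ = true;    // -f
  art::options_.output_format_ = art::kOutputPlain;
  art::out_file_ = stdout;                    // Or fopen(...) when -o is given.
  return art::ProcessFile(dex_path);          // 0 on success, -1 on error.
}
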
diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc
new file mode 100644
index 0000000..286a0c6
--- /dev/null
+++ b/dexlayout/dexlayout_main.cc
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Main driver of the dexlayout utility.
+ *
+ * This is a tool to read dex files into an internal representation,
+ * reorganize the representation, and emit dex files with a better
+ * file layout.
+ */
+
+#include "dexlayout.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "mem_map.h"
+#include "runtime.h"
+
+namespace art {
+
+static const char* kProgramName = "dexlayout";
+
+/*
+ * Shows usage.
+ */
+static void Usage(void) {
+ fprintf(stderr, "Copyright (C) 2007 The Android Open Source Project\n\n");
+ fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile]"
+ " dexfile...\n\n", kProgramName);
+ fprintf(stderr, " -a : display annotations\n");
+ fprintf(stderr, " -b : build dex_ir\n");
+ fprintf(stderr, " -c : verify checksum and exit\n");
+ fprintf(stderr, " -d : disassemble code sections\n");
+ fprintf(stderr, " -e : display exported items only\n");
+ fprintf(stderr, " -f : display summary information from file header\n");
+ fprintf(stderr, " -g : display CFG for dex\n");
+ fprintf(stderr, " -h : display file header details\n");
+ fprintf(stderr, " -i : ignore checksum failures\n");
+ fprintf(stderr, " -l : output layout, either 'plain' or 'xml'\n");
+ fprintf(stderr, " -o : output file name (defaults to stdout)\n");
+}
+
+/*
+ * Main driver of the dexlayout utility.
+ */
+int DexlayoutDriver(int argc, char** argv) {
+  // ART-specific setup.
+ InitLogging(argv);
+ MemMap::Init();
+
+ // Reset options.
+ bool want_usage = false;
+ memset(&options_, 0, sizeof(options_));
+ options_.verbose_ = true;
+
+ // Parse all arguments.
+ while (1) {
+ const int ic = getopt(argc, argv, "abcdefghil:o:");
+ if (ic < 0) {
+ break; // done
+ }
+ switch (ic) {
+ case 'a': // display annotations
+ options_.show_annotations_ = true;
+ break;
+ case 'b': // build dex_ir
+ options_.build_dex_ir_ = true;
+ break;
+ case 'c': // verify the checksum then exit
+ options_.checksum_only_ = true;
+ break;
+ case 'd': // disassemble Dalvik instructions
+ options_.disassemble_ = true;
+ break;
+ case 'e': // exported items only
+ options_.exports_only_ = true;
+ break;
+ case 'f': // display outer file header
+ options_.show_file_headers_ = true;
+ break;
+ case 'g': // display cfg
+ options_.show_cfg_ = true;
+ break;
+ case 'h': // display section headers, i.e. all meta-data
+ options_.show_section_headers_ = true;
+ break;
+ case 'i': // continue even if checksum is bad
+ options_.ignore_bad_checksum_ = true;
+ break;
+ case 'l': // layout
+ if (strcmp(optarg, "plain") == 0) {
+ options_.output_format_ = kOutputPlain;
+ } else if (strcmp(optarg, "xml") == 0) {
+ options_.output_format_ = kOutputXml;
+ options_.verbose_ = false;
+ } else {
+ want_usage = true;
+ }
+ break;
+ case 'o': // output file
+ options_.output_file_name_ = optarg;
+ break;
+ default:
+ want_usage = true;
+ break;
+ } // switch
+ } // while
+
+ // Detect early problems.
+ if (optind == argc) {
+ fprintf(stderr, "%s: no file specified\n", kProgramName);
+ want_usage = true;
+ }
+ if (options_.checksum_only_ && options_.ignore_bad_checksum_) {
+ fprintf(stderr, "Can't specify both -c and -i\n");
+ want_usage = true;
+ }
+ if (want_usage) {
+ Usage();
+ return 2;
+ }
+
+ // Open alternative output file.
+ if (options_.output_file_name_) {
+ out_file_ = fopen(options_.output_file_name_, "w");
+ if (!out_file_) {
+ fprintf(stderr, "Can't open %s\n", options_.output_file_name_);
+ return 1;
+ }
+ }
+
+ // Process all files supplied on command line.
+ int result = 0;
+ while (optind < argc) {
+ result |= ProcessFile(argv[optind++]);
+ } // while
+ return result != 0;
+}
+
+} // namespace art
+
+int main(int argc, char** argv) {
+ return art::DexlayoutDriver(argc, argv);
+}
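
The tail loop above folds per-file results into a single exit status: ProcessFile() returns 0 or -1, the bitwise OR accumulates any failure, and the driver exits with 1 if anything failed (2 is reserved for usage errors). A tiny standalone illustration of the same folding, with an invented stub standing in for ProcessFile:

#include <cstring>

// Stand-in for art::ProcessFile(): files whose name contains "bad" fail.
static int FakeProcessFile(const char* name) {
  return (std::strstr(name, "bad") != nullptr) ? -1 : 0;
}

int main(int argc, char** argv) {
  int result = 0;
  for (int i = 1; i < argc; ++i) {
    result |= FakeProcessFile(argv[i]);   // Any -1 makes result nonzero.
  }
  return result != 0;                     // 1 if any file failed, else 0.
}
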
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
new file mode 100644
index 0000000..42b64c3
--- /dev/null
+++ b/dexlayout/dexlayout_test.cc
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <string>
+#include <vector>
+#include <sstream>
+
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "base/stringprintf.h"
+#include "common_runtime_test.h"
+#include "utils.h"
+
+namespace art {
+
+class DexLayoutTest : public CommonRuntimeTest {
+ protected:
+ virtual void SetUp() {
+ CommonRuntimeTest::SetUp();
+ // TODO: Test with other dex files for improved coverage.
+ // Dogfood our own lib core dex file.
+ dex_file_ = GetLibCoreDexFileNames()[0];
+ }
+
+ // Runs test with given arguments.
+ bool Exec(std::string* error_msg) {
+ // TODO: dexdump2 -> dexdump ?
+ ScratchFile dexdump_output;
+ std::string dexdump_filename = dexdump_output.GetFilename();
+ std::string dexdump = GetTestAndroidRoot() + "/bin/dexdump2";
+ EXPECT_TRUE(OS::FileExists(dexdump.c_str())) << dexdump << " should be a valid file path";
+ std::vector<std::string> dexdump_exec_argv =
+ { dexdump, "-d", "-f", "-h", "-l", "plain", "-o", dexdump_filename, dex_file_ };
+
+ ScratchFile dexlayout_output;
+ std::string dexlayout_filename = dexlayout_output.GetFilename();
+ std::string dexlayout = GetTestAndroidRoot() + "/bin/dexlayout";
+ EXPECT_TRUE(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
+ std::vector<std::string> dexlayout_exec_argv =
+ { dexlayout, "-d", "-f", "-h", "-l", "plain", "-o", dexlayout_filename, dex_file_ };
+
+ if (!::art::Exec(dexdump_exec_argv, error_msg)) {
+ return false;
+ }
+ if (!::art::Exec(dexlayout_exec_argv, error_msg)) {
+ return false;
+ }
+ std::vector<std::string> diff_exec_argv =
+ { "/usr/bin/diff", dexdump_filename, dexlayout_filename };
+ if (!::art::Exec(diff_exec_argv, error_msg)) {
+ return false;
+ }
+ return true;
+ }
+
+ std::string dex_file_;
+};
+
+
+TEST_F(DexLayoutTest, FullPlainOutput) {
+ // Disable test on target.
+ TEST_DISABLED_FOR_TARGET();
+ std::string error_msg;
+ ASSERT_TRUE(Exec(&error_msg)) << error_msg;
+}
+
+} // namespace art
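
The FullPlainOutput test above is a golden-output comparison: it runs dexdump2 and dexlayout over the same core dex file with identical flags (-d -f -h -l plain) and requires /usr/bin/diff to report no differences, so dexlayout's plain dump must match dexdump byte for byte. A simplified sketch of the same pattern outside the ART test harness; the paths are placeholders and std::system() stands in for ::art::Exec.

#include <cstdlib>
#include <string>

// Runs two dumpers over the same input and diffs their output files.
// Returns true when both runs succeed and the dumps are identical.
static bool OutputsMatch(const std::string& tool_a,
                         const std::string& tool_b,
                         const std::string& input) {
  const std::string out_a = "/tmp/dump_a.txt";   // Placeholder scratch files.
  const std::string out_b = "/tmp/dump_b.txt";
  const std::string flags = " -d -f -h -l plain -o ";
  if (std::system((tool_a + flags + out_a + " " + input).c_str()) != 0) return false;
  if (std::system((tool_b + flags + out_b + " " + input).c_str()) != 0) return false;
  return std::system(("diff -q " + out_a + " " + out_b).c_str()) == 0;
}

int main() {
  // Hypothetical paths; the real test derives them from GetTestAndroidRoot().
  return OutputsMatch("out/host/bin/dexdump2", "out/host/bin/dexlayout",
                      "core-libart.jar") ? 0 : 1;
}
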
diff --git a/imgdiag/imgdiag.cc b/imgdiag/imgdiag.cc
index 21a0ca0..106cf2f 100644
--- a/imgdiag/imgdiag.cc
+++ b/imgdiag/imgdiag.cc
@@ -59,7 +59,7 @@
image_diff_pid_(image_diff_pid),
zygote_diff_pid_(zygote_diff_pid) {}
- bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
os << "IMAGE LOCATION: " << image_location_ << "\n\n";
@@ -98,7 +98,7 @@
}
bool DumpImageDiff(pid_t image_diff_pid, pid_t zygote_diff_pid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
{
@@ -145,7 +145,7 @@
}
static std::string PrettyFieldValue(ArtField* field, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream oss;
switch (field->GetTypeAsPrimitiveType()) {
case Primitive::kPrimNot: {
@@ -217,7 +217,7 @@
void DiffObjectContents(mirror::Object* obj,
uint8_t* remote_bytes,
- std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_) {
const char* tabs = " ";
// Attempt to find fields for all dirty bytes.
mirror::Class* klass = obj->GetClass();
@@ -283,7 +283,7 @@
bool DumpImageDiffMap(pid_t image_diff_pid,
pid_t zygote_diff_pid,
const backtrace_map_t& boot_map)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
const PointerSize pointer_size = InstructionSetPointerSize(
Runtime::Current()->GetInstructionSet());
@@ -867,7 +867,7 @@
}
static std::string GetClassDescriptor(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(klass != nullptr);
std::string descriptor;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index 96c8e94..c87a18b 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -514,7 +514,7 @@
return oat_file_.GetOatHeader().GetInstructionSet();
}
- const void* GetQuickOatCode(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const void* GetQuickOatCode(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < oat_dex_files_.size(); i++) {
const OatFile::OatDexFile* oat_dex_file = oat_dex_files_[i];
CHECK(oat_dex_file != nullptr);
@@ -1268,7 +1268,7 @@
DCHECK(options_.class_loader_ != nullptr);
return verifier::MethodVerifier::VerifyMethodAndDump(
soa.Self(), vios, dex_method_idx, dex_file, dex_cache, *options_.class_loader_,
- &class_def, code_item, nullptr, method_access_flags);
+ class_def, code_item, nullptr, method_access_flags);
}
return nullptr;
@@ -1424,7 +1424,7 @@
image_header_(image_header),
oat_dumper_options_(oat_dumper_options) {}
- bool Dump() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Dump() REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *os_;
std::ostream& indent_os = vios_.Stream();
@@ -1671,7 +1671,7 @@
public:
explicit DumpArtMethodVisitor(ImageDumper* image_dumper) : image_dumper_(image_dumper) {}
- virtual void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& indent_os = image_dumper_->vios_.Stream();
indent_os << method << " " << " ArtMethod: " << PrettyMethod(method) << "\n";
image_dumper_->DumpMethod(method, indent_os);
@@ -1683,7 +1683,7 @@
};
static void PrettyObjectValue(std::ostream& os, mirror::Class* type, mirror::Object* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(type != nullptr);
if (value == nullptr) {
os << StringPrintf("null %s\n", PrettyDescriptor(type).c_str());
@@ -1700,7 +1700,7 @@
}
static void PrintField(std::ostream& os, ArtField* field, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
os << StringPrintf("%s: ", field->GetName());
switch (field->GetTypeAsPrimitiveType()) {
case Primitive::kPrimLong:
@@ -1753,7 +1753,7 @@
}
static void DumpFields(std::ostream& os, mirror::Object* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* super = klass->GetSuperClass();
if (super != nullptr) {
DumpFields(os, obj, super);
@@ -1767,7 +1767,7 @@
return image_space_.Contains(object);
}
- const void* GetQuickOatCodeBegin(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_) {
+ const void* GetQuickOatCodeBegin(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_) {
const void* quick_code = m->GetEntryPointFromQuickCompiledCodePtrSize(
image_header_.GetPointerSize());
if (Runtime::Current()->GetClassLinker()->IsQuickResolutionStub(quick_code)) {
@@ -1780,7 +1780,7 @@
}
uint32_t GetQuickOatCodeSize(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const uint32_t* oat_code_begin = reinterpret_cast<const uint32_t*>(GetQuickOatCodeBegin(m));
if (oat_code_begin == nullptr) {
return 0;
@@ -1789,7 +1789,7 @@
}
const void* GetQuickOatCodeEnd(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const uint8_t* oat_code_begin = reinterpret_cast<const uint8_t*>(GetQuickOatCodeBegin(m));
if (oat_code_begin == nullptr) {
return nullptr;
@@ -1797,7 +1797,7 @@
return oat_code_begin + GetQuickOatCodeSize(m);
}
- static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
ImageDumper* state = reinterpret_cast<ImageDumper*>(arg);
@@ -1969,7 +1969,7 @@
}
void DumpMethod(ArtMethod* method, std::ostream& indent_os)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const void* quick_oat_code_begin = GetQuickOatCodeBegin(method);
const void* quick_oat_code_end = GetQuickOatCodeEnd(method);
@@ -2149,7 +2149,7 @@
}
void DumpOutliers(std::ostream& os)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t sum_of_sizes = 0;
size_t sum_of_sizes_squared = 0;
size_t sum_of_expansion = 0;
@@ -2253,7 +2253,7 @@
}
void Dump(std::ostream& os, std::ostream& indent_os)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
{
os << "art_file_bytes = " << PrettySize(file_bytes) << "\n\n"
<< "art_file_bytes = header_bytes + object_bytes + alignment_bytes\n";
@@ -2370,7 +2370,7 @@
static int DumpImage(gc::space::ImageSpace* image_space,
OatDumperOptions* options,
- std::ostream* os) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::ostream* os) REQUIRES_SHARED(Locks::mutator_lock_) {
const ImageHeader& image_header = image_space->GetImageHeader();
if (!image_header.IsValid()) {
fprintf(stderr, "Invalid image header %s\n", image_space->GetImageLocation().c_str());
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 3f6531b..b7ce02c 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -462,7 +462,7 @@
public:
explicit PatchOatArtFieldVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtField* field) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Visit(ArtField* field) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dest = patch_oat_->RelocatedCopyOf(field);
dest->SetDeclaringClass(patch_oat_->RelocatedAddressOfPointer(field->GetDeclaringClass()));
}
@@ -480,7 +480,7 @@
public:
explicit PatchOatArtMethodVisitor(PatchOat* patch_oat) : patch_oat_(patch_oat) {}
- void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* const dest = patch_oat_->RelocatedCopyOf(method);
patch_oat_->FixupMethod(method, dest);
}
@@ -523,7 +523,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
*roots[i] = patch_oat_->RelocatedAddressOfPointer(*roots[i]);
}
@@ -531,7 +531,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
roots[i]->Assign(patch_oat_->RelocatedAddressOfPointer(roots[i]->AsMirrorPtr()));
}
diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h
index 64efea9d..a97b051 100644
--- a/patchoat/patchoat.h
+++ b/patchoat/patchoat.h
@@ -99,14 +99,14 @@
bool new_oat_out); // Output oat was newly created?
static void BitmapCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<PatchOat*>(arg)->VisitObject(obj);
}
void VisitObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupMethod(ArtMethod* object, ArtMethod* copy)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Patches oat in place, modifying the oat_file given to the constructor.
bool PatchElf();
@@ -115,18 +115,18 @@
template <typename ElfFileImpl>
bool PatchOatHeader(ElfFileImpl* oat_file);
- bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_);
- void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
- void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
- void PatchImTables(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool PatchImage(bool primary_image) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PatchArtFields(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PatchArtMethods(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PatchImTables(const ImageHeader* image_header) REQUIRES_SHARED(Locks::mutator_lock_);
void PatchImtConflictTables(const ImageHeader* image_header)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PatchInternedStrings(const ImageHeader* image_header)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PatchClassTable(const ImageHeader* image_header)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PatchDexFileArrays(mirror::ObjectArray<mirror::Object>* img_roots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool WriteElf(File* out);
bool WriteImage(File* out);
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 04aacfc..22d79cb 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -135,6 +135,7 @@
"native_bridge_art_interface.cc",
"native_stack_dump.cc",
"native/dalvik_system_DexFile.cc",
+ "native/dalvik_system_InMemoryDexClassLoader_DexData.cc",
"native/dalvik_system_VMDebug.cc",
"native/dalvik_system_VMRuntime.cc",
"native/dalvik_system_VMStack.cc",
@@ -380,6 +381,8 @@
"liblz4",
// For liblog, atrace, properties, ashmem, set_sched_policy and socket_peer_is_trusted.
"libcutils",
+ // For common macros.
+ "libbase",
],
static: {
static_libs: ["libsigchain_dummy"],
@@ -388,6 +391,8 @@
shared_libs: ["libsigchain"],
},
export_include_dirs: ["."],
+ // ART's macros.h depends on libbase's macros.h.
+ export_shared_lib_headers: ["libbase"],
}
gensrcs {
@@ -450,8 +455,8 @@
art_cc_library {
name: "libartd",
defaults: [
- "libart_defaults",
- "art_debug_defaults",
+ "art_debug_defaults",
+ "libart_defaults",
],
}
diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc
index 492a12d..cb8edff 100644
--- a/runtime/arch/arm/entrypoints_init_arm.cc
+++ b/runtime/arch/arm/entrypoints_init_arm.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <math.h>
+#include <string.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index e25e93f..c51c336 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -1086,25 +1086,6 @@
// Load the class (r2)
ldr r2, [r2, r0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
cbz r2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
- // Check class status.
- ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
- cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne .Lart_quick_alloc_object_rosalloc_slow_path
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor r3, r3, r3
- add r2, r2, r3
- // Check access flags has
- // kAccClassIsFinalizable
- ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
- bne .Lart_quick_alloc_object_rosalloc_slow_path
ldr r3, [r9, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
@@ -1113,22 +1094,21 @@
cmp r3, r12
bhs .Lart_quick_alloc_object_rosalloc_slow_path
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3)
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3)
cmp r3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
- // local allocation
+ // local allocation. Also does the
+ // initialized and finalizable checks.
bhs .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
- // from the size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
- sub r3, r3, #1
- lsr r3, r3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
+ // from the size. Since the size is
+ // already aligned we can combine the
+ // two shifts together.
+ add r12, r9, r3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+                                                             // Subtract pointer size since there
+ // are no runs for 0 byte allocations
+ // and the size is already aligned.
// Load the rosalloc run (r12)
- add r12, r9, r3, lsl #POINTER_SIZE_SHIFT
- ldr r12, [r12, #THREAD_ROSALLOC_RUNS_OFFSET]
+ ldr r12, [r12, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)]
// Load the free list head (r3). This
// will be the return val.
ldr r3, [r12, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
@@ -1153,7 +1133,7 @@
// to later accesses to the class
// object. Alternatively we could use
// "ishst" if we use load-acquire for
- // the class status load.)
+ // the object size load.
// Needs to be done before pushing on
// allocation since Heap::VisitObjects
// relies on seeing the class pointer.
@@ -1191,25 +1171,6 @@
// Need to preserve r0 and r1 to the slow path.
.macro ALLOC_OBJECT_TLAB_FAST_PATH slowPathLabel
cbz r2, \slowPathLabel // Check null class
- // Check class status.
- ldr r3, [r2, #MIRROR_CLASS_STATUS_OFFSET]
- cmp r3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne \slowPathLabel
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor r3, r3, r3
- add r2, r2, r3
- // Check access flags has
- // kAccClassIsFinalizable.
- ldr r3, [r2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tst r3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
- bne \slowPathLabel
// Load thread_local_pos (r12) and
// thread_local_end (r3) with ldrd.
// Check constraints for ldrd.
@@ -1218,16 +1179,10 @@
#endif
ldrd r12, r3, [r9, #THREAD_LOCAL_POS_OFFSET]
sub r12, r3, r12 // Compute the remaining buf size.
- ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (r3).
- cmp r3, r12 // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ ldr r3, [r2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (r3).
+ cmp r3, r12 // Check if it fits.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber r0 and r1.
- // Round up the object size by the
- // object alignment. (addr + 7) & ~7.
- add r3, r3, #OBJECT_ALIGNMENT_MASK
- and r3, r3, #OBJECT_ALIGNMENT_MASK_TOGGLED
// Reload old thread_local_pos (r0)
// for the return value.
ldr r0, [r9, #THREAD_LOCAL_POS_OFFSET]
@@ -1244,7 +1199,7 @@
// the fields of the class.
// Alternatively we could use "ishst"
// if we use load-acquire for the
- // class status load.)
+ // object size load.)
dmb ish
bx lr
.endm
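
The rewritten ARM fast paths above lean on two properties. First, the field at MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET appears to hold the already-aligned object size only for classes that are initialized and non-finalizable, and a huge sentinel otherwise, so a single unsigned size comparison doubles as the status and finalizable checks that the removed instructions performed explicitly. Second, because that size is already quantum-aligned, the rosalloc bracket lookup can fold the divide-by-quantum and multiply-by-pointer-size into one shift and move the minus-one into the load offset. A sketch of that index arithmetic under assumed constants (16-byte rosalloc quantum, 8-byte pointers); the constant names mirror, but are not copied from, the generated asm_support macros.

#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed values for illustration only.
constexpr uint32_t kQuantumShift = 4;     // 16-byte rosalloc brackets.
constexpr uint32_t kPointerShift = 3;     // 8-byte pointers (64-bit build).
constexpr size_t kRunsOffset = 0x200;     // Placeholder for THREAD_ROSALLOC_RUNS_OFFSET.

// Old sequence: subtract 1, shift by the quantum, then scale by pointer size.
size_t OldRunSlotOffset(uint32_t aligned_size) {
  uint32_t index = (aligned_size - 1) >> kQuantumShift;
  return kRunsOffset + (size_t(index) << kPointerShift);
}

// New sequence: one combined shift, minus one pointer folded into the load offset.
size_t NewRunSlotOffset(uint32_t aligned_size) {
  size_t scaled = aligned_size >> (kQuantumShift - kPointerShift);  // == (index + 1) * 8
  return scaled + kRunsOffset - (size_t(1) << kPointerShift);
}

int main() {
  for (uint32_t size = 16; size <= 128; size += 16) {  // Sizes are already aligned.
    assert(OldRunSlotOffset(size) == NewRunSlotOffset(size));
  }
  return 0;
}
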
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 55b09c3..c2078f0 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <math.h>
+#include <string.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index 202846a..03768af 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1796,7 +1796,7 @@
ldr x1, [sp] // load referrer
ldr w2, [x1, #ART_METHOD_DECLARING_CLASS_OFFSET] // load declaring class
ldr x1, [x2, #DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET] // load string dex cache
- and x2, x0, #STRING_DEX_CACHE_SIZE_MINUS_ONE // get masked string index into x2
+ ubfx x2, x0, #0, #STRING_DEX_CACHE_HASH_BITS // get masked string index into x2
ldr x2, [x1, x2, lsl #STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT] // load dex cache pair into x2
cmp x0, x2, lsr #32 // compare against upper 32 bits
bne .Lart_quick_resolve_string_slow_path
@@ -1822,8 +1822,14 @@
tbnz x3, #LOCK_WORD_MARK_BIT_SHIFT, .Lart_quick_resolve_string_no_rb
// Save LR so that we can return, also x1 for alignment purposes.
stp x1, xLR, [sp, #-16]! // Save x1, LR.
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset x1, 0
+ .cfi_rel_offset xLR, 8
bl artReadBarrierMark // Get the marked string back.
ldp x1, xLR, [sp], #16 // Restore registers.
+ .cfi_restore xLR
+ .cfi_restore x1
+ .cfi_adjust_cfa_offset -16
.Lart_quick_resolve_string_no_rb:
ret
@@ -1854,47 +1860,27 @@
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
cbz x2, .Lart_quick_alloc_object_rosalloc_slow_path // Check null class
- // Check class status.
- ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET]
- cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne .Lart_quick_alloc_object_rosalloc_slow_path
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor x3, x3, x3
- add x2, x2, x3
- // Check access flags has
- // kAccClassIsFinalizable
- ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tst x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE
- bne .Lart_quick_alloc_object_rosalloc_slow_path
ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET] // Check if the thread local
// allocation stack has room.
// ldp won't work due to large offset.
ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
cmp x3, x4
bhs .Lart_quick_alloc_object_rosalloc_slow_path
- ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (x3)
+ ldr w3, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x3)
cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE // Check if the size is for a thread
- // local allocation
+ // local allocation. Also does the
+ // finalizable and initialization
+ // checks.
bhs .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
- // from the size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
- sub x3, x3, #1
- lsr x3, x3, #ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT
- // Load the rosalloc run (x4)
- add x4, xSELF, x3, lsl #POINTER_SIZE_SHIFT
- ldr x4, [x4, #THREAD_ROSALLOC_RUNS_OFFSET]
+ // from the size. Since the size is
+ // already aligned we can combine the
+ // two shifts together.
+ add x4, xSELF, x3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
+                                                             // Subtract pointer size since there
+ // are no runs for 0 byte allocations
+ // and the size is already aligned.
+ ldr x4, [x4, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)]
// Load the free list head (x3). This
// will be the return val.
ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
@@ -1915,11 +1901,11 @@
str w2, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
// Fence. This is "ish" not "ishst" so
// that it also ensures ordering of
- // the class status load with respect
+ // the object size load with respect
// to later accesses to the class
// object. Alternatively we could use
// "ishst" if we use load-acquire for
- // the class status load.)
+                                                             // the object size load.
// Needs to be done before pushing on
// allocation since Heap::VisitObjects
// relies on seeing the class pointer.
@@ -2027,48 +2013,24 @@
ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED \slowPathLabel
.endm
+// TODO: delete ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since it is the same as
+// ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED.
.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel
- ldr w3, [x2, #MIRROR_CLASS_STATUS_OFFSET] // Check class status.
- cmp x3, #MIRROR_CLASS_STATUS_INITIALIZED
- bne \slowPathLabel
- // Add a fake dependence from the
- // following access flag and size
- // loads to the status load.
- // This is to prevent those loads
- // from being reordered above the
- // status load and reading wrong
- // values (an alternative is to use
- // a load-acquire for the status).
- eor x3, x3, x3
- add x2, x2, x3
ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED \slowPathLabel
.endm
.macro ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED slowPathLabel
- // Check access flags has
- // kAccClassIsFinalizable.
- ldr w3, [x2, #MIRROR_CLASS_ACCESS_FLAGS_OFFSET]
- tbnz x3, #ACCESS_FLAGS_CLASS_IS_FINALIZABLE_BIT, \slowPathLabel
- // Load thread_local_pos (x4) and
- // thread_local_end (x5).
ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
- sub x6, x5, x4 // Compute the remaining buf size.
- ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_OFFSET] // Load the object size (x7).
- cmp x7, x6 // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ ldr w7, [x2, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET] // Load the object size (x7).
+ add x6, x4, x7 // Add object size to tlab pos.
+ cmp x6, x5 // Check if it fits, overflow works
+ // since the tlab pos and end are 32
+ // bit values.
bhi \slowPathLabel
// "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
- // Round up the object size by the
- // object alignment. (addr + 7) & ~7.
- add x7, x7, #OBJECT_ALIGNMENT_MASK
- and x7, x7, #OBJECT_ALIGNMENT_MASK_TOGGLED
- // Move old thread_local_pos to x0
- // for the return value.
mov x0, x4
- add x5, x0, x7
- str x5, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
+ str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET] // Store new thread_local_pos.
ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET] // Increment thread_local_objects.
add x5, x5, #1
str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
@@ -2080,7 +2042,7 @@
// the fields of the class.
// Alternatively we could use "ishst"
// if we use load-acquire for the
- // class status load.)
+ // object size load.)
dmb ish
ret
.endm
@@ -2107,7 +2069,7 @@
END art_quick_alloc_object_tlab
// The common code for art_quick_alloc_object_*region_tlab
-.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved
+.macro GENERATE_ALLOC_OBJECT_REGION_TLAB name, entrypoint, fast_path, is_resolved, read_barrier
ENTRY \name
// Fast path region tlab allocation.
// x0: type_idx/resolved class/return value, x1: ArtMethod*, xSELF(x19): Thread::Current
@@ -2123,16 +2085,19 @@
ldr x2, [x1, #ART_METHOD_DEX_CACHE_TYPES_OFFSET_64] // Load dex cache resolved types array
// Load the class (x2)
ldr w2, [x2, x0, lsl #COMPRESSED_REFERENCE_SIZE_SHIFT]
+ // If the class is null, go slow path. The check is required to read the lock word.
+ cbz w2, .Lslow_path\name
.endif
+.if \read_barrier
// Most common case: GC is not marking.
ldr w3, [xSELF, #THREAD_IS_GC_MARKING_OFFSET]
cbnz x3, .Lmarking\name
+.endif
.Ldo_allocation\name:
\fast_path .Lslow_path\name
.Lmarking\name:
+.if \read_barrier
// GC is marking, check the lock word of the class for the mark bit.
- // If the class is null, go slow path. The check is required to read the lock word.
- cbz w2, .Lslow_path\name
// Class is not null, check mark bit in lock word.
ldr w3, [x2, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
// If the bit is not zero, do the allocation.
@@ -2140,14 +2105,23 @@
// The read barrier slow path. Mark
// the class.
stp x0, x1, [sp, #-32]! // Save registers (x0, x1, lr).
+ .cfi_adjust_cfa_offset 32
+ .cfi_rel_offset x0, 0
+ .cfi_rel_offset x1, 8
str xLR, [sp, #16] // Align sp by 16 bytes.
+ .cfi_rel_offset xLR, 16
mov x0, x2 // Pass the class as the first param.
bl artReadBarrierMark
mov x2, x0 // Get the (marked) class back.
ldp x0, x1, [sp, #0] // Restore registers.
+ .cfi_restore x0
+ .cfi_restore x1
ldr xLR, [sp, #16]
+ .cfi_restore xLR
add sp, sp, #32
+ .cfi_adjust_cfa_offset -32
b .Ldo_allocation\name
+.endif
.Lslow_path\name:
SETUP_SAVE_REFS_ONLY_FRAME // Save callee saves in case of GC.
mov x2, xSELF // Pass Thread::Current.
@@ -2157,9 +2131,14 @@
END \name
.endm
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH, 0
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1
-GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1
+// Use ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED since the null check is already done in GENERATE_ALLOC_OBJECT_REGION_TLAB.
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_region_tlab, artAllocObjectFromCodeRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 0, 1
+// No read barrier for the resolved or initialized cases since the caller is responsible for the
+// read barrier due to the to-space invariant.
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED, 1, 0
+GENERATE_ALLOC_OBJECT_REGION_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, ALLOC_OBJECT_TLAB_FAST_PATH_INITIALIZED, 1, 0
+
+// TODO: We could use this macro for the normal tlab allocator too.
// The common code for art_quick_alloc_array_*region_tlab
.macro GENERATE_ALLOC_ARRAY_REGION_TLAB name, entrypoint, fast_path, is_resolved
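
Summarizing the macro change just above: GENERATE_ALLOC_OBJECT_REGION_TLAB now takes an explicit read_barrier flag. Only the unresolved entrypoint marks the class through the read barrier; the resolved and initialized callers are expected to pass in a to-space class reference already. The bump check adds the object size to thread_local_pos and compares against thread_local_end, and the add cannot wrap because both values are 32-bit quantities held in 64-bit registers. A rough C++ restatement of that fast path follows, with invented type and helper names standing in for the assembly labels and thread fields; it is a sketch of the control flow, not the runtime's implementation.

#include <cstdint>

// Invented stand-ins for the thread-local TLAB fields and runtime hooks.
struct FakeThread {
  uintptr_t tlab_pos;
  uintptr_t tlab_end;
  bool gc_is_marking;
};

struct FakeClass {
  uint32_t object_size_alloc_fast_path;  // Sentinel-encodes "take the slow path".
  bool lock_word_mark_bit;
};

// Stubs so the sketch compiles; the real work happens in the runtime.
static FakeClass* ReadBarrierMarkClass(FakeClass* klass) { return klass; }
static void* SlowPathAlloc(FakeClass*, FakeThread*) { return nullptr; }

// read_barrier mirrors the new macro parameter: true only for the
// unresolved entrypoint, where the class may still be a from-space pointer.
void* RegionTlabAllocFastPath(FakeClass* klass, FakeThread* self, bool read_barrier) {
  if (klass == nullptr) {
    return SlowPathAlloc(klass, self);              // Null check before reading the lock word.
  }
  if (read_barrier && self->gc_is_marking && !klass->lock_word_mark_bit) {
    klass = ReadBarrierMarkClass(klass);            // artReadBarrierMark equivalent.
  }
  uintptr_t pos = self->tlab_pos;
  uintptr_t new_pos = pos + klass->object_size_alloc_fast_path;
  if (new_pos > self->tlab_end) {                   // The size sentinel also forces this branch.
    return SlowPathAlloc(klass, self);
  }
  self->tlab_pos = new_pos;                         // Bump the TLAB pointer.
  return reinterpret_cast<void*>(pos);              // Caller stores the class pointer and fences.
}
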
diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc
index 09f8849..38aa67c 100644
--- a/runtime/arch/mips/entrypoints_init_mips.cc
+++ b/runtime/arch/mips/entrypoints_init_mips.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <string.h>
+
#include "atomic.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc
index 34b0638..a037905 100644
--- a/runtime/arch/mips64/entrypoints_init_mips64.cc
+++ b/runtime/arch/mips64/entrypoints_init_mips64.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <string.h>
+
#include "atomic.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 10adb3a..507dbf0 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1287,7 +1287,7 @@
static void GetSetBooleanStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
@@ -1318,7 +1318,7 @@
}
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1349,7 +1349,7 @@
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1384,7 +1384,7 @@
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1419,7 +1419,7 @@
static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1449,7 +1449,7 @@
}
static void GetSetShortStatic(ArtField* f, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1480,7 +1480,7 @@
static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1514,7 +1514,7 @@
}
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1549,7 +1549,7 @@
static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1585,7 +1585,7 @@
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1626,7 +1626,7 @@
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
reinterpret_cast<size_t>(val),
0U,
@@ -1646,7 +1646,7 @@
static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1670,7 +1670,7 @@
static void set_and_check_instance(ArtField* f, mirror::Object* trg,
mirror::Object* val, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
reinterpret_cast<size_t>(val),
@@ -1693,7 +1693,7 @@
static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
(defined(__x86_64__) && !defined(__APPLE__))
set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
@@ -1716,7 +1716,7 @@
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) \
|| defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -1749,7 +1749,7 @@
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
Thread* self, ArtMethod* referrer, StubTest* test)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc
index bdf11da..0a10a3c 100644
--- a/runtime/arch/x86/entrypoints_init_x86.cc
+++ b/runtime/arch/x86/entrypoints_init_x86.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <math.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 282f10d..67ebf50 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -897,17 +897,6 @@
movl 0(%edx, %eax, COMPRESSED_REFERENCE_SIZE), %edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_rosalloc_slow_path
- // Check class status
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%edx)
- jne .Lart_quick_alloc_object_rosalloc_slow_path
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%edx)
- jnz .Lart_quick_alloc_object_rosalloc_slow_path
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
// Check if the thread local allocation
@@ -916,21 +905,19 @@
cmpl THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%ebx), %edi
jae .Lart_quick_alloc_object_rosalloc_slow_path
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%edx), %edi // Load the object size (edi)
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %edi // Load the object size (edi)
// Check if the size is for a thread
- // local allocation
+ // local allocation. Also does the
+ // finalizable and initialization check.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %edi
ja .Lart_quick_alloc_object_rosalloc_slow_path
- decl %edi
shrl LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %edi // Calculate the rosalloc bracket index
// from object size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
// Load thread local rosalloc run (ebx)
- movl THREAD_ROSALLOC_RUNS_OFFSET(%ebx, %edi, __SIZEOF_POINTER__), %ebx
+ // Subtract __SIZEOF_POINTER__ to subtract
+ // one from edi as there is no 0 byte run
+ // and the size is already aligned.
+ movl (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%ebx, %edi, __SIZEOF_POINTER__), %ebx
// Load free_list head (edi),
// this will be the return value.
movl (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%ebx), %edi
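Note on the rosalloc hunk above: the shortened index computation works because the size read from the class is already bracket-aligned, so only the shift remains and the "minus one" is folded into the table base address. A rough C++ sketch of the before/after math, with quantum and limit values assumed for illustration only (the real constants come from runtime/gc/allocator/rosalloc.h):

#include <cstddef>

constexpr size_t kBracketQuantum = 16;       // assumed bracket quantum
constexpr size_t kBracketQuantumShift = 4;   // log2(kBracketQuantum)

// Old sequence: round the raw object size up to the quantum, divide by the
// quantum, subtract one (the assembly used dec+shr as a shorter equivalent).
inline size_t BracketIndexOld(size_t raw_size) {
  size_t aligned = (raw_size + kBracketQuantum - 1) & ~(kBracketQuantum - 1);
  return aligned / kBracketQuantum - 1;
}

// New sequence: the size loaded from the class's alloc-fast-path field is
// already quantum-aligned, so only the shift is needed; the "minus one" is
// folded into the table base (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__).
inline size_t BracketSlot(size_t aligned_size) {
  return aligned_size >> kBracketQuantumShift;   // caller indexes runs[slot - 1]
}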
@@ -990,32 +977,17 @@
MACRO1(ALLOC_OBJECT_TLAB_FAST_PATH, slowPathLabel)
testl %edx, %edx // Check null class
jz VAR(slowPathLabel)
- // Check class status.
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%edx)
- jne VAR(slowPathLabel)
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%edx)
- jnz VAR(slowPathLabel)
movl %fs:THREAD_SELF_OFFSET, %ebx // ebx = thread
movl THREAD_LOCAL_END_OFFSET(%ebx), %edi // Load thread_local_end.
subl THREAD_LOCAL_POS_OFFSET(%ebx), %edi // Compute the remaining buffer size.
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%edx), %esi // Load the object size.
- cmpl %edi, %esi // Check if it fits. OK to do this
- // before rounding up the object size
- // assuming the buf size alignment.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%edx), %esi // Load the object size.
+ cmpl %edi, %esi // Check if it fits.
ja VAR(slowPathLabel)
- addl LITERAL(OBJECT_ALIGNMENT_MASK), %esi // Align the size by 8. (addr + 7) & ~7.
- andl LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED), %esi
movl THREAD_LOCAL_POS_OFFSET(%ebx), %eax // Load thread_local_pos
// as allocated object.
addl %eax, %esi // Add the object size.
movl %esi, THREAD_LOCAL_POS_OFFSET(%ebx) // Update thread_local_pos.
- addl LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
+ incl THREAD_LOCAL_OBJECTS_OFFSET(%ebx) // Increase thread_local_objects.
// Store the class pointer in the header.
// No fence needed for x86.
POISON_HEAP_REF edx
@@ -1109,18 +1081,15 @@
END_FUNCTION art_quick_alloc_object_region_tlab
DEFINE_FUNCTION art_quick_resolve_string
- SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
- movl FRAME_SIZE_SAVE_REFS_ONLY(%esp), %ecx // get referrer
+ movl 4(%esp), %ecx // get referrer
movl ART_METHOD_DECLARING_CLASS_OFFSET(%ecx), %ecx // get declaring class
movl DECLARING_CLASS_DEX_CACHE_STRINGS_OFFSET(%ecx), %ecx // get string dex cache
movl LITERAL(STRING_DEX_CACHE_SIZE_MINUS_ONE), %edx
andl %eax, %edx
- shl LITERAL(STRING_DEX_CACHE_ELEMENT_SIZE_SHIFT), %edx
- addl %ecx, %edx
- movlps (%edx), %xmm0 // load string idx and pointer to xmm0
- movd %xmm0, %ecx // extract pointer
- pshufd LITERAL(0x55), %xmm0, %xmm0 // shuffle index into lowest bits
- movd %xmm0, %edx // extract index
+ movlps (%ecx, %edx, STRING_DEX_CACHE_ELEMENT_SIZE), %xmm0 // load string idx and ptr to xmm0
+ movd %xmm0, %ecx // extract pointer
+ pshufd LITERAL(0x55), %xmm0, %xmm0 // shuffle index into lowest bits
+ movd %xmm0, %edx // extract index
cmp %edx, %eax
jne .Lart_quick_resolve_string_slow_path
movl %ecx, %eax
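The art_quick_resolve_string fast path above now reads an 8-byte {reference, index} pair from a direct-mapped string dex cache in one movlps and only sets up a frame on a miss. A conceptual model follows; the entry layout and names are assumptions (the real layout is mirror::DexCache's string cache pair):

#include <cstdint>

struct StringDexCacheEntry {
  uint32_t string_ref;  // compressed mirror::String reference (low word)
  uint32_t string_idx;  // dex string index stored as a tag (high word)
};

inline uint32_t ResolveStringFastPath(const StringDexCacheEntry* cache,
                                      uint32_t size_minus_one,  // power-of-two cache size - 1
                                      uint32_t string_idx) {
  // Direct-mapped lookup: mask the requested index, then check the stored tag.
  const StringDexCacheEntry e = cache[string_idx & size_minus_one];
  if (e.string_idx != string_idx || e.string_ref == 0u) {
    return 0u;  // miss: fall back to the runtime slow path (which builds the frame)
  }
  return e.string_ref;  // hit: return without any frame setup
}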
@@ -1128,10 +1097,10 @@
cmpl LITERAL(0), %fs:THREAD_IS_GC_MARKING_OFFSET
jne .Lart_quick_resolve_string_marking
#endif
- RESTORE_SAVE_REFS_ONLY_FRAME
ret
.Lart_quick_resolve_string_slow_path:
// Outgoing argument set up
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
subl LITERAL(8), %esp // push padding
CFI_ADJUST_CFA_OFFSET(8)
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
@@ -1143,6 +1112,7 @@
RESTORE_SAVE_REFS_ONLY_FRAME
RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
.Lart_quick_resolve_string_marking:
+ SETUP_SAVE_REFS_ONLY_FRAME ebx, ebx
testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%eax)
jnz .Lart_quick_resolve_string_no_rb
subl LITERAL(12), %esp // alignment padding
diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
index 42b9699..8c425d5 100644
--- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc
+++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <math.h>
+
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "entrypoints/quick/quick_default_externs.h"
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index f941c52..b805703 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -935,17 +935,6 @@
movl 0(%rdx, %rdi, COMPRESSED_REFERENCE_SIZE), %edx
testl %edx, %edx // Check null class
jz .Lart_quick_alloc_object_rosalloc_slow_path
- // Check class status.
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
- jne .Lart_quick_alloc_object_rosalloc_slow_path
- // We don't need a fence (between the
- // the status and the access flag
- // loads) here because every load is
- // a load acquire on x86.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
- jnz .Lart_quick_alloc_object_rosalloc_slow_path
// Check if the thread local
// allocation stack has room.
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
@@ -953,22 +942,21 @@
cmpq THREAD_LOCAL_ALLOC_STACK_END_OFFSET(%r8), %rcx
jae .Lart_quick_alloc_object_rosalloc_slow_path
// Load the object size
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %eax
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %eax
// Check if the size is for a thread
- // local allocation
+ // local allocation. Also does the
+ // initialized and finalizable checks.
cmpl LITERAL(ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE), %eax
ja .Lart_quick_alloc_object_rosalloc_slow_path
// Compute the rosalloc bracket index
// from the size.
- // Align up the size by the rosalloc
- // bracket quantum size and divide
- // by the quantum size and subtract
- // by 1. This code is a shorter but
- // equivalent version.
- subq LITERAL(1), %rax
shrq LITERAL(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT), %rax
// Load the rosalloc run (r9)
- movq THREAD_ROSALLOC_RUNS_OFFSET(%r8, %rax, __SIZEOF_POINTER__), %r9
+ // Subtract __SIZEOF_POINTER__ to
+ // subtract one from edi as there is no
+ // subtract one from rax as there is no
+ // 0 byte run and the size is already
+ // aligned.
+ movq (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)(%r8, %rax, __SIZEOF_POINTER__), %r9
// Load the free list head (rax). This
// will be the return val.
movq (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)(%r9), %rax
@@ -1020,21 +1008,12 @@
END_MACRO
// The common fast path code for art_quick_alloc_object_resolved_region_tlab.
+// TODO: delete ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH since it is the same as
+// ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH.
//
// RDI: type_idx, RSI: ArtMethod*, RDX/EDX: the class, RAX: return value.
// RCX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH, slowPathLabel)
- // Check class status.
- cmpl LITERAL(MIRROR_CLASS_STATUS_INITIALIZED), MIRROR_CLASS_STATUS_OFFSET(%rdx)
- jne RAW_VAR(slowPathLabel)
- // No fake dependence needed on x86
- // between status and flags load,
- // since each load is a load-acquire,
- // no loads reordering.
- // Check access flags has
- // kAccClassIsFinalizable
- testl LITERAL(ACCESS_FLAGS_CLASS_IS_FINALIZABLE), MIRROR_CLASS_ACCESS_FLAGS_OFFSET(%rdx)
- jnz RAW_VAR(slowPathLabel)
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH(RAW_VAR(slowPathLabel))
END_MACRO
@@ -1044,19 +1023,16 @@
// RCX: scratch, r8: Thread::Current().
MACRO1(ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH, slowPathLabel)
movq %gs:THREAD_SELF_OFFSET, %r8 // r8 = thread
- movl MIRROR_CLASS_OBJECT_SIZE_OFFSET(%rdx), %ecx // Load the object size.
+ movl MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET(%rdx), %ecx // Load the object size.
movq THREAD_LOCAL_POS_OFFSET(%r8), %rax
- leaq OBJECT_ALIGNMENT_MASK(%rax, %rcx), %rcx // Add size to pos, note that these
+ addq %rax, %rcx // Add size to pos, note that these
// are both 32 bit ints, overflow
// will cause the add to be past the
// end of the thread local region.
- // Also sneak in alignment mask add.
- andq LITERAL(OBJECT_ALIGNMENT_MASK_TOGGLED64), %rcx // Align the size by 8. (addr + 7) &
- // ~7.
cmpq THREAD_LOCAL_END_OFFSET(%r8), %rcx // Check if it fits.
ja RAW_VAR(slowPathLabel)
movq %rcx, THREAD_LOCAL_POS_OFFSET(%r8) // Update thread_local_pos.
- addq LITERAL(1), THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects.
+ incq THREAD_LOCAL_OBJECTS_OFFSET(%r8) // Increase thread_local_objects.
// Store the class pointer in the
// header.
// No fence needed for x86.
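The switch from MIRROR_CLASS_OBJECT_SIZE_OFFSET to MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET lets the single size comparison also stand in for the finalizable and initialization checks, presumably because classes that must not take the fast path publish an oversized value in that field. A minimal sketch of the resulting bump-pointer path, with field names and the sentinel convention assumed:

#include <cstdint>

struct ThreadTlab {
  uintptr_t pos;       // THREAD_LOCAL_POS_OFFSET
  uintptr_t end;       // THREAD_LOCAL_END_OFFSET
  uintptr_t objects;   // THREAD_LOCAL_OBJECTS_OFFSET
};

struct ClassInfo {
  uint32_t object_size_alloc_fast_path;  // pre-aligned size, or a huge sentinel
};

inline void* AllocObjectTlabFastPath(ThreadTlab* tlab, const ClassInfo* klass) {
  uintptr_t size = klass->object_size_alloc_fast_path;
  uintptr_t new_pos = tlab->pos + size;    // no separate align-up step remains
  if (new_pos > tlab->end) {               // the sentinel always fails this test
    return nullptr;                        // caller takes the runtime slow path
  }
  void* obj = reinterpret_cast<void*>(tlab->pos);
  tlab->pos = new_pos;
  ++tlab->objects;                         // incl/incq in the assembly
  return obj;  // caller still stores the class pointer in the object header
}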
@@ -1268,28 +1244,9 @@
int3
int3
#endif
+ // No read barrier since the caller is responsible for that.
movq %rdi, %rdx
- cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path_exit:
ALLOC_OBJECT_RESOLVED_TLAB_FAST_PATH .Lart_quick_alloc_object_resolved_region_tlab_slow_path
-.Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_marking:
- // Check the mark bit, if it is 1 avoid the read barrier.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jnz .Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path_exit
-.Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- // Outgoing argument set up
- movq %rdx, %rdi // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movq %rax, %rdx
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
- jmp .Lart_quick_alloc_object_resolved_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_resolved_region_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeResolvedRegionTLAB
END_FUNCTION art_quick_alloc_object_resolved_region_tlab
@@ -1303,29 +1260,9 @@
int3
int3
#endif
- // Might need a special macro since rsi and edx is 32b/64b mismatched.
movq %rdi, %rdx
- cmpl LITERAL(0), %gs:THREAD_IS_GC_MARKING_OFFSET
- jne .Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_marking
-.Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path_exit:
+ // No read barrier since the caller is responsible for that.
ALLOC_OBJECT_INITIALIZED_TLAB_FAST_PATH .Lart_quick_alloc_object_initialized_region_tlab_slow_path
-.Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_marking:
- // Check the mark bit, if it is 1 avoid the read barrier.
- testl LITERAL(LOCK_WORD_MARK_BIT_MASK_SHIFTED), MIRROR_OBJECT_LOCK_WORD_OFFSET(%edx)
- jnz .Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path
-.Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path:
- // The read barrier slow path. Mark the class.
- PUSH rdi
- PUSH rsi
- subq LITERAL(8), %rsp // 16 byte alignment
- // Outgoing argument set up
- movq %rdx, %rdi // Pass the class as the first param.
- call SYMBOL(artReadBarrierMark) // cxx_name(mirror::Object* obj)
- movq %rax, %rdx
- addq LITERAL(8), %rsp
- POP rsi
- POP rdi
- jmp .Lart_quick_alloc_object_initialized_region_tlab_class_load_read_barrier_slow_path_exit
.Lart_quick_alloc_object_initialized_region_tlab_slow_path:
ALLOC_OBJECT_TLAB_SLOW_PATH artAllocObjectFromCodeInitializedRegionTLAB
END_FUNCTION art_quick_alloc_object_initialized_region_tlab
diff --git a/runtime/art_field-inl.h b/runtime/art_field-inl.h
index a102858..ef75f94 100644
--- a/runtime/art_field-inl.h
+++ b/runtime/art_field-inl.h
@@ -254,7 +254,7 @@
SetObj<kTransactionActive>(object, l);
}
-inline const char* ArtField::GetName() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline const char* ArtField::GetName() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t field_index = GetDexFieldIndex();
if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
DCHECK(IsStatic());
@@ -265,7 +265,7 @@
return dex_file->GetFieldName(dex_file->GetFieldId(field_index));
}
-inline const char* ArtField::GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline const char* ArtField::GetTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t field_index = GetDexFieldIndex();
if (UNLIKELY(GetDeclaringClass()->IsProxyClass())) {
DCHECK(IsStatic());
@@ -279,11 +279,11 @@
}
inline Primitive::Type ArtField::GetTypeAsPrimitiveType()
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Primitive::GetType(GetTypeDescriptor()[0]);
}
-inline bool ArtField::IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline bool ArtField::IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetTypeAsPrimitiveType() != Primitive::kPrimNot;
}
@@ -305,15 +305,15 @@
return type;
}
-inline size_t ArtField::FieldSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline size_t ArtField::FieldSize() REQUIRES_SHARED(Locks::mutator_lock_) {
return Primitive::ComponentSize(GetTypeAsPrimitiveType());
}
-inline mirror::DexCache* ArtField::GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline mirror::DexCache* ArtField::GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetDeclaringClass()->GetDexCache();
}
-inline const DexFile* ArtField::GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_) {
+inline const DexFile* ArtField::GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetDexCache()->GetDexFile();
}
@@ -349,7 +349,7 @@
template <bool kExactOffset>
static inline ArtField* FindFieldWithOffset(
const IterationRange<StrideIterator<ArtField>>& fields,
- uint32_t field_offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t field_offset) REQUIRES_SHARED(Locks::mutator_lock_) {
for (ArtField& field : fields) {
if (kExactOffset) {
if (field.GetOffset().Uint32Value() == field_offset) {
diff --git a/runtime/art_field.h b/runtime/art_field.h
index aaccbf3..16e6c75 100644
--- a/runtime/art_field.h
+++ b/runtime/art_field.h
@@ -42,27 +42,27 @@
ArtField();
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
access_flags_ = new_access_flags;
}
- bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPublic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
- bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStatic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
- bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
@@ -76,86 +76,86 @@
}
// Offset to field within an Object.
- MemberOffset GetOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetOffset() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset OffsetOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtField, offset_));
}
- MemberOffset GetOffsetDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetOffsetDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetOffset(MemberOffset num_bytes) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetOffset(MemberOffset num_bytes) REQUIRES_SHARED(Locks::mutator_lock_);
// field access, null object for static fields
- uint8_t GetBoolean(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint8_t GetBoolean(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetBoolean(mirror::Object* object, uint8_t z) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetBoolean(mirror::Object* object, uint8_t z) REQUIRES_SHARED(Locks::mutator_lock_);
- int8_t GetByte(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int8_t GetByte(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetByte(mirror::Object* object, int8_t b) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetByte(mirror::Object* object, int8_t b) REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetChar(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetChar(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetChar(mirror::Object* object, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetChar(mirror::Object* object, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
- int16_t GetShort(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int16_t GetShort(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetShort(mirror::Object* object, int16_t s) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetShort(mirror::Object* object, int16_t s) REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetInt(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetInt(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetInt(mirror::Object* object, int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetInt(mirror::Object* object, int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
- int64_t GetLong(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ int64_t GetLong(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetLong(mirror::Object* object, int64_t j) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetLong(mirror::Object* object, int64_t j) REQUIRES_SHARED(Locks::mutator_lock_);
- float GetFloat(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ float GetFloat(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetFloat(mirror::Object* object, float f) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetFloat(mirror::Object* object, float f) REQUIRES_SHARED(Locks::mutator_lock_);
- double GetDouble(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ double GetDouble(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void SetDouble(mirror::Object* object, double d) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetDouble(mirror::Object* object, double d) REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetObject(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetObject(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetObject(mirror::Object* object, mirror::Object* l)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Raw field accesses.
- uint32_t Get32(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t Get32(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void Set32(mirror::Object* object, uint32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint64_t Get64(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint64_t Get64(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void Set64(mirror::Object* object, uint64_t new_value) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Set64(mirror::Object* object, uint64_t new_value) REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetObj(mirror::Object* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetObj(mirror::Object* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
void SetObj(mirror::Object* object, mirror::Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor) NO_THREAD_SAFETY_ANALYSIS;
- bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVolatile() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
}
@@ -164,35 +164,35 @@
// offset.
template <bool kExactOffset = true>
static ArtField* FindInstanceFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a static field with this offset in the given class or null if not found.
// If kExactOffset is true then we only find the matching offset, not the field containing the
// offset.
template <bool kExactOffset = true>
static ArtField* FindStaticFieldWithOffset(mirror::Class* klass, uint32_t field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetName() REQUIRES_SHARED(Locks::mutator_lock_);
// Resolves / returns the name from the dex cache.
mirror::String* GetStringName(Thread* self, bool resolve)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
- Primitive::Type GetTypeAsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
+ Primitive::Type GetTypeAsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsPrimitiveType() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsPrimitiveType() REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kResolve>
- mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetType() REQUIRES_SHARED(Locks::mutator_lock_);
- size_t FieldSize() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t FieldSize() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
GcRoot<mirror::Class>& DeclaringClassRoot() {
return declaring_class_;
@@ -201,15 +201,15 @@
// Update the declaring class with the passed in visitor. Does not use read barrier.
template <typename Visitor>
ALWAYS_INLINE void UpdateObjects(const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
mirror::Class* ProxyFindSystemClass(const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Class* ResolveGetType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Class* ResolveGetType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
mirror::String* ResolveGetStringName(Thread* self, const DexFile& dex_file, uint32_t string_idx,
mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
GcRoot<mirror::Class> declaring_class_;
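The header changes above are a mechanical rename of the thread-safety annotation from SHARED_REQUIRES to REQUIRES_SHARED, matching the shared-capability spelling used by Clang's -Wthread-safety analysis. A hedged illustration of how such a macro typically expands; ART's actual definitions live in its base headers and may differ:

// Illustration only, not ART's real macro definitions.
#define REQUIRES_SHARED(...) \
  __attribute__((requires_shared_capability(__VA_ARGS__)))
#define REQUIRES(...) \
  __attribute__((requires_capability(__VA_ARGS__)))

class __attribute__((capability("mutex"))) ReaderWriterMutex {};
extern ReaderWriterMutex* mutator_lock_;

// Read access: the caller must hold mutator_lock_ at least shared;
// Clang warns at call sites that cannot prove it.
const char* GetName() REQUIRES_SHARED(mutator_lock_);
// Exclusive access would be annotated REQUIRES(mutator_lock_) instead.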
diff --git a/runtime/art_method.h b/runtime/art_method.h
index a90ef23..b25087c 100644
--- a/runtime/art_method.h
+++ b/runtime/art_method.h
@@ -29,7 +29,6 @@
#include "modifiers.h"
#include "mirror/object.h"
#include "read_barrier_option.h"
-#include "stack.h"
#include "utils.h"
namespace art {
@@ -228,20 +227,20 @@
static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
jobject jlr_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetDeclaringClass(mirror::Class *new_declaring_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CASDeclaringClass(mirror::Class* expected_class, mirror::Class* desired_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DeclaringClassOffset() {
return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
@@ -258,7 +257,7 @@
}
// Approximate what kind of method call would be used for this method.
- InvokeType GetInvokeType() SHARED_REQUIRES(Locks::mutator_lock_);
+ InvokeType GetInvokeType() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the method is declared public.
bool IsPublic() {
@@ -358,7 +357,7 @@
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsProxyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsProxyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
bool SkipAccessChecks() {
return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
@@ -380,24 +379,24 @@
bool IsAnnotatedWithFastNative();
// Returns true if this method could be overridden by a default method.
- bool IsOverridableByDefaultMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsOverridableByDefaultMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- bool CheckIncompatibleClassChange(InvokeType type) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckIncompatibleClassChange(InvokeType type) REQUIRES_SHARED(Locks::mutator_lock_);
// Throws the error that would result from trying to invoke this method (i.e.
// IncompatibleClassChangeError or AbstractMethodError). Only call if !IsInvokable();
- void ThrowInvocationTimeError() SHARED_REQUIRES(Locks::mutator_lock_);
+ void ThrowInvocationTimeError() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
// Doesn't do erroneous / unresolved class checks.
- uint16_t GetMethodIndexDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetMethodIndexDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
- size_t GetVtableIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetVtableIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetMethodIndex();
}
- void SetMethodIndex(uint16_t new_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetMethodIndex(uint16_t new_method_index) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
method_index_ = new_method_index;
}
@@ -422,9 +421,9 @@
// Number of 32bit registers that would be required to hold all the arguments
static size_t NumArgRegisters(const StringPiece& shorty);
- ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetDexMethodIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t GetImtIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetImtIndex() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexMethodIndex(uint32_t new_idx) {
// Not called within a transaction.
@@ -432,45 +431,45 @@
}
ALWAYS_INLINE ArtMethod** GetDexCacheResolvedMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_index,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_index,
ArtMethod* new_method,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetDexCacheResolvedMethods(ArtMethod** new_dex_cache_methods,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedMethods(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedMethods(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedMethods(ArtMethod* other, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedMethods(ArtMethod** other_cache, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kWithCheck = true>
mirror::Class* GetDexCacheResolvedType(uint32_t type_idx, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexCacheResolvedTypes(GcRoot<mirror::Class>* new_dex_cache_types,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool HasDexCacheResolvedTypes(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool HasDexCacheResolvedTypes(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedTypes(ArtMethod* other, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HasSameDexCacheResolvedTypes(GcRoot<mirror::Class>* other_cache, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the Class* from the type index into this method's dex cache.
mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if this method has the same name and signature of the other method.
- bool HasSameNameAndSignature(ArtMethod* other) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasSameNameAndSignature(ArtMethod* other) REQUIRES_SHARED(Locks::mutator_lock_);
// Find the method that this method overrides.
ArtMethod* FindOverriddenMethod(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find the method index for this method within other_dexfile. If this method isn't present then
// return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
@@ -478,10 +477,10 @@
// in the other_dexfile.
uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
uint32_t name_and_signature_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const void* GetEntryPointFromQuickCompiledCode() {
return GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
@@ -503,9 +502,9 @@
}
void RegisterNative(const void* native_method, bool is_fast)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void UnregisterNative() SHARED_REQUIRES(Locks::mutator_lock_);
+ void UnregisterNative() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheResolvedMethodsOffset(PointerSize pointer_size) {
return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
@@ -594,13 +593,13 @@
ALWAYS_INLINE bool IsRuntimeMethod();
// Is this a hand crafted method used for something like describing callee saves?
- bool IsCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsImtUnimplementedMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsImtUnimplementedMethod() REQUIRES_SHARED(Locks::mutator_lock_);
- MethodReference ToMethodReference() SHARED_REQUIRES(Locks::mutator_lock_) {
+ MethodReference ToMethodReference() REQUIRES_SHARED(Locks::mutator_lock_) {
return MethodReference(GetDexFile(), GetDexMethodIndex());
}
@@ -609,66 +608,66 @@
// a move-exception instruction is present.
uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc,
bool* has_no_move_exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename RootVisitorType>
void VisitRoots(RootVisitorType& visitor, PointerSize pointer_size) NO_THREAD_SAFETY_ANALYSIS;
- const DexFile* GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile* GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetDeclaringClassDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetDeclaringClassDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetShorty() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const char* GetShorty() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t unused_length;
return GetShorty(&unused_length);
}
- const char* GetShorty(uint32_t* out_length) SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetShorty(uint32_t* out_length) REQUIRES_SHARED(Locks::mutator_lock_);
- const Signature GetSignature() SHARED_REQUIRES(Locks::mutator_lock_);
+ const Signature GetSignature() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE const char* GetName() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE const char* GetName() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::String* GetNameAsString(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::String* GetNameAsString(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::CodeItem* GetCodeItem() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::CodeItem* GetCodeItem() REQUIRES_SHARED(Locks::mutator_lock_);
bool IsResolvedTypeIdx(uint16_t type_idx, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetLineNumFromDexPC(uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ProtoId& GetPrototype() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::ProtoId& GetPrototype() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::TypeList* GetParameterTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::TypeList* GetParameterTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetDeclaringClassSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetDeclaringClassSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ClassDef& GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::ClassDef& GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetReturnTypeDescriptor() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetReturnTypeDescriptor() REQUIRES_SHARED(Locks::mutator_lock_);
const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// May cause thread suspension due to GetClassFromTypeIdx calling ResolveType this caused a large
// number of bugs at call sites.
mirror::Class* GetReturnType(bool resolve, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// May cause thread suspension due to class resolution.
bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Size of an instance of this native class.
static size_t Size(PointerSize pointer_size) {
@@ -684,10 +683,10 @@
}
void CopyFrom(ArtMethod* src, PointerSize image_pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE GcRoot<mirror::Class>* GetDexCacheResolvedTypes(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Note, hotness_counter_ updates are non-atomic but it doesn't need to be precise. Also,
// given that the counter is only 16 bits wide we can expect wrap-around in some
@@ -708,15 +707,15 @@
return hotness_count_;
}
- const uint8_t* GetQuickenedInfo() SHARED_REQUIRES(Locks::mutator_lock_);
+ const uint8_t* GetQuickenedInfo() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the method header for the compiled code containing 'pc'. Note that runtime
// methods will return null for this method, as they are not oat based.
const OatQuickMethodHeader* GetOatQuickMethodHeader(uintptr_t pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether the method has any compiled code, JIT or AOT.
- bool HasAnyCompiledCode() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasAnyCompiledCode() REQUIRES_SHARED(Locks::mutator_lock_);
// Update heap objects and non-entrypoint pointers by the passed in visitor for image relocation.
@@ -724,7 +723,7 @@
template <typename Visitor>
ALWAYS_INLINE void UpdateObjectsForImageRelocation(const Visitor& visitor,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update entry points by passing them through the visitor.
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 102b993..f4addf7 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -178,10 +178,13 @@
#define MIRROR_CLASS_OBJECT_SIZE_OFFSET (96 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_OFFSET,
art::mirror::Class::ObjectSizeOffset().Int32Value())
-#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET (100 + MIRROR_OBJECT_HEADER_SIZE)
+ADD_TEST_EQ(MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET,
+ art::mirror::Class::ObjectSizeAllocFastPathOffset().Int32Value())
+#define MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET (104 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET,
art::mirror::Class::PrimitiveTypeOffset().Int32Value())
-#define MIRROR_CLASS_STATUS_OFFSET (108 + MIRROR_OBJECT_HEADER_SIZE)
+#define MIRROR_CLASS_STATUS_OFFSET (112 + MIRROR_OBJECT_HEADER_SIZE)
ADD_TEST_EQ(MIRROR_CLASS_STATUS_OFFSET,
art::mirror::Class::StatusOffset().Int32Value())
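Inserting the four-byte object-size-alloc-fast-path field shifts the primitive-type and status offsets by 4, and the ADD_TEST_EQ lines keep the hand-written assembler constants in sync with the C++ layout. A small model of the invariant being enforced; the exact checking mechanism in ART is assumed here:

#include <cassert>

#define EXAMPLE_MIRROR_OBJECT_HEADER_SIZE 8                       // assumed value
#define EXAMPLE_MIRROR_CLASS_STATUS_OFFSET \
  (112 + EXAMPLE_MIRROR_OBJECT_HEADER_SIZE)

void CheckClassStatusOffset(int status_offset_from_cpp) {
  // In ART the right-hand side is mirror::Class::StatusOffset().Int32Value();
  // when a field is inserted, both the assembler constant and this check
  // have to move together or the build/test fails.
  assert(EXAMPLE_MIRROR_CLASS_STATUS_OFFSET == status_offset_from_cpp);
}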
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index f279f45..378371d 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -28,43 +28,35 @@
namespace art {
template<typename T>
-static constexpr int CLZ(T x) {
+constexpr int CLZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
"T too large, must be smaller than long long");
- return
- DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
- (sizeof(T) == sizeof(uint32_t))
- ? __builtin_clz(x)
- : __builtin_clzll(x);
+ DCHECK_NE(x, 0u);
+ return (sizeof(T) == sizeof(uint32_t)) ? __builtin_clz(x) : __builtin_clzll(x);
}
template<typename T>
-static constexpr int CTZ(T x) {
+constexpr int CTZ(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// It is not unreasonable to ask for trailing zeros in a negative number. As such, do not check
// that T is an unsigned type.
static_assert(sizeof(T) <= sizeof(long long), // NOLINT [runtime/int] [4]
"T too large, must be smaller than long long");
- return
- DCHECK_CONSTEXPR(x != 0, "x must not be zero", T(0))
- (sizeof(T) == sizeof(uint32_t))
- ? __builtin_ctz(x)
- : __builtin_ctzll(x);
+ DCHECK_NE(x, static_cast<T>(0));
+ return (sizeof(T) == sizeof(uint32_t)) ? __builtin_ctz(x) : __builtin_ctzll(x);
}
// Return the number of 1-bits in `x`.
template<typename T>
-static constexpr int POPCOUNT(T x) {
- return (sizeof(T) == sizeof(uint32_t))
- ? __builtin_popcount(x)
- : __builtin_popcountll(x);
+constexpr int POPCOUNT(T x) {
+ return (sizeof(T) == sizeof(uint32_t)) ? __builtin_popcount(x) : __builtin_popcountll(x);
}
// Find the bit position of the most significant bit (0-based), or -1 if there were no bits set.
template <typename T>
-static constexpr ssize_t MostSignificantBit(T value) {
+constexpr ssize_t MostSignificantBit(T value) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
static_assert(std::numeric_limits<T>::radix == 2, "Unexpected radix!");
@@ -73,7 +65,7 @@
// Find the bit position of the least significant bit (0-based), or -1 if there were no bits set.
template <typename T>
-static constexpr ssize_t LeastSignificantBit(T value) {
+constexpr ssize_t LeastSignificantBit(T value) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
return (value == 0) ? -1 : CTZ(value);
@@ -81,12 +73,12 @@
// How many bits (minimally) does it take to store the constant 'value'? i.e. 1 for 1, 3 for 5, etc.
template <typename T>
-static constexpr size_t MinimumBitsToStore(T value) {
+constexpr size_t MinimumBitsToStore(T value) {
return static_cast<size_t>(MostSignificantBit(value) + 1);
}
template <typename T>
-static constexpr inline T RoundUpToPowerOfTwo(T x) {
+constexpr T RoundUpToPowerOfTwo(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
static_assert(std::is_unsigned<T>::value, "T must be unsigned");
// NOTE: Undefined if x > (1 << (std::numeric_limits<T>::digits - 1)).
@@ -94,14 +86,14 @@
}
template<typename T>
-static constexpr bool IsPowerOfTwo(T x) {
+constexpr bool IsPowerOfTwo(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// TODO: assert unsigned. There is currently many uses with signed values.
return (x & (x - 1)) == 0;
}
template<typename T>
-static inline int WhichPowerOf2(T x) {
+constexpr int WhichPowerOf2(T x) {
static_assert(std::is_integral<T>::value, "T must be integral");
// TODO: assert unsigned. There is currently many uses with signed values.
DCHECK((x != 0) && IsPowerOfTwo(x));
@@ -111,53 +103,52 @@
// For rounding integers.
// Note: Omit the `n` from T type deduction, deduce only from the `x` argument.
template<typename T>
-static constexpr T RoundDown(T x, typename Identity<T>::type n) WARN_UNUSED;
+constexpr T RoundDown(T x, typename Identity<T>::type n) WARN_UNUSED;
template<typename T>
-static constexpr T RoundDown(T x, typename Identity<T>::type n) {
- return
- DCHECK_CONSTEXPR(IsPowerOfTwo(n), , T(0))
- (x & -n);
+constexpr T RoundDown(T x, typename Identity<T>::type n) {
+ DCHECK(IsPowerOfTwo(n));
+ return (x & -n);
}
template<typename T>
-static constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) WARN_UNUSED;
+constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) WARN_UNUSED;
template<typename T>
-static constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) {
+constexpr T RoundUp(T x, typename std::remove_reference<T>::type n) {
return RoundDown(x + n - 1, n);
}
// For aligning pointers.
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
+inline T* AlignDown(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
-static inline T* AlignDown(T* x, uintptr_t n) {
+inline T* AlignDown(T* x, uintptr_t n) {
return reinterpret_cast<T*>(RoundDown(reinterpret_cast<uintptr_t>(x), n));
}
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
+inline T* AlignUp(T* x, uintptr_t n) WARN_UNUSED;
template<typename T>
-static inline T* AlignUp(T* x, uintptr_t n) {
+inline T* AlignUp(T* x, uintptr_t n) {
return reinterpret_cast<T*>(RoundUp(reinterpret_cast<uintptr_t>(x), n));
}
template<int n, typename T>
-static constexpr bool IsAligned(T x) {
+constexpr bool IsAligned(T x) {
static_assert((n & (n - 1)) == 0, "n is not a power of two");
return (x & (n - 1)) == 0;
}
template<int n, typename T>
-static inline bool IsAligned(T* x) {
+inline bool IsAligned(T* x) {
return IsAligned<n>(reinterpret_cast<const uintptr_t>(x));
}
template<typename T>
-static inline bool IsAlignedParam(T x, int n) {
+inline bool IsAlignedParam(T x, int n) {
return (x & (n - 1)) == 0;
}
@@ -175,9 +166,9 @@
// Like sizeof, but count how many bits a type takes. Pass type explicitly.
template <typename T>
-static constexpr size_t BitSizeOf() {
+constexpr size_t BitSizeOf() {
static_assert(std::is_integral<T>::value, "T must be integral");
- typedef typename std::make_unsigned<T>::type unsigned_type;
+ using unsigned_type = typename std::make_unsigned<T>::type;
static_assert(sizeof(T) == sizeof(unsigned_type), "Unexpected type size mismatch!");
static_assert(std::numeric_limits<unsigned_type>::radix == 2, "Unexpected radix!");
return std::numeric_limits<unsigned_type>::digits;
@@ -185,29 +176,29 @@
// Like sizeof, but count how many bits a type takes. Infers type from parameter.
template <typename T>
-static constexpr size_t BitSizeOf(T /*x*/) {
+constexpr size_t BitSizeOf(T /*x*/) {
return BitSizeOf<T>();
}
-static inline uint16_t Low16Bits(uint32_t value) {
+inline uint16_t Low16Bits(uint32_t value) {
return static_cast<uint16_t>(value);
}
-static inline uint16_t High16Bits(uint32_t value) {
+inline uint16_t High16Bits(uint32_t value) {
return static_cast<uint16_t>(value >> 16);
}
-static inline uint32_t Low32Bits(uint64_t value) {
+inline uint32_t Low32Bits(uint64_t value) {
return static_cast<uint32_t>(value);
}
-static inline uint32_t High32Bits(uint64_t value) {
+inline uint32_t High32Bits(uint64_t value) {
return static_cast<uint32_t>(value >> 32);
}
// Check whether an N-bit two's-complement representation can hold value.
template <typename T>
-static inline bool IsInt(size_t N, T value) {
+inline bool IsInt(size_t N, T value) {
if (N == BitSizeOf<T>()) {
return true;
} else {
@@ -219,15 +210,14 @@
}
template <typename T>
-static constexpr T GetIntLimit(size_t bits) {
- return
- DCHECK_CONSTEXPR(bits > 0, "bits cannot be zero", 0)
- DCHECK_CONSTEXPR(bits < BitSizeOf<T>(), "kBits must be < max.", 0)
- static_cast<T>(1) << (bits - 1);
+constexpr T GetIntLimit(size_t bits) {
+ DCHECK_NE(bits, 0u);
+ DCHECK_LT(bits, BitSizeOf<T>());
+ return static_cast<T>(1) << (bits - 1);
}
template <size_t kBits, typename T>
-static constexpr bool IsInt(T value) {
+constexpr bool IsInt(T value) {
static_assert(kBits > 0, "kBits cannot be zero.");
static_assert(kBits <= BitSizeOf<T>(), "kBits must be <= max.");
static_assert(std::is_signed<T>::value, "Needs a signed type.");
@@ -239,7 +229,7 @@
}
template <size_t kBits, typename T>
-static constexpr bool IsUint(T value) {
+constexpr bool IsUint(T value) {
static_assert(kBits > 0, "kBits cannot be zero.");
static_assert(kBits <= BitSizeOf<T>(), "kBits must be <= max.");
static_assert(std::is_integral<T>::value, "Needs an integral type.");
@@ -247,17 +237,17 @@
// trivially true.
// NOTE: To avoid triggering assertion in GetIntLimit(kBits+1) if kBits+1==BitSizeOf<T>(),
// use GetIntLimit(kBits)*2u. The unsigned arithmetic works well for us if it overflows.
+ using unsigned_type = typename std::make_unsigned<T>::type;
return (0 <= value) &&
(kBits == BitSizeOf<T>() ||
- (static_cast<typename std::make_unsigned<T>::type>(value) <=
- GetIntLimit<typename std::make_unsigned<T>::type>(kBits) * 2u - 1u));
+ (static_cast<unsigned_type>(value) <= GetIntLimit<unsigned_type>(kBits) * 2u - 1u));
}
template <size_t kBits, typename T>
-static constexpr bool IsAbsoluteUint(T value) {
+constexpr bool IsAbsoluteUint(T value) {
static_assert(kBits <= BitSizeOf<T>(), "kBits must be <= max.");
static_assert(std::is_integral<T>::value, "Needs an integral type.");
- typedef typename std::make_unsigned<T>::type unsigned_type;
+ using unsigned_type = typename std::make_unsigned<T>::type;
return (kBits == BitSizeOf<T>())
? true
: IsUint<kBits>(value < 0
@@ -267,29 +257,26 @@
// Generate maximum/minimum values for signed/unsigned n-bit integers
template <typename T>
-static constexpr T MaxInt(size_t bits) {
- return
- DCHECK_CONSTEXPR(std::is_unsigned<T>::value || bits > 0, "bits cannot be zero for signed", 0)
- DCHECK_CONSTEXPR(bits <= BitSizeOf<T>(), "kBits must be < max.", 0)
- bits == BitSizeOf<T>()
- ? std::numeric_limits<T>::max()
- : std::is_signed<T>::value
- ? (bits == 1
- ? 0
- : static_cast<T>(MaxInt<typename std::make_unsigned<T>::type>(bits - 1)))
- : static_cast<T>(UINT64_C(1) << bits) - static_cast<T>(1);
+constexpr T MaxInt(size_t bits) {
+ DCHECK(std::is_unsigned<T>::value || bits > 0u) << "bits cannot be zero for signed.";
+ DCHECK_LE(bits, BitSizeOf<T>());
+ using unsigned_type = typename std::make_unsigned<T>::type;
+ return bits == BitSizeOf<T>()
+ ? std::numeric_limits<T>::max()
+ : std::is_signed<T>::value
+ ? ((bits == 1u) ? 0 : static_cast<T>(MaxInt<unsigned_type>(bits - 1)))
+ : static_cast<T>(UINT64_C(1) << bits) - static_cast<T>(1);
}
template <typename T>
-static constexpr T MinInt(size_t bits) {
- return
- DCHECK_CONSTEXPR(std::is_unsigned<T>::value || bits > 0, "bits cannot be zero for signed", 0)
- DCHECK_CONSTEXPR(bits <= BitSizeOf<T>(), "kBits must be < max.", 0)
- bits == BitSizeOf<T>()
- ? std::numeric_limits<T>::min()
- : std::is_signed<T>::value
- ? (bits == 1 ? -1 : static_cast<T>(-1) - MaxInt<T>(bits))
- : static_cast<T>(0);
+constexpr T MinInt(size_t bits) {
+ DCHECK(std::is_unsigned<T>::value || bits > 0) << "bits cannot be zero for signed.";
+ DCHECK_LE(bits, BitSizeOf<T>());
+ return bits == BitSizeOf<T>()
+ ? std::numeric_limits<T>::min()
+ : std::is_signed<T>::value
+ ? ((bits == 1u) ? -1 : static_cast<T>(-1) - MaxInt<T>(bits))
+ : static_cast<T>(0);
}
// Using the Curiously Recurring Template Pattern to implement everything shared
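The DCHECK_CONSTEXPR ternary chaining removed above existed because C++11 constexpr bodies were limited to a single return expression; with C++14 relaxed constexpr the functions can carry ordinary statements, so a plain DCHECK works and the helpers stay constexpr. Dropping the redundant `static` on these header templates also leaves them with ordinary inline/external linkage instead of one internal-linkage copy per translation unit. A before/after sketch, using assert as a stand-in for ART's DCHECK macros:

#include <cassert>

// C++11-era shape (schematic): the assertion is folded into the expression,
// which is what CHECK_CONSTEXPR/DCHECK_CONSTEXPR emulated.
constexpr int CtzOld(unsigned x) {
  return (x != 0u) ? __builtin_ctz(x) : (assert(!"x must not be zero"), 0);
}

// C++14 shape, matching the new code: a statement, then the return.
constexpr int CtzNew(unsigned x) {
  assert(x != 0u);              // stands in for DCHECK_NE(x, 0u)
  return __builtin_ctz(x);
}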
diff --git a/runtime/base/iteration_range.h b/runtime/base/iteration_range.h
index cf02d32..54ab174 100644
--- a/runtime/base/iteration_range.h
+++ b/runtime/base/iteration_range.h
@@ -45,12 +45,12 @@
};
template <typename Iter>
-static inline IterationRange<Iter> MakeIterationRange(const Iter& begin_it, const Iter& end_it) {
+inline IterationRange<Iter> MakeIterationRange(const Iter& begin_it, const Iter& end_it) {
return IterationRange<Iter>(begin_it, end_it);
}
template <typename Iter>
-static inline IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) {
+inline IterationRange<Iter> MakeEmptyIterationRange(const Iter& it) {
return IterationRange<Iter>(it, it);
}
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index ac21a3f..f43cb8b 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -161,16 +161,6 @@
} \
} while (false)
-// CHECK that can be used in a constexpr function. For example,
-// constexpr int half(int n) {
-// return
-// DCHECK_CONSTEXPR(n >= 0, , 0)
-// CHECK_CONSTEXPR((n & 1) == 0), << "Extra debugging output: n = " << n, 0)
-// n / 2;
-// }
-#define CHECK_CONSTEXPR(x, out, dummy) \
- (UNLIKELY(!(x))) ? (LOG(::art::FATAL) << "Check failed: " << #x out, dummy) :
-
// DCHECKs are debug variants of CHECKs only enabled in debug builds. Generally CHECK should be
// used unless profiling identifies a CHECK as being in performance critical code.
@@ -189,11 +179,6 @@
#define DCHECK_GT(x, y) if (::art::kEnableDChecks) CHECK_GT(x, y)
#define DCHECK_STREQ(s1, s2) if (::art::kEnableDChecks) CHECK_STREQ(s1, s2)
#define DCHECK_STRNE(s1, s2) if (::art::kEnableDChecks) CHECK_STRNE(s1, s2)
-#if defined(NDEBUG)
-#define DCHECK_CONSTEXPR(x, out, dummy)
-#else
-#define DCHECK_CONSTEXPR(x, out, dummy) CHECK_CONSTEXPR(x, out, dummy)
-#endif
// Temporary class created to evaluate the LHS and RHS, used with MakeEagerEvaluator to infer the
// types of LHS and RHS.
@@ -206,7 +191,7 @@
// Helper function for CHECK_xx.
template <typename LHS, typename RHS>
-static inline constexpr EagerEvaluator<LHS, RHS> MakeEagerEvaluator(LHS lhs, RHS rhs) {
+constexpr EagerEvaluator<LHS, RHS> MakeEagerEvaluator(LHS lhs, RHS rhs) {
return EagerEvaluator<LHS, RHS>(lhs, rhs);
}
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 0ec6e6d..6cd7d60 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -20,15 +20,8 @@
#include <stddef.h> // for size_t
#include <unistd.h> // for TEMP_FAILURE_RETRY
-// bionic and glibc both have TEMP_FAILURE_RETRY, but eg Mac OS' libc doesn't.
-#ifndef TEMP_FAILURE_RETRY
-#define TEMP_FAILURE_RETRY(exp) ({ \
- decltype(exp) _rc; \
- do { \
- _rc = (exp); \
- } while (_rc == -1 && errno == EINTR); \
- _rc; })
-#endif
+#include "android-base/macros.h"
+#include "android-base/thread_annotations.h"
#define OVERRIDE override
#define FINAL final
@@ -42,23 +35,6 @@
#define ART_FRIEND_TYPED_TEST(test_set_name, individual_test)\
template<typename T> ART_FRIEND_TEST(test_set_name, individual_test)
-// DISALLOW_COPY_AND_ASSIGN disallows the copy and operator= functions. It goes in the private:
-// declarations in a class.
-#if !defined(DISALLOW_COPY_AND_ASSIGN)
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&) = delete; \
- void operator=(const TypeName&) = delete
-#endif
-
-// A macro to disallow all the implicit constructors, namely the default constructor, copy
-// constructor and operator= functions.
-//
-// This should be used in the private: declarations for a class that wants to prevent anyone from
-// instantiating it. This is especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName() = delete; \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
-
// A macro to disallow new and delete operators for a class. It goes in the private: declarations.
// NOTE: Providing placement new (and matching delete) for constructing container elements.
#define DISALLOW_ALLOCATION() \
@@ -69,64 +45,6 @@
private: \
void* operator new(size_t) = delete // NOLINT
-// The arraysize(arr) macro returns the # of elements in an array arr.
-// The expression is a compile-time constant, and therefore can be
-// used in defining new arrays, for example. If you use arraysize on
-// a pointer by mistake, you will get a compile-time error.
-//
-// One caveat is that arraysize() doesn't accept any array of an
-// anonymous type or a type defined inside a function. In these rare
-// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below. This is
-// due to a limitation in C++'s template system. The limitation might
-// eventually be removed, but it hasn't happened yet.
-
-// This template function declaration is used in defining arraysize.
-// Note that the function doesn't need an implementation, as we only
-// use its type.
-template <typename T, size_t N>
-char (&ArraySizeHelper(T (&array)[N]))[N];
-
-#define arraysize(array) (sizeof(ArraySizeHelper(array)))
-
-// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
-// but can be used on anonymous types or types defined inside
-// functions. It's less safe than arraysize as it accepts some
-// (although not all) pointers. Therefore, you should use arraysize
-// whenever possible.
-//
-// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
-// size_t.
-//
-// ARRAYSIZE_UNSAFE catches a few type errors. If you see a compiler error
-//
-// "warning: division by zero in ..."
-//
-// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
-// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
-//
-// The following comments are on the implementation details, and can
-// be ignored by the users.
-//
-// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
-// the array) and sizeof(*(arr)) (the # of bytes in one array
-// element). If the former is divisible by the latter, perhaps arr is
-// indeed an array, in which case the division result is the # of
-// elements in the array. Otherwise, arr cannot possibly be an array,
-// and we generate a compiler error to prevent the code from
-// compiling.
-//
-// Since the size of bool is implementation-defined, we need to cast
-// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
-// result has type size_t.
-//
-// This macro is not perfect as it wrongfully accepts certain
-// pointers, namely where the pointer size is divisible by the pointee
-// size. Since all our code has to go through a 32-bit compiler,
-// where a pointer is 4 bytes, this means all pointers to a type whose
-// size is 3 or greater than 4 will be (righteously) rejected.
-#define ARRAYSIZE_UNSAFE(a) \
- ((sizeof(a) / sizeof(*(a))) / static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-
#define SIZEOF_MEMBER(t, f) sizeof((reinterpret_cast<t*>(4096))->f) // NOLINT
#define OFFSETOF_MEMBER(t, f) \
@@ -137,9 +55,6 @@
#define PACKED(x) __attribute__ ((__aligned__(x), __packed__))
-#define LIKELY(x) __builtin_expect((x), true)
-#define UNLIKELY(x) __builtin_expect((x), false)
-
// Stringify the argument.
#define QUOTE(x) #x
#define STRINGIFY(x) QUOTE(x)
@@ -165,17 +80,6 @@
#endif
#define PURE __attribute__ ((__pure__))
-#define WARN_UNUSED __attribute__((warn_unused_result))
-
-// A deprecated function to call to create a false use of the parameter, for example:
-// int foo(int x) { UNUSED(x); return 10; }
-// to avoid compiler warnings. Going forward we prefer ATTRIBUTE_UNUSED.
-template<typename... T> void UNUSED(const T&...) {}
-
-// An attribute to place on a parameter to a function, for example:
-// int foo(int x ATTRIBUTE_UNUSED) { return 10; }
-// to avoid compiler warnings.
-#define ATTRIBUTE_UNUSED __attribute__((__unused__))
// Define that a position within code is unreachable, for example:
// int foo () { LOG(FATAL) << "Don't call me"; UNREACHABLE(); }
@@ -185,78 +89,7 @@
// Add the C++11 noreturn attribute.
#define NO_RETURN [[ noreturn ]] // NOLINT[whitespace/braces] [5]
-// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
-// between switch labels:
-// switch (x) {
-// case 40:
-// case 41:
-// if (truth_is_out_there) {
-// ++x;
-// FALLTHROUGH_INTENDED; // Use instead of/along with annotations in
-// // comments.
-// } else {
-// return x;
-// }
-// case 42:
-// ...
-//
-// As shown in the example above, the FALLTHROUGH_INTENDED macro should be
-// followed by a semicolon. It is designed to mimic control-flow statements
-// like 'break;', so it can be placed in most places where 'break;' can, but
-// only if there are no statements on the execution path between it and the
-// next switch label.
-//
-// When compiled with clang in C++11 mode, the FALLTHROUGH_INTENDED macro is
-// expanded to [[clang::fallthrough]] attribute, which is analysed when
-// performing switch labels fall-through diagnostic ('-Wimplicit-fallthrough').
-// See clang documentation on language extensions for details:
-// http://clang.llvm.org/docs/LanguageExtensions.html#clang__fallthrough
-//
-// When used with unsupported compilers, the FALLTHROUGH_INTENDED macro has no
-// effect on diagnostics.
-//
-// In either case this macro has no effect on runtime behavior and performance
-// of code.
-#if __has_feature(cxx_attributes) && __has_warning("-Wimplicit-fallthrough")
-#define FALLTHROUGH_INTENDED [[clang::fallthrough]] // NOLINT
-#endif
-
-#ifndef FALLTHROUGH_INTENDED
-#define FALLTHROUGH_INTENDED do { } while (0)
-#endif
-
-// Annotalysis thread-safety analysis support.
-
-#define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
-#define ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__)))
-#define GUARDED_BY(x) __attribute__((guarded_by(x)))
-#define GUARDED_VAR __attribute__((guarded))
-#define LOCK_RETURNED(x) __attribute__((lock_returned(x)))
-#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
-#define PT_GUARDED_BY(x)
-// THREAD_ANNOTATION_ATTRIBUTE__(point_to_guarded_by(x))
-#define PT_GUARDED_VAR __attribute__((point_to_guarded))
-#define SCOPED_LOCKABLE __attribute__((scoped_lockable))
-
-#define EXCLUSIVE_LOCK_FUNCTION(...) __attribute__((exclusive_lock_function(__VA_ARGS__)))
-#define EXCLUSIVE_TRYLOCK_FUNCTION(...) __attribute__((exclusive_trylock_function(__VA_ARGS__)))
-#define SHARED_LOCK_FUNCTION(...) __attribute__((shared_lock_function(__VA_ARGS__)))
-#define SHARED_TRYLOCK_FUNCTION(...) __attribute__((shared_trylock_function(__VA_ARGS__)))
-#define UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__)))
-#define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
-#define SHARED_REQUIRES(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
-#define CAPABILITY(...) __attribute__((capability(__VA_ARGS__)))
-#define SHARED_CAPABILITY(...) __attribute__((shared_capability(__VA_ARGS__)))
-#define ASSERT_CAPABILITY(...) __attribute__((assert_capability(__VA_ARGS__)))
-#define ASSERT_SHARED_CAPABILITY(...) __attribute__((assert_shared_capability(__VA_ARGS__)))
-#define RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__)))
-#define TRY_ACQUIRE(...) __attribute__((try_acquire_capability(__VA_ARGS__)))
-#define TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__)))
-#define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
-#define ACQUIRE_SHARED(...) __attribute__((acquire_shared_capability(__VA_ARGS__)))
-#define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
-#define RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__)))
-#define SCOPED_CAPABILITY __attribute__((scoped_lockable))
+// Annotalysis thread-safety analysis support. Things that are not in base.
#define LOCKABLE CAPABILITY("mutex")
#define SHARED_LOCKABLE SHARED_CAPABILITY("mutex")
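With the macros.h hunk above, the removed helpers (TEMP_FAILURE_RETRY, DISALLOW_COPY_AND_ASSIGN, arraysize, LIKELY/UNLIKELY, FALLTHROUGH_INTENDED, and most capability annotations) are expected to come from the newly included android-base/macros.h and android-base/thread_annotations.h instead, and the shared-lock annotation is spelled REQUIRES_SHARED in that scheme. That is why every remaining hunk in this change mechanically renames SHARED_REQUIRES to REQUIRES_SHARED; per the removed definitions above, both spellings wrap the same clang thread-safety attribute. A self-contained sketch of what such a macro expands to, using made-up names:

    // Only the macro name is standardized; the underlying attribute is unchanged.
    // 'FakeSharedMutex', 'gLock' and 'ReadCounter' are illustrative only.
    #define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

    struct __attribute__((capability("mutex"))) FakeSharedMutex {};
    FakeSharedMutex gLock;

    int gGuardedCounter __attribute__((guarded_by(gLock))) = 0;

    // Callers must hold gLock at least for reading; clang's -Wthread-safety
    // analysis verifies this at every call site.
    int ReadCounter() REQUIRES_SHARED(gLock) {
      return gGuardedCounter;
    }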
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 96fa53c..6683f13 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -268,7 +268,7 @@
* Assumes "jobj" has already been validated.
*/
bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
if (o == nullptr) {
AbortF("field operation on NULL object: %p", java_object);
@@ -312,7 +312,7 @@
*/
bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
jmethodID mid, Primitive::Type type, InvokeType invoke)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -359,7 +359,7 @@
* Assumes "java_class" has already been validated.
*/
bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
ArtField* f = CheckFieldID(soa, fid);
if (f == nullptr) {
@@ -382,7 +382,7 @@
* Instances of "java_class" must be instances of the method's declaring class.
*/
bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -403,7 +403,7 @@
* will be handled automatically by the instanceof check.)
*/
bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = CheckMethodID(soa, mid);
if (m == nullptr) {
return false;
@@ -456,7 +456,7 @@
* Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
*/
bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* traceMethod = nullptr;
if (has_method_ && soa.Vm()->IsTracingEnabled()) {
// We need to guard some of the invocation interface's calls: a bad caller might
@@ -556,7 +556,7 @@
}
bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* method = soa.Decode<mirror::Object*>(jmethod);
if (method == nullptr) {
AbortF("expected non-null method");
@@ -574,7 +574,7 @@
}
bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = soa.DecodeMethod(mid);
if (method == nullptr) {
AbortF("expected non-null constructor");
@@ -588,7 +588,7 @@
}
bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* field = soa.Decode<mirror::Object*>(jfield);
if (field == nullptr) {
AbortF("expected non-null java.lang.reflect.Field");
@@ -604,7 +604,7 @@
}
bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = soa.Decode<mirror::Object*>(jobj);
if (!obj->GetClass()->IsThrowableClass()) {
AbortF("expected java.lang.Throwable but got object of type "
@@ -615,7 +615,7 @@
}
bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(jc);
if (!c->IsThrowableClass()) {
AbortF("expected java.lang.Throwable class but got object of "
@@ -646,7 +646,7 @@
}
bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(jc);
if (!c->IsInstantiableNonArray()) {
AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c);
@@ -656,7 +656,7 @@
}
bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!CheckArray(soa, array)) {
return false;
}
@@ -671,7 +671,7 @@
bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static,
Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) {
return false;
}
@@ -732,7 +732,7 @@
* to "running" mode before doing the checks.
*/
bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const char* what = nullptr;
switch (kind) {
case kClass:
@@ -828,7 +828,7 @@
}
bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (fmt) {
case 'a': // jarray
return CheckArray(soa, arg.a);
@@ -856,7 +856,7 @@
}
bool CheckVarArgs(ScopedObjectAccess& soa, const VarArgs* args_p)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(args_p != nullptr);
VarArgs args(args_p->Clone());
ArtMethod* m = CheckMethodID(soa, args.GetMethodID());
@@ -922,7 +922,7 @@
void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg,
std::string* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (fmt) {
case 'L': // jobject fall-through.
case 'a': // jarray fall-through.
@@ -1101,7 +1101,7 @@
* Since we're dealing with objects, switch to "running" mode.
*/
bool CheckArray(ScopedObjectAccess& soa, jarray java_array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(java_array == nullptr)) {
AbortF("jarray was NULL");
return false;
@@ -1138,7 +1138,7 @@
}
ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (fid == nullptr) {
AbortF("jfieldID was NULL");
return nullptr;
@@ -1154,7 +1154,7 @@
}
ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (mid == nullptr) {
AbortF("jmethodID was NULL");
return nullptr;
@@ -1169,7 +1169,7 @@
return m;
}
- bool CheckThread(JNIEnv* env) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CheckThread(JNIEnv* env) REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
if (self == nullptr) {
AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid());
@@ -2855,7 +2855,7 @@
static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
jclass c, jmethodID mid, InvokeType invoke, const VarArgs* vargs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
bool checked;
switch (invoke) {
case kVirtual: {
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 0e2f9f2..843d4c1 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -28,10 +28,10 @@
// holding references.
class CheckReferenceMapVisitor : public StackVisitor {
public:
- explicit CheckReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit CheckReferenceMapVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsCalleeSaveMethod() || m->IsNative()) {
CHECK_EQ(GetDexPc(), DexFile::kDexNoIndex);
@@ -52,14 +52,14 @@
}
void CheckReferences(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(GetCurrentOatQuickMethodHeader()->IsOptimized());
CheckOptimizedMethod(registers, number_of_references, native_pc_offset);
}
private:
void CheckOptimizedMethod(int* registers, int number_of_references, uint32_t native_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
CodeInfo code_info = GetCurrentOatQuickMethodHeader()->GetOptimizedCodeInfo();
CodeInfoEncoding encoding = code_info.ExtractEncoding();
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 97aa499..caabcde 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -222,7 +222,7 @@
}
inline mirror::Class* ClassLinker::GetClassRoot(ClassRoot class_root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!class_roots_.IsNull());
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
mirror::Class* klass = class_roots->Get(class_root);
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index f4400c3..726e897 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -102,7 +102,7 @@
static void ThrowNoClassDefFoundError(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void ThrowNoClassDefFoundError(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
@@ -112,7 +112,7 @@
}
static bool HasInitWithString(Thread* self, ClassLinker* class_linker, const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = self->GetCurrentMethod(nullptr);
StackHandleScope<1> hs(self);
Handle<mirror::ClassLoader> class_loader(hs.NewHandle(method != nullptr ?
@@ -133,7 +133,7 @@
// Helper for ThrowEarlierClassFailure. Throws the stored error.
static void HandleEarlierVerifyError(Thread* self, ClassLinker* class_linker, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = c->GetVerifyError();
DCHECK(obj != nullptr);
self->AssertNoPendingException();
@@ -201,7 +201,7 @@
}
static void VlogClassInitializationFailure(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (VLOG_IS_ON(class_linker)) {
std::string temp;
LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from "
@@ -210,7 +210,7 @@
}
static void WrapExceptionInInitializer(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
JNIEnv* env = self->GetJniEnv();
@@ -272,7 +272,7 @@
MemberOffset* field_offset,
std::deque<ArtField*>* grouped_and_sorted_fields,
FieldGaps* gaps)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(current_field_idx != nullptr);
DCHECK(grouped_and_sorted_fields != nullptr);
DCHECK(gaps != nullptr);
@@ -761,7 +761,7 @@
static void SanityCheckArtMethod(ArtMethod* m,
mirror::Class* expected_class,
const std::vector<gc::space::ImageSpace*>& spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (m->IsRuntimeMethod()) {
mirror::Class* declaring_class = m->GetDeclaringClassUnchecked();
CHECK(declaring_class == nullptr) << declaring_class << " " << PrettyMethod(m);
@@ -790,7 +790,7 @@
mirror::Class* expected_class,
PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(arr != nullptr);
for (int32_t j = 0; j < arr->GetLength(); ++j) {
auto* method = arr->GetElementPtrSize<ArtMethod*>(j, pointer_size);
@@ -808,7 +808,7 @@
size_t size,
PointerSize pointer_size,
const std::vector<gc::space::ImageSpace*>& spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK_EQ(arr != nullptr, size != 0u);
if (arr != nullptr) {
bool contains = false;
@@ -832,7 +832,7 @@
}
static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
CHECK(obj->GetClass() != nullptr) << "Null class in object " << obj;
CHECK(obj->GetClass()->GetClass() != nullptr) << "Null class class " << obj;
@@ -883,7 +883,7 @@
explicit SetInterpreterEntrypointArtMethodVisitor(PointerSize image_pointer_size)
: image_pointer_size_(image_pointer_size) {}
- void Visit(ArtMethod* method) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Visit(ArtMethod* method) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild && !method->IsRuntimeMethod()) {
CHECK(method->GetDeclaringClass() != nullptr);
}
@@ -1073,7 +1073,7 @@
static mirror::String* GetDexPathListElementName(ScopedObjectAccessUnchecked& soa,
mirror::Object* element)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* const dex_file_field =
soa.DecodeField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile);
ArtField* const dex_file_name_field =
@@ -1096,7 +1096,7 @@
static bool FlattenPathClassLoader(mirror::ClassLoader* class_loader,
std::list<mirror::String*>* out_dex_file_names,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(out_dex_file_names != nullptr);
DCHECK(error_msg != nullptr);
ScopedObjectAccessUnchecked soa(Thread::Current());
@@ -1147,7 +1147,7 @@
public:
explicit FixupArtMethodArrayVisitor(const ImageHeader& header) : header_(header) {}
- virtual void Visit(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual void Visit(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
GcRoot<mirror::Class>* resolved_types = method->GetDexCacheResolvedTypes(kRuntimePointerSize);
const bool is_copied = method->IsCopied();
if (resolved_types != nullptr) {
@@ -1194,7 +1194,7 @@
explicit VerifyClassInTableArtMethodVisitor(ClassTable* table) : table_(table) {}
virtual void Visit(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::classlinker_classes_lock_) {
mirror::Class* klass = method->GetDeclaringClass();
if (klass != nullptr && !Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
CHECK_EQ(table_->LookupByDescriptor(klass), klass) << PrettyClass(klass);
@@ -1207,11 +1207,11 @@
class VerifyDeclaringClassVisitor : public ArtMethodVisitor {
public:
- VerifyDeclaringClassVisitor() SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ VerifyDeclaringClassVisitor() REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
: live_bitmap_(Runtime::Current()->GetHeap()->GetLiveBitmap()) {}
virtual void Visit(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mirror::Class* klass = method->GetDeclaringClassUnchecked();
if (klass != nullptr) {
CHECK(live_bitmap_->Test(klass)) << "Image method has unmarked declaring class";
@@ -1476,7 +1476,7 @@
class_loader_(class_loader),
forward_strings_(forward_strings) {}
- bool operator()(mirror::Class* klass) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (forward_strings_) {
mirror::StringDexCacheType* strings = klass->GetDexCacheStrings();
if (strings != nullptr) {
@@ -1503,7 +1503,7 @@
static std::unique_ptr<const DexFile> OpenOatDexFile(const OatFile* oat_file,
const char* location,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(error_msg != nullptr);
std::unique_ptr<const DexFile> dex_file;
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(location, nullptr, error_msg);
@@ -1894,7 +1894,7 @@
done_(false) {}
void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
if (!done_ && class_table != nullptr && !class_table->Visit(*visitor_)) {
// If the visitor ClassTable returns false it means that we don't need to continue.
@@ -1944,7 +1944,7 @@
explicit GetClassInToObjectArray(mirror::ObjectArray<mirror::Class>* arr)
: arr_(arr), index_(0) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
++index_;
if (index_ <= arr_->GetLength()) {
arr_->Set(index_ - 1, klass);
@@ -1953,7 +1953,7 @@
return false;
}
- bool Succeeded() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Succeeded() const REQUIRES_SHARED(Locks::mutator_lock_) {
return index_ <= arr_->GetLength();
}
@@ -3669,7 +3669,7 @@
}
static std::vector<mirror::ObjectArray<mirror::DexCache>*> GetImageDexCaches(
- std::vector<gc::space::ImageSpace*> image_spaces) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::vector<gc::space::ImageSpace*> image_spaces) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(!image_spaces.empty());
std::vector<mirror::ObjectArray<mirror::DexCache>*> dex_caches_vector;
for (gc::space::ImageSpace* image_space : image_spaces) {
@@ -3731,7 +3731,7 @@
void Visit(mirror::ClassLoader* class_loader)
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
class_table->FreezeSnapshot();
@@ -3777,7 +3777,7 @@
result_(result) {}
void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
mirror::Class* klass = class_table->Lookup(descriptor_, hash_);
if (klass != nullptr) {
@@ -4758,7 +4758,7 @@
bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
while (true) {
self->AssertNoPendingException();
CHECK(!klass->IsInitialized());
@@ -4802,7 +4802,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method,
ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
const DexFile* dex_file = m->GetDexFile();
@@ -4827,7 +4827,7 @@
ArtMethod* m,
uint32_t index,
uint32_t arg_type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Thread::Current()->IsExceptionPending());
DCHECK(!m->IsProxyMethod());
const DexFile* dex_file = m->GetDexFile();
@@ -4847,7 +4847,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method,
const std::string& error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ThrowLinkageError(klass.Get(),
"Class %s method %s resolves differently in %s %s: %s",
PrettyDescriptor(klass.Get()).c_str(),
@@ -4863,7 +4863,7 @@
Handle<mirror::Class> super_klass,
ArtMethod* method1,
ArtMethod* method2)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
{
StackHandleScope<1> hs(self);
Handle<mirror::Class> return_type(hs.NewHandle(method1->GetReturnType(true /* resolve */,
@@ -5075,7 +5075,7 @@
}
static ImTable* FindSuperImt(mirror::Class* klass, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
while (klass->HasSuperClass()) {
klass = klass->GetSuperClass();
if (klass->ShouldHaveImt()) {
@@ -5353,7 +5353,7 @@
const DexFile& dex_file,
const DexFile::ClassDef& class_def,
mirror::Class* super_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Check for unexpected changes in the superclass.
// Quick check 1) is the super_class class-loader the boot class loader? This always has
// precedence.
@@ -5565,7 +5565,7 @@
class MethodNameAndSignatureComparator FINAL : public ValueObject {
public:
explicit MethodNameAndSignatureComparator(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) :
+ REQUIRES_SHARED(Locks::mutator_lock_) :
dex_file_(method->GetDexFile()), mid_(&dex_file_->GetMethodId(method->GetDexMethodIndex())),
name_(nullptr), name_len_(0) {
DCHECK(!method->IsProxyMethod()) << PrettyMethod(method);
@@ -5579,7 +5579,7 @@
}
bool HasSameNameAndSignature(ArtMethod* other)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!other->IsProxyMethod()) << PrettyMethod(other);
const DexFile* other_dex_file = other->GetDexFile();
const DexFile::MethodId& other_mid = other_dex_file->GetMethodId(other->GetDexMethodIndex());
@@ -5620,7 +5620,7 @@
std::fill(hash_table_, hash_table_ + hash_size_, invalid_index_);
}
- void Add(uint32_t virtual_method_index) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Add(uint32_t virtual_method_index) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* local_method = klass_->GetVirtualMethodDuringLinking(
virtual_method_index, image_pointer_size_);
const char* name = local_method->GetInterfaceMethodIfProxy(image_pointer_size_)->GetName();
@@ -5636,7 +5636,7 @@
}
uint32_t FindAndRemove(MethodNameAndSignatureComparator* comparator)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const char* name = comparator->GetName();
uint32_t hash = ComputeModifiedUtf8Hash(name);
size_t index = hash % hash_size_;
@@ -5936,7 +5936,7 @@
size_t ifstart,
Handle<mirror::Class> iface,
PointerSize image_pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(self != nullptr);
DCHECK(iface.Get() != nullptr);
DCHECK(iftable.Get() != nullptr);
@@ -6336,7 +6336,7 @@
static bool NotSubinterfaceOfAny(const std::unordered_set<mirror::Class*>& classes,
mirror::Class* val)
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(val != nullptr);
for (auto c : classes) {
if (val->IsAssignableFrom(&*c)) {
@@ -6369,7 +6369,7 @@
size_t super_ifcount,
std::vector<mirror::Class*> to_process)
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// This is the set of all classes already in the iftable. Used to make checking if a class has
// already been added quicker.
std::unordered_set<mirror::Class*> classes_in_iftable;
@@ -6537,7 +6537,7 @@
// methods must be unique.
static ArtMethod* FindSameNameAndSignature(MethodNameAndSignatureComparator& cmp,
const ScopedArenaVector<ArtMethod*>& list)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (ArtMethod* method : list) {
if (cmp.HasSameNameAndSignature(method)) {
return method;
@@ -6547,7 +6547,7 @@
}
static void SanityCheckVTable(Handle<mirror::Class> klass, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::PointerArray* check_vtable = klass->GetVTableDuringLinking();
mirror::Class* superclass = (klass->HasSuperClass()) ? klass->GetSuperClass() : nullptr;
int32_t super_vtable_length = (superclass != nullptr) ? superclass->GetVTableLength() : 0;
@@ -7165,7 +7165,7 @@
}
struct LinkFieldsComparator {
- explicit LinkFieldsComparator() SHARED_REQUIRES(Locks::mutator_lock_) {
+ explicit LinkFieldsComparator() REQUIRES_SHARED(Locks::mutator_lock_) {
}
// No thread safety analysis as will be called from STL. Checked lock held in constructor.
bool operator()(ArtField* field1, ArtField* field2)
@@ -7814,7 +7814,7 @@
public:
explicit DumpClassVisitor(int flags) : flags_(flags) {}
- bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* klass) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
klass->DumpClass(LOG(ERROR), flags_);
return true;
}
@@ -7886,7 +7886,7 @@
CountClassesVisitor() : num_zygote_classes(0), num_non_zygote_classes(0) {}
void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) OVERRIDE {
ClassTable* const class_table = class_loader->GetClassTable();
if (class_table != nullptr) {
num_zygote_classes += class_table->NumZygoteClasses();
@@ -8285,7 +8285,7 @@
: method_(method),
pointer_size_(pointer_size) {}
- bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE {
+ bool operator()(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE {
if (klass->GetVirtualMethodsSliceUnchecked(pointer_size_).Contains(method_)) {
holder_ = klass;
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index c3ab8c5..4bd1bd2 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -76,7 +76,7 @@
public:
virtual ~ClassLoaderVisitor() {}
virtual void Visit(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_) = 0;
};
class ClassLinker {
@@ -129,12 +129,12 @@
// Initialize class linker by bootstraping from dex files.
bool InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> boot_class_path,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Initialize class linker from one or more boot images.
bool InitFromBootImage(std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Add an image space to the class linker, may fix up classloader fields and dex cache fields.
@@ -149,20 +149,20 @@
std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
std::string* error_msg)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool OpenImageDexFiles(gc::space::ImageSpace* space,
std::vector<std::unique_ptr<const DexFile>>* out_dex_files,
std::string* error_msg)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds a class by its descriptor, loading it if necessary.
// If class_loader is null, searches boot_class_path_.
mirror::Class* FindClass(Thread* self,
const char* descriptor,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds a class in the path class loader, loading it if necessary without using JNI. Hash
@@ -176,18 +176,18 @@
size_t hash,
Handle<mirror::ClassLoader> class_loader,
mirror::Class** result)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds a class by its descriptor using the "system" class loader, ie by searching the
// boot_class_path_.
mirror::Class* FindSystemClass(Thread* self, const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds the array class given for the element class.
mirror::Class* FindArrayClass(Thread* self, mirror::Class** element_class)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Returns true if the class linker is initialized.
@@ -202,7 +202,7 @@
Handle<mirror::ClassLoader> class_loader,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Finds a class by its descriptor, returning null if it isn't yet loaded
@@ -212,65 +212,65 @@
size_t hash,
mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds all the classes with the given descriptor, regardless of ClassLoader.
void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Class* FindPrimitiveClass(char type) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* FindPrimitiveClass(char type) REQUIRES_SHARED(Locks::mutator_lock_);
// General class unloading is not supported, this is used to prune
// unwanted classes during image writing.
bool RemoveClass(const char* descriptor, mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpAllClasses(int flags)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) REQUIRES(!Locks::classlinker_classes_lock_);
size_t NumLoadedClasses()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::String* ResolveString(uint32_t string_idx, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a String with the given index from the DexFile, storing the
// result in the DexCache.
mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find a String with the given index from the DexFile, storing the
// result in the DexCache if found. Return null if not found.
mirror::String* LookupString(const DexFile& dex_file, uint32_t string_idx,
Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::Class* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a Type with the given index from the DexFile, storing the
// result in the DexCache. The referrer is used to identify the
// target DexCache and ClassLoader to use for resolution.
mirror::Class* ResolveType(uint16_t type_idx, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
mirror::Class* ResolveType(uint16_t type_idx, ArtField* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a type with the given ID from the DexFile, storing the
@@ -281,7 +281,7 @@
uint16_t type_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Determine whether a dex cache result should be trusted, or an IncompatibleClassChangeError
@@ -303,11 +303,11 @@
Handle<mirror::ClassLoader> class_loader,
ArtMethod* referrer,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtMethod* GetResolvedMethod(uint32_t method_idx, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// This returns the class referred to by GetMethodId(method_idx).class_idx_. This might be
// different than the declaring class of the resolved method due to copied
@@ -315,25 +315,25 @@
mirror::Class* ResolveReferencedClassOfMethod(uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
template <ResolveMode kResolveMode>
ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, ArtMethod* referrer, InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtMethod* ResolveMethodWithoutInvokeType(const DexFile& dex_file,
uint32_t method_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::Class* field_declaring_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* GetResolvedField(uint32_t field_idx, mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* ResolveField(uint32_t field_idx, ArtMethod* referrer, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
@@ -344,7 +344,7 @@
ArtField* ResolveField(const DexFile& dex_file, uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Resolve a field with a given ID from the DexFile, storing the
@@ -355,12 +355,12 @@
uint32_t field_idx,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Get shorty from method index without resolution. Used to do handlerization.
const char* MethodShorty(uint32_t method_idx, ArtMethod* referrer, uint32_t* length)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true on success, false if there's an exception pending.
// can_run_clinit=false allows the compiler to attempt to init a class,
@@ -369,22 +369,22 @@
Handle<mirror::Class> c,
bool can_init_fields,
bool can_init_parents)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// Initializes classes that have instances in the image but that have
// <clinit> methods so they could not be initialized by the compiler.
void RunRootClinits()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
mirror::DexCache* RegisterDexFile(const DexFile& dex_file,
mirror::ClassLoader* class_loader)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const std::vector<const DexFile*>& GetBootClassPath() {
return boot_class_path_;
@@ -392,49 +392,49 @@
void VisitClasses(ClassVisitor* visitor)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Less efficient variant of VisitClasses that copies the class_table_ into secondary storage
// so that it can visit individual classes without holding the
// Locks::classlinker_classes_lock_. As the Locks::classlinker_classes_lock_ isn't held this code
// can race with insertion and deletion of classes while the visitor is being called.
void VisitClassesWithoutClassesLock(ClassVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void VisitClassRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::DexCache* FindDexCache(Thread* self,
const DexFile& dex_file,
bool allow_failure = false)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FixupDexCaches(ArtMethod* resolution_method)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate an instance of a java.lang.Object.
mirror::Object* AllocObject(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// TODO: replace this with multiple methods that allocate the correct managed type.
template <class T>
mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
LengthPrefixedArray<ArtField>* AllocArtFieldArray(Thread* self,
@@ -446,33 +446,33 @@
size_t length);
mirror::PointerArray* AllocPointerArray(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self,
size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
void VerifyClass(Thread* self,
Handle<mirror::Class> klass,
LogSeverity log_level = LogSeverity::NONE)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
bool VerifyClassUsingOatFile(const DexFile& dex_file,
mirror::Class* klass,
mirror::Class::Status& oat_file_class_status)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void ResolveClassExceptionHandlerTypes(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void ResolveMethodExceptionHandlerTypes(ArtMethod* klass)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
mirror::Class* CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa,
@@ -481,31 +481,31 @@
jobject loader,
jobjectArray methods,
jobjectArray throws)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string GetDescriptorForProxy(mirror::Class* proxy_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ArtMethod* FindMethodForProxy(mirror::Class* proxy_class, ArtMethod* proxy_method)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the oat code for a method when its class isn't yet initialized
const void* GetQuickOatCodeFor(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get compiled code for a method, return null if no code
// exists. This is unlike Get..OatCodeFor which will return a bridge
// or interpreter entrypoint.
const void* GetOatMethodQuickCodeFor(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const OatFile::OatMethod FindOatMethodFor(ArtMethod* method, bool* found)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
- mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Class* GetClassRoot(ClassRoot class_root) REQUIRES_SHARED(Locks::mutator_lock_);
static const char* GetClassRootDescriptor(ClassRoot class_root);
@@ -524,20 +524,20 @@
// Set the entrypoints up for method to the given code.
void SetEntryPointsToCompiledCode(ArtMethod* method, const void* method_code) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set the entrypoints up for method to the enter the interpreter.
void SetEntryPointsToInterpreter(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Attempts to insert a class into a class table. Returns null if
// the class was inserted, otherwise returns an existing class with
// the same descriptor and ClassLoader.
mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::ObjectArray<mirror::Class>* GetClassRoots() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::ObjectArray<mirror::Class>* GetClassRoots() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::ObjectArray<mirror::Class>* class_roots = class_roots_.Read();
DCHECK(class_roots != nullptr);
return class_roots;
@@ -546,25 +546,25 @@
// Move all of the boot image classes into the class table for faster lookups.
void AddBootImageClassesToClassTable()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add image classes to the class table.
void AddImageClassesToClassTable(std::vector<gc::space::ImageSpace*> image_spaces,
mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Move the class table to the pre-zygote table to reduce memory usage. This works by ensuring
// that no more classes are ever added to the pre zygote table which makes it that the pages
// always remain shared dirty instead of private dirty.
void MoveClassTableToPreZygote()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Creates a GlobalRef PathClassLoader that can be used to load classes from the given dex files.
// Note: the objects are not completely set up. Do not use this outside of tests and the compiler.
jobject CreatePathClassLoader(Thread* self, const std::vector<const DexFile*>& dex_files)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
PointerSize GetImagePointerSize() const {
@@ -574,37 +574,37 @@
// Used by image writer for checking.
bool ClassInClassTable(mirror::Class* klass)
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* CreateRuntimeMethod(LinearAlloc* linear_alloc);
// Clear the ArrayClass cache. This is necessary when cleaning up for the image, as the cache
// entries are roots, but potentially not image classes.
- void DropFindArrayClassCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DropFindArrayClassCache() REQUIRES_SHARED(Locks::mutator_lock_);
// Clean up class loaders, this needs to happen after JNI weak globals are cleared.
void CleanupClassLoaders()
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unlike GetOrCreateAllocatorForClassLoader, GetAllocatorForClassLoader asserts that the
// allocator for this class loader is already created.
LinearAlloc* GetAllocatorForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the linear alloc for a class loader if it is already allocated, otherwise allocate and
// set it. TODO: Consider using a lock other than classlinker_classes_lock_.
LinearAlloc* GetOrCreateAllocatorForClassLoader(mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// May be called with null class_loader due to legacy code. b/27954959
void InsertDexFileInToClassLoader(mirror::Object* dex_file, mirror::ClassLoader* class_loader)
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::set<DexCacheResolvedClasses> GetResolvedClasses(bool ignore_boot_classes)
REQUIRES(!dex_lock_);
@@ -615,14 +615,14 @@
static bool IsBootClassLoader(ScopedObjectAccessAlreadyRunnable& soa,
mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* AddMethodToConflictTable(mirror::Class* klass,
ArtMethod* conflict_method,
ArtMethod* interface_method,
ArtMethod* method,
bool force_new_conflict_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a conflict table with a specified capacity.
ImtConflictTable* CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc);
@@ -634,23 +634,23 @@
// Create the IMT and conflict tables for a class.
- void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillIMTAndConflictTables(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Clear class table strong roots (other than classes themselves). This is done by dex2oat to
// allow pruning dex caches.
void ClearClassTableStrongRoots() const
REQUIRES(!Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Throw the class initialization failure recorded when first trying to initialize the given
// class.
void ThrowEarlierClassFailure(mirror::Class* c, bool wrap_in_no_class_def = false)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Get the actual holding class for a copied method. Pretty slow, don't call often.
mirror::Class* GetHoldingClassOfCopiedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
struct DexCacheData {
// Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
@@ -677,67 +677,67 @@
Handle<mirror::Class> klass,
Handle<mirror::Class> supertype)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DeleteClassLoader(Thread* self, const ClassLoaderData& data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitClassLoaders(ClassLoaderVisitor* visitor) const
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
void VisitClassesInternal(ClassVisitor* visitor)
- SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::classlinker_classes_lock_, Locks::mutator_lock_);
// Returns the number of zygote and image classes.
size_t NumZygoteClasses() const
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of classes that are neither zygote nor image classes.
size_t NumNonZygoteClasses() const
REQUIRES(Locks::classlinker_classes_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FinishInit(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
// For early bootstrapping by Init
mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, uint32_t class_size)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Alloc* convenience functions to avoid needing to pass in mirror::Class*
// values that are known to the ClassLinker such as
// kObjectArrayClass and kJavaLangString etc.
mirror::Class* AllocClass(Thread* self, uint32_t class_size)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::DexCache* AllocDexCache(Thread* self,
const DexFile& dex_file,
LinearAlloc* linear_alloc)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
mirror::Class* CreateArrayClass(Thread* self,
const char* descriptor,
size_t hash,
Handle<mirror::ClassLoader> class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_, !Roles::uninterruptible_);
void AppendToBootClassPath(Thread* self, const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
// Precomputes size needed for Class, in the case of a non-temporary class this size must be
@@ -751,86 +751,86 @@
const DexFile::ClassDef& dex_class_def,
Handle<mirror::Class> klass,
mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadClass(Thread* self,
const DexFile& dex_file,
const DexFile::ClassDef& dex_class_def,
Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadClassMembers(Thread* self,
const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
const OatFile::OatClass* oat_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadField(const ClassDataItemIterator& it, Handle<mirror::Class> klass, ArtField* dst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LoadMethod(Thread* self,
const DexFile& dex_file,
const ClassDataItemIterator& it,
Handle<mirror::Class> klass, ArtMethod* dst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void FixupStaticTrampolines(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupStaticTrampolines(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on
// error and sets found to false.
OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
REQUIRES(dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::DexCache* FindDexCacheLocked(Thread* self, const DexFile& dex_file, bool allow_failure)
REQUIRES(dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool InitializeClass(Thread* self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
bool InitializeDefaultInterfaceRecursive(Thread* self,
Handle<mirror::Class> klass,
bool can_run_clinit,
bool can_init_parents)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool WaitForInitializeClass(Handle<mirror::Class> klass,
Thread* self,
ObjectLock<mirror::Class>& lock);
bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsSameDescriptorInDifferentClassContexts(Thread* self,
const char* descriptor,
Handle<mirror::ClassLoader> class_loader1,
Handle<mirror::ClassLoader> class_loader2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsSameMethodSignatureInDifferentClassContexts(Thread* self,
ArtMethod* method,
mirror::Class* klass1,
mirror::Class* klass2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkClass(Thread* self,
const char* descriptor,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
MutableHandle<mirror::Class>* h_new_class_out)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
bool LinkSuperClass(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
bool LinkMethods(Thread* self,
@@ -838,12 +838,12 @@
Handle<mirror::ObjectArray<mirror::Class>> interfaces,
bool* out_new_conflict,
ArtMethod** out_imt)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Does anything needed to make sure that the compiler will not generate a direct invoke to this
// method. Should only be called on non-invokable methods.
void EnsureThrowsInvocationError(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// A wrapper class representing the result of a method translation used for linking methods and
// updating superclass default methods. For each method in a class's vtable there are 4 states it
@@ -937,14 +937,14 @@
Thread* self,
Handle<mirror::Class> klass,
/*out*/std::unordered_map<size_t, MethodTranslation>* default_translations)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets up the interface lookup table (IFTable) in the correct order to allow searching for
// default methods.
bool SetupInterfaceLookupTable(Thread* self,
Handle<mirror::Class> klass,
Handle<mirror::ObjectArray<mirror::Class>> interfaces)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
enum class DefaultMethodSearchResult {
@@ -976,7 +976,7 @@
ArtMethod* target_method,
Handle<mirror::Class> klass,
/*out*/ArtMethod** out_default_method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets the imt entries and fixes up the vtable for the given class by linking all the interface
// methods. See LinkVirtualMethods for an explanation of what default_translations is.
@@ -986,67 +986,67 @@
const std::unordered_map<size_t, MethodTranslation>& default_translations,
bool* out_new_conflict,
ArtMethod** out_imt)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkStaticFields(Thread* self, Handle<mirror::Class> klass, size_t* class_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkInstanceFields(Thread* self, Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void LinkCode(ArtMethod* method,
const OatFile::OatClass* oat_class,
uint32_t class_def_method_index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckProxyConstructor(ArtMethod* constructor) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckProxyMethod(ArtMethod* method, ArtMethod* prototype) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// For use by ImageWriter to find DexCaches for its roots
ReaderWriterMutex* DexLock()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
LOCK_RETURNED(dex_lock_) {
return &dex_lock_;
}
- size_t GetDexCacheCount() SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
+ size_t GetDexCacheCount() REQUIRES_SHARED(Locks::mutator_lock_, dex_lock_) {
return dex_caches_.size();
}
const std::list<DexCacheData>& GetDexCachesData()
- SHARED_REQUIRES(Locks::mutator_lock_, dex_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, dex_lock_) {
return dex_caches_;
}
void CreateProxyConstructor(Handle<mirror::Class> klass, ArtMethod* out)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateProxyMethod(Handle<mirror::Class> klass, ArtMethod* prototype, ArtMethod* out)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Ensures that methods have the kAccSkipAccessChecks bit set. We use the
// kAccVerificationAttempted bit on the class access flags to determine whether this has been done
// before.
void EnsureSkipAccessChecksMethods(Handle<mirror::Class> c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* LookupClassFromBootImage(const char* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Register a class loader and create its class table and allocator. Should not be called if
// these are already created.
void RegisterClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::classlinker_classes_lock_);
// Returns null if not found.
ClassTable* ClassTableForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Insert a new class table if not found.
ClassTable* InsertClassTableForClassLoader(mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::classlinker_classes_lock_);
// EnsureResolved is called to make sure that a class in the class_table_ has been resolved
@@ -1057,24 +1057,24 @@
// the class passed in.
mirror::Class* EnsureResolved(Thread* self, const char* descriptor, mirror::Class* klass)
WARN_UNUSED
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!dex_lock_);
void FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetClassRoot(ClassRoot class_root, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the quick generic JNI stub for testing.
const void* GetRuntimeQuickGenericJniStub() const;
bool CanWeInitializeClass(mirror::Class* klass, bool can_init_statics, bool can_init_parents)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateClassMethods(mirror::Class* klass,
LengthPrefixedArray<ArtMethod>* new_methods)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
// new_class_set is the set of classes that were read from the class table section in the image.
@@ -1087,12 +1087,12 @@
bool* out_forward_dex_cache_array,
std::string* out_error_msg)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Check that c1 == FindSystemClass(self, descriptor). Abort with class dumps otherwise.
void CheckSystemClass(Thread* self, Handle<mirror::Class> c1, const char* descriptor)
REQUIRES(!dex_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets imt_ref appropriately for LinkInterfaceMethods.
// If there is no method in the imt location of imt_ref it will store the given method there.
@@ -1102,7 +1102,7 @@
ArtMethod* imt_conflict_method,
ArtMethod* current_method,
/*out*/bool* new_conflict,
- /*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ /*out*/ArtMethod** imt_ref) REQUIRES_SHARED(Locks::mutator_lock_);
void FillIMTFromIfTable(mirror::IfTable* if_table,
ArtMethod* unimplemented_method,
@@ -1111,13 +1111,13 @@
bool create_conflict_tables,
bool ignore_copied_methods,
/*out*/bool* new_conflict,
- /*out*/ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ /*out*/ArtMethod** imt) REQUIRES_SHARED(Locks::mutator_lock_);
void FillImtFromSuperClass(Handle<mirror::Class> klass,
ArtMethod* unimplemented_method,
ArtMethod* imt_conflict_method,
bool* new_conflict,
- ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod** imt) REQUIRES_SHARED(Locks::mutator_lock_);
std::vector<const DexFile*> boot_class_path_;
std::vector<std::unique_ptr<const DexFile>> boot_dex_files_;
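The annotation being renamed throughout these declarations, REQUIRES_SHARED(Locks::mutator_lock_), is a Clang thread-safety attribute macro: it states that the annotated function may only be called while the named capability is held in shared (reader) mode, so the static analysis can flag call sites that do not hold the mutator lock. The macro's actual definition is not part of this excerpt; as a rough sketch under that assumption, such a macro is commonly built on Clang's requires_shared_capability attribute along these lines:

  // Illustrative sketch only; the guards and fallback are assumptions,
  // not the definition used by this tree.
  #if defined(__clang__)
  #define REQUIRES_SHARED(...) \
      __attribute__((requires_shared_capability(__VA_ARGS__)))
  #else
  #define REQUIRES_SHARED(...)  // No-op when the analysis is unavailable.
  #endif

With a definition along these lines, every declaration in this excerpt keeps the same lock requirement; only the macro spelling changes.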
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 5f225be..3be39a1 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -49,7 +49,7 @@
class ClassLinkerTest : public CommonRuntimeTest {
protected:
void AssertNonExistentClass(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr);
EXPECT_TRUE(self->IsExceptionPending());
@@ -61,13 +61,13 @@
}
void AssertPrimitiveClass(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
AssertPrimitiveClass(descriptor, class_linker_->FindSystemClass(self, descriptor.c_str()));
}
void AssertPrimitiveClass(const std::string& descriptor, mirror::Class* primitive)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(primitive != nullptr);
ASSERT_TRUE(primitive->GetClass() != nullptr);
ASSERT_EQ(primitive->GetClass(), primitive->GetClass()->GetClass());
@@ -103,7 +103,7 @@
}
void AssertObjectClass(mirror::Class* JavaLangObject)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(JavaLangObject != nullptr);
ASSERT_TRUE(JavaLangObject->GetClass() != nullptr);
ASSERT_EQ(JavaLangObject->GetClass(),
@@ -161,7 +161,7 @@
void AssertArrayClass(const std::string& array_descriptor,
const std::string& component_type,
mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
StackHandleScope<2> hs(self);
Handle<mirror::ClassLoader> loader(hs.NewHandle(class_loader));
@@ -175,7 +175,7 @@
}
void AssertArrayClass(const std::string& array_descriptor, Handle<mirror::Class> array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(array.Get() != nullptr);
ASSERT_TRUE(array->GetClass() != nullptr);
ASSERT_EQ(array->GetClass(), array->GetClass()->GetClass());
@@ -226,7 +226,7 @@
ASSERT_EQ(JavaLangObject_imt, array->GetImt(pointer_size));
}
- void AssertMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void AssertMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) {
EXPECT_TRUE(method != nullptr);
EXPECT_TRUE(method->GetDeclaringClass() != nullptr);
EXPECT_TRUE(method->GetName() != nullptr);
@@ -243,7 +243,7 @@
}
void AssertField(mirror::Class* klass, ArtField* field)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
EXPECT_TRUE(field != nullptr);
EXPECT_EQ(klass, field->GetDeclaringClass());
EXPECT_TRUE(field->GetName() != nullptr);
@@ -251,7 +251,7 @@
}
void AssertClass(const std::string& descriptor, Handle<mirror::Class> klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
EXPECT_STREQ(descriptor.c_str(), klass->GetDescriptor(&temp));
if (descriptor == "Ljava/lang/Object;") {
@@ -397,7 +397,7 @@
}
void AssertDexFileClass(mirror::ClassLoader* class_loader, const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
@@ -417,7 +417,7 @@
}
void AssertDexFile(const DexFile& dex, mirror::ClassLoader* class_loader)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex.GetClassDef(i);
@@ -464,7 +464,7 @@
std::string class_descriptor;
std::vector<CheckOffset> offsets;
- bool Check() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Check() REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
mirror::Class* klass =
Runtime::Current()->GetClassLinker()->FindSystemClass(self, class_descriptor.c_str());
@@ -586,6 +586,8 @@
addOffset(OFFSETOF_MEMBER(mirror::Class, num_reference_static_fields_),
"numReferenceStaticFields");
addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_), "objectSize");
+ addOffset(OFFSETOF_MEMBER(mirror::Class, object_size_alloc_fast_path_),
+ "objectSizeAllocFastPath");
addOffset(OFFSETOF_MEMBER(mirror::Class, primitive_type_), "primitiveType");
addOffset(OFFSETOF_MEMBER(mirror::Class, reference_instance_offsets_),
"referenceInstanceOffsets");
@@ -1158,7 +1160,7 @@
}
static void CheckMethod(ArtMethod* method, bool verified)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!method->IsNative() && !method->IsAbstract()) {
EXPECT_EQ((method->GetAccessFlags() & kAccSkipAccessChecks) != 0U, verified)
<< PrettyMethod(method, true);
@@ -1166,7 +1168,7 @@
}
static void CheckVerificationAttempted(mirror::Class* c, bool preverified)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
EXPECT_EQ((c->GetAccessFlags() & kAccVerificationAttempted) != 0U, preverified)
<< "Class " << PrettyClass(c) << " not as expected";
for (auto& m : c->GetMethods(kRuntimePointerSize)) {
diff --git a/runtime/class_table.h b/runtime/class_table.h
index 6fb4206..66c241f 100644
--- a/runtime/class_table.h
+++ b/runtime/class_table.h
@@ -72,13 +72,13 @@
// Used by image writer for checking.
bool Contains(mirror::Class* klass)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Freeze the current class tables by allocating a new table and never updating or modifying the
// existing table. This helps prevent dirty pages caused by inserting after the zygote fork.
void FreezeSnapshot()
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of classes in previous snapshots.
size_t NumZygoteClasses() const REQUIRES(!lock_);
@@ -89,74 +89,74 @@
// Update a class in the table with the new class. Returns the existing class which was replaced.
mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for object marking requiring heap bitmap lock.
template<class Visitor>
void VisitRoots(Visitor& visitor)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<class Visitor>
void VisitRoots(const Visitor& visitor)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Stops visiting if the visitor returns false.
template <typename Visitor>
bool Visit(Visitor& visitor)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the first class that matches the descriptor. Returns null if there are none.
mirror::Class* Lookup(const char* descriptor, size_t hash)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the first class that matches the descriptor of klass. Returns null if there are none.
mirror::Class* LookupByDescriptor(mirror::Class* klass)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Insert(mirror::Class* klass)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InsertWithHash(mirror::Class* klass, size_t hash)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the class was found and removed, false otherwise.
bool Remove(const char* descriptor)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if we inserted the strong root, false if it already exists.
bool InsertStrongRoot(mirror::Object* obj)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Combines all of the tables into one class set.
size_t WriteToMemory(uint8_t* ptr) const
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Read a table from ptr and put it at the front of the class set.
size_t ReadFromMemory(uint8_t* ptr)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add a class set to the front of classes.
void AddClassSet(ClassSet&& set)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Clear strong roots (other than classes themselves).
void ClearStrongRoots()
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ReaderWriterMutex& GetLock() {
return lock_;
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 2d16a49..c07c03e 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -114,12 +114,12 @@
std::string GetTestDexFileName(const char* name) const;
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::unique_ptr<const DexFile> OpenTestDexFile(const char* name)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- jobject LoadDex(const char* dex_name) SHARED_REQUIRES(Locks::mutator_lock_);
+ jobject LoadDex(const char* dex_name) REQUIRES_SHARED(Locks::mutator_lock_);
std::string android_data_;
std::string dalvik_cache_;
@@ -195,6 +195,12 @@
DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
};
+#define TEST_DISABLED_FOR_TARGET() \
+ if (kIsTargetBuild) { \
+ printf("WARNING: TEST DISABLED FOR TARGET\n"); \
+ return; \
+ }
+
#define TEST_DISABLED_FOR_MIPS() \
if (kRuntimeISA == kMips) { \
printf("WARNING: TEST DISABLED FOR MIPS\n"); \
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index e1da23c..77362a5 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -36,7 +36,7 @@
namespace art {
static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (referrer != nullptr) {
std::string location(referrer->GetLocation());
if (!location.empty()) {
@@ -48,7 +48,7 @@
static void ThrowException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
std::string vmsg;
@@ -64,7 +64,7 @@
static void ThrowWrappedException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
if (args != nullptr) {
std::string vmsg;
@@ -379,7 +379,7 @@
static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
const DexFile& dex_file,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
@@ -413,7 +413,7 @@
}
static bool IsValidImplicitCheck(uintptr_t addr, ArtMethod* method, const Instruction& instr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!CanDoImplicitNullCheckOn(addr)) {
return false;
}
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index cbd338d..ab25543 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -34,193 +34,193 @@
// AbstractMethodError
void ThrowAbstractMethodError(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowAbstractMethodError(uint32_t method_idx, const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ArithmeticException
-void ThrowArithmeticExceptionDivideByZero() SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+void ThrowArithmeticExceptionDivideByZero() REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCircularityError
void ThrowClassCircularityError(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCircularityError(mirror::Class* c, const char* fmt, ...)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowClassCastException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// ClassFormatError
void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessError
void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
ArtMethod* called,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, ArtMethod* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorField(mirror::Class* referrer, ArtField* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessErrorFinalField(ArtMethod* referrer, ArtField* accessed)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalAccessException
void ThrowIllegalAccessException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IllegalArgumentException
void ThrowIllegalArgumentException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IncompatibleClassChangeError
void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType found_type,
ArtMethod* method, ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceSuper(ArtMethod* method,
mirror::Class* target_class,
mirror::Object* this_object,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(ArtMethod* interface_method,
mirror::Object* this_object,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorField(ArtField* resolved_field, bool is_static,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowIncompatibleClassChangeErrorForMethodConflict(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// IOException
void ThrowIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowWrappedIOException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// LinkageError
void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowWrappedLinkageError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNegativeArraySizeException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NoSuchFieldError
void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
const StringPiece& type, const StringPiece& name)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NoSuchMethodError
void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece& name,
const Signature& signature)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNoSuchMethodError(uint32_t method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// NullPointerException
void ThrowNullPointerExceptionForFieldAccess(ArtField* field,
bool is_read)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionForMethodAccess(ArtMethod* method,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerExceptionFromDexPC(bool check_address = false, uintptr_t addr = 0)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
void ThrowNullPointerException(const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// RuntimeException
void ThrowRuntimeException(const char* fmt, ...)
__attribute__((__format__(__printf__, 1, 2)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// Stack overflow.
-void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+void ThrowStackOverflowError(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// StringIndexOutOfBoundsException
void ThrowStringIndexOutOfBoundsException(int index, int length)
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
// VerifyError
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) COLD_ATTR;
} // namespace art
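Each of these throw helpers is annotated REQUIRES_SHARED(Locks::mutator_lock_), so the caller must already hold the mutator lock in shared mode, typically by entering a scoped object access region before touching managed objects or raising the exception. A minimal, hypothetical call site is sketched below; ScopedObjectAccess and the JNIEnv parameter are illustrative assumptions (only the Unchecked/AlreadyRunnable variants appear in this excerpt):

  // Hypothetical caller, not part of this patch.
  void ThrowExampleNpe(JNIEnv* env) {
    ScopedObjectAccess soa(env);           // Acquires shared mutator_lock_.
    ThrowNullPointerException("example");  // Annotation is now satisfied.
  }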
diff --git a/runtime/compiler_callbacks.h b/runtime/compiler_callbacks.h
index a39d682..ee797e0 100644
--- a/runtime/compiler_callbacks.h
+++ b/runtime/compiler_callbacks.h
@@ -38,7 +38,7 @@
virtual ~CompilerCallbacks() { }
virtual void MethodVerified(verifier::MethodVerifier* verifier)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void ClassRejected(ClassReference ref) = 0;
// Return true if we should attempt to relocate to a random base address if we have not already
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index a5b0689..9f3c2aa 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -73,7 +73,7 @@
// copied from some other class). This ensures that the debugger does not get confused as to which
// method we are in.
static ArtMethod* GetCanonicalMethod(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(!m->IsDefault())) {
return m;
} else {
@@ -95,7 +95,7 @@
deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
}
- Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
+ Breakpoint(const Breakpoint& other) REQUIRES_SHARED(Locks::mutator_lock_)
: method_(other.method_),
dex_pc_(other.dex_pc_),
deoptimization_kind_(other.deoptimization_kind_) {}
@@ -116,7 +116,7 @@
// Returns true if the method of this breakpoint and the passed in method should be considered the
// same. That is, they are either the same method or they are copied from the same method.
- bool IsInMethod(ArtMethod* m) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInMethod(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_) {
return method_ == GetCanonicalMethod(m);
}
@@ -130,7 +130,7 @@
};
static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
return os;
}
@@ -142,7 +142,7 @@
void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: posting location events is a suspension point and native method entry stubs aren't.
return;
@@ -168,7 +168,7 @@
void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t dex_pc, const JValue& return_value)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: posting location events is a suspension point and native method entry stubs aren't.
return;
@@ -185,7 +185,7 @@
void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method, uint32_t dex_pc)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
@@ -193,7 +193,7 @@
void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
uint32_t new_dex_pc)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
// We also listen to kMethodExited instrumentation event and the current instruction is a
// RETURN so we know the MethodExited method is going to be called right after us. Like in
@@ -214,25 +214,25 @@
void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::PostException(exception_object);
}
// We only care about branches in the Jit.
void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected branch event in debugger " << PrettyMethod(method)
<< " " << dex_pc << ", " << dex_pc_offset;
}
@@ -243,29 +243,29 @@
ArtMethod* method,
uint32_t dex_pc,
ArtMethod*)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected invoke event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
private:
static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
return instruction->IsReturn();
}
- static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool IsListeningToDexPcMoved() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
}
- static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool IsListeningToMethodExit() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
}
static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return (Dbg::GetInstrumentationEvents() & event) != 0;
}
@@ -329,7 +329,7 @@
static bool IsBreakpoint(ArtMethod* m, uint32_t dex_pc)
REQUIRES(!Locks::breakpoint_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].IsInMethod(m)) {
@@ -349,7 +349,7 @@
}
static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -364,7 +364,7 @@
}
static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
if (o == nullptr) {
*error = JDWP::ERR_INVALID_OBJECT;
@@ -380,7 +380,7 @@
static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
if (thread_peer == nullptr) {
@@ -411,14 +411,14 @@
}
static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
const char* descriptor = klass->GetDescriptor(&temp);
return BasicTagFromDescriptor(descriptor);
}
static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(c != nullptr);
if (c->IsArrayClass()) {
return JDWP::JT_ARRAY;
@@ -822,7 +822,7 @@
OwnedMonitorVisitor(Thread* thread, Context* context,
std::vector<JDWP::ObjectId>* monitor_vector,
std::vector<uint32_t>* stack_depth_vector)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
current_stack_depth(0),
monitors(monitor_vector),
@@ -839,7 +839,7 @@
}
static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
visitor->monitors->push_back(gRegistry->Add(owned_monitor));
visitor->stack_depths->push_back(visitor->current_stack_depth);
@@ -1013,7 +1013,7 @@
public:
explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
- bool operator()(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool operator()(mirror::Class* c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (!c->IsPrimitive()) {
classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
}
@@ -1333,17 +1333,17 @@
}
static JDWP::MethodId ToMethodId(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(GetCanonicalMethod(m)));
}
static ArtField* FromFieldId(JDWP::FieldId fid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}
static ArtMethod* FromMethodId(JDWP::MethodId mid)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}
@@ -1436,7 +1436,7 @@
* the end.
*/
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1454,7 +1454,7 @@
}
static size_t GetMethodNumArgRegistersIncludingThis(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t num_registers = ArtMethod::NumArgRegisters(method->GetShorty());
if (!method->IsStatic()) {
++num_registers;
@@ -1467,7 +1467,7 @@
* slots to dex style argument placement.
*/
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
// We should not get here for a method without code (native, proxy or abstract). Log it and
@@ -1617,7 +1617,7 @@
bool with_generic;
static void Callback(void* context, const DexFile::LocalInfo& entry)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
uint16_t slot = entry.reg_;
@@ -1706,7 +1706,7 @@
}
static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
JValue field_value;
switch (fieldType) {
@@ -1753,7 +1753,7 @@
static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Class* c = DecodeClass(ref_type_id, &error);
if (ref_type_id != 0 && c == nullptr) {
@@ -1809,7 +1809,7 @@
}
static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
// Debugging only happens at runtime so we know we are not running in a transaction.
static constexpr bool kNoTransactionMode = false;
@@ -1880,7 +1880,7 @@
static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JDWP::JdwpError error;
mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
@@ -2008,7 +2008,7 @@
static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
error);
if (*error != JDWP::ERR_NONE) {
@@ -2067,7 +2067,7 @@
static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
std::vector<JDWP::ObjectId>* child_thread_group_ids)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(thread_group != nullptr);
// Get the int "ngroups" count of this thread group...
@@ -2221,7 +2221,7 @@
static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
mirror::Object* desired_thread_group, mirror::Object* peer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Do we want threads from all thread groups?
if (desired_thread_group == nullptr) {
return true;
@@ -2265,7 +2265,7 @@
}
}
-static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
+static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
struct CountStackDepthVisitor : public StackVisitor {
explicit CountStackDepthVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
@@ -2308,7 +2308,7 @@
public:
GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
JDWP::ExpandBuf* buf_in)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0),
start_frame_(start_frame_in),
@@ -2317,7 +2317,7 @@
expandBufAdd4BE(buf_, frame_count_);
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (GetMethod()->IsRuntimeMethod()) {
return true; // The debugger can't do anything useful with a frame that has no Method*.
}
@@ -2433,7 +2433,7 @@
struct GetThisVisitor : public StackVisitor {
GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_object(nullptr),
frame_id(frame_id_in) {}
@@ -2475,7 +2475,7 @@
class FindFrameVisitor FINAL : public StackVisitor {
public:
FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_id_(frame_id),
error_(JDWP::ERR_INVALID_FRAMEID) {}
@@ -2551,14 +2551,14 @@
constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
static std::string GetStackContextAsString(const StackVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
PrettyMethod(visitor.GetMethod()).c_str());
}
static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
<< GetStackContextAsString(visitor);
return kStackFrameLocalAccessError;
@@ -2720,7 +2720,7 @@
template<typename T>
static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
JDWP::JdwpTag tag, T value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Failed to write " << tag << " local " << value
<< " (0x" << std::hex << value << ") into register v" << vreg
<< GetStackContextAsString(visitor);
@@ -2814,7 +2814,7 @@
}
static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(location != nullptr);
if (m == nullptr) {
memset(location, 0, sizeof(*location));
@@ -2892,7 +2892,7 @@
class CatchLocationFinder : public StackVisitor {
public:
CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
exception_(exception),
handle_scope_(self),
@@ -2903,7 +2903,7 @@
throw_dex_pc_(DexFile::kDexNoIndex) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
DCHECK(method != nullptr);
if (method->IsRuntimeMethod()) {
@@ -2937,15 +2937,15 @@
return true; // Continue stack walk.
}
- ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* GetCatchMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
return catch_method_;
}
- ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* GetThrowMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
return throw_method_;
}
- mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetThisAtThrow() REQUIRES_SHARED(Locks::mutator_lock_) {
return this_at_throw_.Get();
}
@@ -3247,7 +3247,7 @@
}
static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.IsInMethod(m)) {
return &breakpoint;
@@ -3264,7 +3264,7 @@
// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(ArtMethod* m,
DeoptimizationRequest::Kind deoptimization_kind)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
for (const Breakpoint& breakpoint : gBreakpoints) {
if (breakpoint.IsInMethod(m)) {
CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
@@ -3293,7 +3293,7 @@
static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
ArtMethod* m,
const Breakpoint** existing_brkpt)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!Dbg::RequiresDeoptimization()) {
// We already run in interpreter-only mode so we don't need to deoptimize anything.
VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
@@ -3550,11 +3550,11 @@
class NeedsDeoptimizationVisitor : public StackVisitor {
public:
explicit NeedsDeoptimizationVisitor(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
needs_deoptimization_(false) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// The visitor is meant to be used when handling an exception from compiled code only.
CHECK(!IsShadowFrame()) << "We only expect to visit compiled frame: " << PrettyMethod(GetMethod());
ArtMethod* method = GetMethod();
@@ -3616,7 +3616,7 @@
public:
ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
REQUIRES(!Locks::thread_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) :
+ REQUIRES_SHARED(Locks::mutator_lock_) :
thread_(nullptr),
error_(JDWP::ERR_NONE),
self_suspend_(false),
@@ -3678,7 +3678,7 @@
// Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
// is for step-out.
struct SingleStepStackVisitor : public StackVisitor {
- explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit SingleStepStackVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
stack_depth(0),
method(nullptr),
@@ -4559,7 +4559,7 @@
needHeader_ = false;
}
- void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
if (pieceLenField_ == nullptr) {
// Flush immediately post Reset (maybe back-to-back Flush). Ignore.
CHECK(needHeader_);
@@ -4575,13 +4575,13 @@
}
static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_,
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
}
static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
}
@@ -4601,7 +4601,7 @@
}
// Returns true if the object is not an empty chunk.
- bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ProcessRecord(void* start, size_t used_bytes) REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken
// in the following code not to allocate memory, by ensuring buf_ is of the correct size
if (used_bytes == 0) {
@@ -4638,7 +4638,7 @@
}
void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
uint8_t state = ExamineNativeObject(start);
AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
@@ -4647,7 +4647,7 @@
}
void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
if (ProcessRecord(start, used_bytes)) {
// Determine the type of this chunk.
// OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
@@ -4659,7 +4659,7 @@
}
void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Make sure there's enough room left in the buffer.
// We need to use two bytes for every fractional 256 allocation units used by the chunk plus
// 17 bytes for any header.
@@ -4692,12 +4692,12 @@
*p_++ = length - 1;
}
- uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint8_t ExamineNativeObject(const void* p) REQUIRES_SHARED(Locks::mutator_lock_) {
return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
}
uint8_t ExamineJavaObject(mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
if (o == nullptr) {
return HPSG_STATE(SOLIDITY_FREE, 0);
}
@@ -4747,7 +4747,7 @@
};
static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
HeapChunkContext::HeapChunkJavaCallback(
obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
@@ -4901,7 +4901,7 @@
};
static const char* GetMethodSourceFile(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method != nullptr);
const char* source_file = method->GetDeclaringClassSourceFile();
return (source_file != nullptr) ? source_file : "";
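
Note on the AppendChunk hunk above: its comment describes how much buffer space one heap chunk needs in the DDM HPSG encoding, 2 bytes per started group of 256 allocation units plus up to 17 bytes of header. A minimal, self-contained sketch of that arithmetic follows; it assumes the protocol's 8-byte allocation unit, and the helper name is illustrative rather than ART's.

    #include <cstddef>
    #include <cstdio>

    // Illustrative only: bytes an AppendChunk-style writer must reserve for a
    // chunk of chunk_bytes, per the comment quoted above. Assumes 8-byte units.
    constexpr std::size_t kAllocUnit = 8;
    constexpr std::size_t kMaxHeaderBytes = 17;

    constexpr std::size_t BytesNeededForChunk(std::size_t chunk_bytes) {
      std::size_t units = (chunk_bytes + kAllocUnit - 1) / kAllocUnit;
      std::size_t groups = (units + 255) / 256;  // "every fractional 256 allocation units"
      return 2 * groups + kMaxHeaderBytes;
    }

    static_assert(BytesNeededForChunk(4096) == 21, "512 units -> 2 groups -> 4 + 17");

    int main() {
      std::printf("%zu\n", BytesNeededForChunk(4096));
      return 0;
    }
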
diff --git a/runtime/debugger.h b/runtime/debugger.h
index e908304..7398c4e 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -80,7 +80,7 @@
JDWP::ExpandBuf* const reply;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
@@ -156,15 +156,15 @@
DeoptimizationRequest() : kind_(kNothing), instrumentation_event_(0), method_(nullptr) {}
DeoptimizationRequest(const DeoptimizationRequest& other)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: kind_(other.kind_), instrumentation_event_(other.instrumentation_event_) {
// Create a new JNI global reference for the method.
SetMethod(other.Method());
}
- ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* Method() const REQUIRES_SHARED(Locks::mutator_lock_);
- void SetMethod(ArtMethod* m) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetMethod(ArtMethod* m) REQUIRES_SHARED(Locks::mutator_lock_);
// Name 'Kind()' would collide with the above enum name.
Kind GetKind() const {
@@ -240,7 +240,7 @@
// Returns true if a method has any breakpoints.
static bool MethodHasAnyBreakpoints(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::breakpoint_lock_);
static bool IsDisposed() {
return gDisposed;
@@ -260,178 +260,178 @@
* Class, Object, Array
*/
static std::string GetClassName(JDWP::RefTypeId id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static std::string GetClassName(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void GetClassList(std::vector<JDWP::RefTypeId>* classes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
uint32_t* pStatus, std::string* pDescriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSignature(JDWP::RefTypeId ref_type_id, std::string* signature)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetSourceFile(JDWP::RefTypeId ref_type_id, std::string* source_file)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static size_t GetTagWidth(JDWP::JdwpTag tag);
static JDWP::JdwpError GetArrayLength(JDWP::ObjectId array_id, int32_t* length)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputArray(JDWP::ObjectId array_id, int offset, int count,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
JDWP::Request* request)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError CreateString(const std::string& str, JDWP::ObjectId* new_string_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
JDWP::ObjectId* new_array_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
//
// Event filtering.
//
static bool MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchLocation(const JDWP::JdwpLocation& expected_location,
const JDWP::EventLocation& event_location)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
ArtField* event_field)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
//
// Monitors.
//
static JDWP::JdwpError GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetOwnedMonitors(JDWP::ObjectId thread_id,
std::vector<JDWP::ObjectId>* monitors,
std::vector<uint32_t>* stack_depths)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetContendedMonitor(JDWP::ObjectId thread_id,
JDWP::ObjectId* contended_monitor)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
//
// Heap.
//
static JDWP::JdwpError GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
std::vector<uint64_t>* counts)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
std::vector<JDWP::ObjectId>* instances)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
std::vector<JDWP::ObjectId>* referring_objects)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError DisableCollection(JDWP::ObjectId object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError EnableCollection(JDWP::ObjectId object_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError IsCollected(JDWP::ObjectId object_id, bool* is_collected)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
//
// Methods and fields.
//
static std::string GetMethodName(JDWP::MethodId method_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredFields(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredMethods(JDWP::RefTypeId ref_type_id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError OutputDeclaredInterfaces(JDWP::RefTypeId ref_type_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputLineTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId method_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputVariableTable(JDWP::RefTypeId ref_type_id, JDWP::MethodId id, bool with_generic,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetBytecodes(JDWP::RefTypeId class_id, JDWP::MethodId method_id,
std::vector<uint8_t>* bytecodes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static std::string GetFieldName(JDWP::FieldId field_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpTag GetFieldBasicTag(JDWP::FieldId field_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpTag GetStaticFieldBasicTag(JDWP::FieldId field_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
uint64_t value, int width)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError StringToUtf8(JDWP::ObjectId string_id, std::string* str)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Thread, ThreadGroup, Frame
*/
static JDWP::JdwpError GetThreadName(JDWP::ObjectId thread_id, std::string* name)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadGroupName(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadGroupParent(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
JDWP::ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::ObjectId GetSystemThreadGroupId()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpThreadStatus ToJdwpThreadStatus(ThreadState state);
static JDWP::JdwpError GetThreadStatus(JDWP::ObjectId thread_id,
@@ -446,16 +446,16 @@
// Fills 'thread_ids' with the threads in the given thread group. If thread_group_id == 0,
// returns all threads.
static void GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result)
REQUIRES(!Locks::thread_list_lock_);
static JDWP::JdwpError GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
size_t frame_count, JDWP::ExpandBuf* buf)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- static JDWP::ObjectId GetThreadSelfId() SHARED_REQUIRES(Locks::mutator_lock_);
- static JDWP::ObjectId GetThreadId(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
+ static JDWP::ObjectId GetThreadSelfId() REQUIRES_SHARED(Locks::mutator_lock_);
+ static JDWP::ObjectId GetThreadId(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);
static void SuspendVM()
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
@@ -467,17 +467,17 @@
static void ResumeThread(JDWP::ObjectId thread_id)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SuspendSelf();
static JDWP::JdwpError GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
JDWP::ObjectId* result)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetLocalValues(JDWP::Request* request)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError Interrupt(JDWP::ObjectId thread_id)
REQUIRES(!Locks::thread_list_lock_);
@@ -493,42 +493,42 @@
};
static void PostFieldAccessEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
ArtField* f)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostFieldModificationEvent(ArtMethod* m, int dex_pc,
mirror::Object* this_object, ArtField* f,
const JValue* field_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostException(mirror::Throwable* exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostThreadStart(Thread* t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostThreadDeath(Thread* t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostClassPrepare(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void UpdateDebugger(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc,
int event_flags, const JValue* return_value)
- REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Indicates whether we need deoptimization for debugging.
static bool RequiresDeoptimization();
// Records deoptimization request in the queue.
static void RequestDeoptimization(const DeoptimizationRequest& req)
- REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
// request and finally resumes all threads.
static void ManageDeoptimization()
- REQUIRES(!Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Breakpoints.
static void WatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
- REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static void UnwatchLocation(const JDWP::JdwpLocation* pLoc, DeoptimizationRequest* req)
- REQUIRES(!Locks::breakpoint_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::breakpoint_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Forced interpreter checkers for single-step and continue support.
@@ -537,7 +537,7 @@
// Indicates whether we need to force the use of interpreter to invoke a method.
// This allows to single-step or continue into the called method.
static bool IsForcedInterpreterNeededForCalling(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -548,7 +548,7 @@
// method through the resolution trampoline. This allows to single-step or continue into
// the called method.
static bool IsForcedInterpreterNeededForResolution(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -559,7 +559,7 @@
// a method through the resolution trampoline. This allows to deoptimize the stack for
// debugging when we returned from the called method.
static bool IsForcedInstrumentationNeededForResolution(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive()) {
return false;
}
@@ -570,7 +570,7 @@
// interpreter into the runtime. This allows to deoptimize the stack and continue
// execution with interpreter for debugging.
static bool IsForcedInterpreterNeededForUpcall(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
return false;
}
@@ -583,7 +583,7 @@
// Note: the interpreter will start by handling the exception when executing
// the deoptimized frames.
static bool IsForcedInterpreterNeededForException(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsDebuggerActive() && !thread->HasDebuggerShadowFrames()) {
return false;
}
@@ -593,9 +593,9 @@
// Single-stepping.
static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
JDWP::JdwpStepDepth depth)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void UnconfigureStep(JDWP::ObjectId thread_id)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Invoke support
@@ -616,7 +616,7 @@
uint64_t arg_values[], JDWP::JdwpTag* arg_types,
uint32_t options)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Called by the event thread to execute a method prepared by the JDWP thread in the given
// DebugInvokeReq object. Once the invocation completes, the event thread attaches a reply
@@ -633,29 +633,29 @@
* DDM support.
*/
static void DdmSendThreadNotification(Thread* t, uint32_t type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSetThreadNotification(bool enable)
REQUIRES(!Locks::thread_list_lock_);
static bool DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen);
- static void DdmConnected() SHARED_REQUIRES(Locks::mutator_lock_);
- static void DdmDisconnected() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DdmConnected() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void DdmDisconnected() REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendChunk(uint32_t type, size_t len, const uint8_t* buf)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Allocation tracking support.
*/
static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
static jbyteArray GetRecentAllocations()
- REQUIRES(!Locks::alloc_tracker_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::alloc_tracker_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static void DumpRecentAllocations() REQUIRES(!Locks::alloc_tracker_lock_);
enum HpifWhen {
@@ -665,7 +665,7 @@
HPIF_WHEN_EVERY_GC = 3
};
static int DdmHandleHpifChunk(HpifWhen when)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
enum HpsgWhen {
HPSG_WHEN_NEVER = 0,
@@ -678,79 +678,79 @@
static bool DdmHandleHpsgNhsgChunk(HpsgWhen when, HpsgWhat what, bool native);
static void DdmSendHeapInfo(HpifWhen reason)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DdmSendHeapSegments(bool native)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static ObjectRegistry* GetObjectRegistry() {
return gRegistry;
}
static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::FieldId ToFieldId(const ArtField* f)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static JDWP::JdwpState* GetJdwpState();
- static uint32_t GetInstrumentationEvents() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uint32_t GetInstrumentationEvents() REQUIRES_SHARED(Locks::mutator_lock_) {
return instrumentation_events_;
}
private:
static void ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id,
JDWP::JdwpTag result_tag, uint64_t result_value,
JDWP::ObjectId exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError GetLocalValue(const StackVisitor& visitor,
ScopedObjectAccessUnchecked& soa, int slot,
JDWP::JdwpTag tag, uint8_t* buf, size_t width)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static JDWP::JdwpError SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
JDWP::JdwpTag tag, uint64_t value, size_t width)
- REQUIRES(!Locks::thread_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::thread_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
- static void DdmBroadcast(bool connect) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DdmBroadcast(bool connect) REQUIRES_SHARED(Locks::mutator_lock_);
static void PostThreadStartOrStop(Thread*, uint32_t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void PostLocationEvent(ArtMethod* method, int pcOffset,
mirror::Object* thisPtr, int eventFlags,
const JValue* return_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void ProcessDeoptimizationRequest(const DeoptimizationRequest& request)
REQUIRES(Locks::mutator_lock_, Roles::uninterruptible_);
static void RequestDeoptimizationLocked(const DeoptimizationRequest& req)
- REQUIRES(Locks::deoptimization_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::deoptimization_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsForcedInterpreterNeededForExceptionImpl(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Indicates whether the debugger is making requests.
static bool gDebuggerActive;
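
The debugger.cc and debugger.h hunks above are a mechanical rename of the thread-safety annotation from SHARED_REQUIRES to REQUIRES_SHARED, matching the macro naming used in Clang's Thread Safety Analysis documentation. A generic sketch of what such macros expand to follows; it is based on the Clang docs, not on ART's actual macro definitions, and the lock and functions are made up.

    // Generic sketch, after Clang's Thread Safety Analysis documentation.
    #define CAPABILITY(x) __attribute__((capability(x)))
    #define REQUIRES(...) __attribute__((requires_capability(__VA_ARGS__)))
    #define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))

    class CAPABILITY("mutex") Mutex {};

    Mutex mutator_lock;
    int state = 0;

    // Caller must hold the lock at least for shared (reader) access.
    int ReadState() REQUIRES_SHARED(mutator_lock) { return state; }
    // Caller must hold the lock exclusively.
    void WriteState(int v) REQUIRES(mutator_lock) { state = v; }

    int main() { return 0; }  // the analysis is compile-time only (-Wthread-safety)
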
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index 90c678c..ebadd79 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -42,6 +42,7 @@
#include "dex_file_verifier.h"
#include "globals.h"
#include "handle_scope-inl.h"
+#include "jvalue.h"
#include "leb128.h"
#include "mirror/field.h"
#include "mirror/method.h"
@@ -68,6 +69,11 @@
{'0', '3', '8', '\0'}
};
+struct DexFile::AnnotationValue {
+ JValue value_;
+ uint8_t type_;
+};
+
bool DexFile::GetChecksum(const char* filename, uint32_t* checksum, std::string* error_msg) {
CHECK(checksum != nullptr);
uint32_t magic;
@@ -222,6 +228,10 @@
nullptr,
oat_dex_file,
error_msg);
+ if (dex_file == nullptr) {
+ return nullptr;
+ }
+
if (verify && !DexFileVerifier::Verify(dex_file.get(),
dex_file->Begin(),
dex_file->Size(),
@@ -230,7 +240,32 @@
error_msg)) {
return nullptr;
}
+ return dex_file;
+}
+std::unique_ptr<const DexFile> DexFile::Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg) {
+ ScopedTrace trace(std::string("Open dex file from mapped-memory ") + location);
+ std::unique_ptr<const DexFile> dex_file = OpenMemory(location,
+ location_checksum,
+ std::move(mem_map),
+ error_msg);
+ if (dex_file == nullptr) {
+ return nullptr;
+ }
+
+ if (verify && !DexFileVerifier::Verify(dex_file.get(),
+ dex_file->Begin(),
+ dex_file->Size(),
+ location.c_str(),
+ verify_checksum,
+ error_msg)) {
+ return nullptr;
+ }
return dex_file;
}
@@ -263,7 +298,7 @@
/*low_4gb*/false,
location,
error_msg));
- if (map.get() == nullptr) {
+ if (map == nullptr) {
DCHECK(!error_msg->empty());
return nullptr;
}
@@ -277,7 +312,9 @@
const Header* dex_header = reinterpret_cast<const Header*>(map->Begin());
- std::unique_ptr<const DexFile> dex_file(OpenMemory(location, dex_header->checksum_, map.release(),
+ std::unique_ptr<const DexFile> dex_file(OpenMemory(location,
+ dex_header->checksum_,
+ std::move(map),
error_msg));
if (dex_file.get() == nullptr) {
*error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location,
@@ -314,13 +351,13 @@
std::unique_ptr<const DexFile> DexFile::OpenMemory(const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
std::string* error_msg) {
return OpenMemory(mem_map->Begin(),
mem_map->Size(),
location,
location_checksum,
- mem_map,
+ std::move(mem_map),
nullptr,
error_msg);
}
@@ -350,9 +387,11 @@
*error_code = ZipOpenErrorCode::kExtractToMemoryError;
return nullptr;
}
- std::unique_ptr<const DexFile> dex_file(OpenMemory(location, zip_entry->GetCrc32(), map.release(),
- error_msg));
- if (dex_file.get() == nullptr) {
+ std::unique_ptr<const DexFile> dex_file(OpenMemory(location,
+ zip_entry->GetCrc32(),
+ std::move(map),
+ error_msg));
+ if (dex_file == nullptr) {
*error_msg = StringPrintf("Failed to open dex file '%s' from memory: %s", location.c_str(),
error_msg->c_str());
*error_code = ZipOpenErrorCode::kDexFileError;
@@ -437,14 +476,14 @@
size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file,
std::string* error_msg) {
DCHECK(base != nullptr);
DCHECK_NE(size, 0U);
CHECK_ALIGNED(base, 4); // various dex file structures must be word aligned
std::unique_ptr<DexFile> dex_file(
- new DexFile(base, size, location, location_checksum, mem_map, oat_dex_file));
+ new DexFile(base, size, location, location_checksum, std::move(mem_map), oat_dex_file));
if (!dex_file->Init(error_msg)) {
dex_file.reset();
}
@@ -454,13 +493,13 @@
DexFile::DexFile(const uint8_t* base, size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file)
: begin_(base),
size_(size),
location_(location),
location_checksum_(location_checksum),
- mem_map_(mem_map),
+ mem_map_(std::move(mem_map)),
header_(reinterpret_cast<const Header*>(base)),
string_ids_(reinterpret_cast<const StringId*>(base + header_->string_ids_off_)),
type_ids_(reinterpret_cast<const TypeId*>(base + header_->type_ids_off_)),
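
The dex_file.cc hunks above switch OpenMemory and the DexFile constructor from a raw MemMap* to std::unique_ptr<MemMap>, so ownership of the mapping is transferred explicitly with std::move at every hand-off. A minimal sketch of that pattern follows, using simplified stand-in types rather than ART's real MemMap and DexFile.

    #include <cstddef>
    #include <memory>
    #include <string>
    #include <utility>

    // Simplified stand-ins; ART's real MemMap and DexFile are far richer.
    struct MemMap { std::size_t size = 0; };

    class DexFile {
     public:
      // Ownership of the mapping transfers into the DexFile, as in the diff.
      DexFile(std::string location, std::unique_ptr<MemMap> mem_map)
          : location_(std::move(location)), mem_map_(std::move(mem_map)) {}
     private:
      std::string location_;
      std::unique_ptr<MemMap> mem_map_;  // released automatically with the DexFile
    };

    std::unique_ptr<DexFile> OpenMemory(std::string location,
                                        std::unique_ptr<MemMap> mem_map) {
      // std::move is required at each hand-off; a raw owning pointer cannot leak.
      return std::make_unique<DexFile>(std::move(location), std::move(mem_map));
    }

    int main() {
      auto map = std::make_unique<MemMap>();
      std::unique_ptr<DexFile> dex = OpenMemory("in-memory", std::move(map));
      return dex == nullptr;  // 0 on success
    }
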
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 59339ef..ebbde0a 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -19,7 +19,6 @@
#include <memory>
#include <string>
-#include <unordered_map>
#include <vector>
#include "base/logging.h"
@@ -28,7 +27,6 @@
#include "globals.h"
#include "invoke_type.h"
#include "jni.h"
-#include "jvalue.h"
#include "mirror/object_array.h"
#include "modifiers.h"
#include "utf.h"
@@ -398,11 +396,6 @@
DISALLOW_COPY_AND_ASSIGN(AnnotationItem);
};
- struct AnnotationValue {
- JValue value_;
- uint8_t type_;
- };
-
enum AnnotationResultStyle { // private
kAllObjects,
kPrimitivesOrObjects,
@@ -422,10 +415,6 @@
std::string* error_msg,
std::vector<std::unique_ptr<const DexFile>>* dex_files);
- // Checks whether the given file has the dex magic, or is a zip file with a classes.dex entry.
- // If this function returns false, Open will not succeed. The inverse is not true, however.
- static bool MaybeDex(const char* filename);
-
// Opens .dex file, backed by existing memory
static std::unique_ptr<const DexFile> Open(const uint8_t* base, size_t size,
const std::string& location,
@@ -435,6 +424,18 @@
bool verify_checksum,
std::string* error_msg);
+ // Opens .dex file that has been memory-mapped by the caller.
+ static std::unique_ptr<const DexFile> Open(const std::string& location,
+ uint32_t location_checksum,
+ std::unique_ptr<MemMap> mem_map,
+ bool verify,
+ bool verify_checksum,
+ std::string* error_msg);
+
+ // Checks whether the given file has the dex magic, or is a zip file with a classes.dex entry.
+ // If this function returns false, Open will not succeed. The inverse is not true, however.
+ static bool MaybeDex(const char* filename);
+
// Open all classesXXX.dex files from a zip archive.
static bool OpenFromZip(const ZipArchive& zip_archive,
const std::string& location,
@@ -934,108 +935,104 @@
}
const AnnotationSetItem* FindAnnotationSetForField(ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationForField(ArtField* field, Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_class) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* method) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMethodAnnotationPresent(ArtMethod* method,
Handle<mirror::Class> annotation_class,
uint32_t visibility = kDexVisibilityRuntime) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationSetItem* FindAnnotationSetForClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass,
Handle<mirror::Class> annotation_class,
const uint8_t** annotation) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationItem* GetAnnotationItemFromAnnotationSet(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set,
uint32_t visibility,
Handle<mirror::Class> annotation_class)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationObjectFromAnnotationSet(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set,
uint32_t visibility,
Handle<mirror::Class> annotation_class) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass,
const AnnotationItem* annotation_item,
const char* annotation_name,
Handle<mirror::Class> array_class,
uint32_t expected_type) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set)
- const SHARED_REQUIRES(Locks::mutator_lock_);
+ const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet(Handle<mirror::Class> klass,
const AnnotationSetItem* annotation_set,
uint32_t visibility) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList(Handle<mirror::Class> klass,
const AnnotationSetRefList* set_ref_list, uint32_t size) const
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr,
- AnnotationValue* annotation_value, Handle<mirror::Class> return_class,
- DexFile::AnnotationResultStyle result_style) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass,
const uint8_t** annotation) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const AnnotationItem* SearchAnnotationSet(const AnnotationSetItem* annotation_set,
const char* descriptor, uint32_t visibility) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const uint8_t* SearchEncodedAnnotation(const uint8_t* annotation, const char* name) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool SkipAnnotationValue(const uint8_t** annotation_ptr) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Debug info opcodes and constants
enum {
@@ -1072,7 +1069,7 @@
//
// This is used by runtime; therefore use art::Method not art::DexFile::Method.
int32_t GetLineNumFromPC(ArtMethod* method, uint32_t rel_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns false if there is no debugging information or if it cannot be decoded.
bool DecodeDebugLocalInfo(const CodeItem* code_item, bool is_static, uint32_t method_idx,
@@ -1175,7 +1172,7 @@
// Opens a .dex file at the given address backed by a MemMap
static std::unique_ptr<const DexFile> OpenMemory(const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
std::string* error_msg);
// Opens a .dex file at the given address, optionally backed by a MemMap
@@ -1183,14 +1180,14 @@
size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file,
std::string* error_msg);
DexFile(const uint8_t* base, size_t size,
const std::string& location,
uint32_t location_checksum,
- MemMap* mem_map,
+ std::unique_ptr<MemMap> mem_map,
const OatDexFile* oat_dex_file);
// Top-level initializer that calls other Init methods.
@@ -1203,6 +1200,12 @@
// whether the string contains the separator character.
static bool IsMultiDexLocation(const char* location);
+ struct AnnotationValue;
+
+ bool ProcessAnnotationValue(Handle<mirror::Class> klass, const uint8_t** annotation_ptr,
+ AnnotationValue* annotation_value, Handle<mirror::Class> return_class,
+ DexFile::AnnotationResultStyle result_style) const
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The base address of the memory mapping.
const uint8_t* const begin_;
@@ -1518,10 +1521,10 @@
Handle<mirror::ClassLoader>* class_loader,
ClassLinker* linker,
const DexFile::ClassDef& class_def)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive>
- void ReadValueToField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_);
+ void ReadValueToField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_);
bool HasNext() const { return pos_ < array_size_; }
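
The dex_file.h hunks above move AnnotationValue out of the public class body, keep only a private forward declaration with its definition in dex_file.cc, and relocate ProcessAnnotationValue, which is what lets the header drop its jvalue.h include. A small sketch of this header-dependency pattern follows; the class, struct, and header names are hypothetical.

    // sketch of foo.h: declare the nested struct, define it only in foo.cc,
    // and drop the header include its members would otherwise force on users.
    class Foo {
     public:
      bool Process() const;
     private:
      struct Detail;                       // forward declaration only
      bool ProcessDetail(Detail* d) const;
    };

    // sketch of foo.cc
    // #include "heavy_dependency.h"       // hypothetical; needed only here now
    struct Foo::Detail {
      int value;  // members that needed the heavy header, like JValue did
    };

    bool Foo::ProcessDetail(Detail* d) const { return d->value != 0; }
    bool Foo::Process() const {
      Detail d{1};
      return ProcessDetail(&d);
    }

    int main() { return Foo().Process() ? 0 : 1; }
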
diff --git a/runtime/dex_file_test.cc b/runtime/dex_file_test.cc
index 2704d8a..2328e3d 100644
--- a/runtime/dex_file_test.cc
+++ b/runtime/dex_file_test.cc
@@ -22,6 +22,7 @@
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
#include "dex_file-inl.h"
+#include "mem_map.h"
#include "os.h"
#include "scoped_thread_state_change.h"
#include "thread-inl.h"
@@ -61,7 +62,7 @@
255, 255, 255, 255
};
-static inline uint8_t* DecodeBase64(const char* src, size_t* dst_size) {
+static inline std::vector<uint8_t> DecodeBase64(const char* src) {
std::vector<uint8_t> tmp;
uint32_t t = 0, y = 0;
int g = 3;
@@ -73,13 +74,11 @@
c = 0;
// prevent g < 0 which would potentially allow an overflow later
if (--g < 0) {
- *dst_size = 0;
- return nullptr;
+ return std::vector<uint8_t>();
}
} else if (g != 3) {
// we only allow = to be at the end
- *dst_size = 0;
- return nullptr;
+ return std::vector<uint8_t>();
}
t = (t << 6) | c;
if (++y == 4) {
@@ -94,17 +93,9 @@
}
}
if (y != 0) {
- *dst_size = 0;
- return nullptr;
+ return std::vector<uint8_t>();
}
- std::unique_ptr<uint8_t[]> dst(new uint8_t[tmp.size()]);
- if (dst_size != nullptr) {
- *dst_size = tmp.size();
- } else {
- *dst_size = 0;
- }
- std::copy(tmp.begin(), tmp.end(), dst.get());
- return dst.release();
+ return tmp;
}
// Although this is the same content logically as the Nested test dex,
@@ -175,14 +166,13 @@
static void DecodeAndWriteDexFile(const char* base64, const char* location) {
// decode base64
CHECK(base64 != nullptr);
- size_t length;
- std::unique_ptr<uint8_t[]> dex_bytes(DecodeBase64(base64, &length));
- CHECK(dex_bytes.get() != nullptr);
+ std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ CHECK_NE(dex_bytes.size(), 0u);
// write to provided file
std::unique_ptr<File> file(OS::CreateEmptyFile(location));
CHECK(file.get() != nullptr);
- if (!file->WriteFully(dex_bytes.get(), length)) {
+ if (!file->WriteFully(dex_bytes.data(), dex_bytes.size())) {
PLOG(FATAL) << "Failed to write base64 as dex file";
}
if (file->FlushCloseOrErase() != 0) {
@@ -208,9 +198,67 @@
return dex_file;
}
+static std::unique_ptr<const DexFile> OpenDexFileInMemoryBase64(const char* base64,
+ const char* location,
+ uint32_t location_checksum) {
+ CHECK(base64 != nullptr);
+ std::vector<uint8_t> dex_bytes = DecodeBase64(base64);
+ CHECK_NE(dex_bytes.size(), 0u);
+
+ std::string error_message;
+ std::unique_ptr<MemMap> region(MemMap::MapAnonymous("test-region",
+ nullptr,
+ dex_bytes.size(),
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_message));
+ memcpy(region->Begin(), dex_bytes.data(), dex_bytes.size());
+ std::unique_ptr<const DexFile> dex_file(DexFile::Open(location,
+ location_checksum,
+ std::move(region),
+ /* verify */ true,
+ /* verify_checksum */ true,
+ &error_message));
+ CHECK(dex_file != nullptr) << error_message;
+ return dex_file;
+}
+
TEST_F(DexFileTest, Header) {
ScratchFile tmp;
std::unique_ptr<const DexFile> raw(OpenDexFileBase64(kRawDex, tmp.GetFilename().c_str()));
+ ASSERT_TRUE(raw != nullptr);
+
+ const DexFile::Header& header = raw->GetHeader();
+ // TODO: header.magic_
+ EXPECT_EQ(0x00d87910U, header.checksum_);
+ // TODO: header.signature_
+ EXPECT_EQ(904U, header.file_size_);
+ EXPECT_EQ(112U, header.header_size_);
+ EXPECT_EQ(0U, header.link_size_);
+ EXPECT_EQ(0U, header.link_off_);
+ EXPECT_EQ(15U, header.string_ids_size_);
+ EXPECT_EQ(112U, header.string_ids_off_);
+ EXPECT_EQ(7U, header.type_ids_size_);
+ EXPECT_EQ(172U, header.type_ids_off_);
+ EXPECT_EQ(2U, header.proto_ids_size_);
+ EXPECT_EQ(200U, header.proto_ids_off_);
+ EXPECT_EQ(1U, header.field_ids_size_);
+ EXPECT_EQ(224U, header.field_ids_off_);
+ EXPECT_EQ(3U, header.method_ids_size_);
+ EXPECT_EQ(232U, header.method_ids_off_);
+ EXPECT_EQ(2U, header.class_defs_size_);
+ EXPECT_EQ(256U, header.class_defs_off_);
+ EXPECT_EQ(584U, header.data_size_);
+ EXPECT_EQ(320U, header.data_off_);
+
+ EXPECT_EQ(header.checksum_, raw->GetLocationChecksum());
+}
+
+TEST_F(DexFileTest, HeaderInMemory) {
+ ScratchFile tmp;
+ std::unique_ptr<const DexFile> raw =
+ OpenDexFileInMemoryBase64(kRawDex, tmp.GetFilename().c_str(), 0x00d87910U);
ASSERT_TRUE(raw.get() != nullptr);
const DexFile::Header& header = raw->GetHeader();
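
The dex_file_test.cc hunks above change DecodeBase64 to return std::vector<uint8_t> by value, with an empty vector signalling failure, instead of a new[]-allocated buffer plus a size out-parameter. A small sketch of the new calling convention follows; the decoder body is a stub standing in for the real one.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Stub decoder: the point is the calling convention, where the vector owns
    // the bytes and an empty result means "decode failed".
    static std::vector<uint8_t> DecodeBase64(const char* src) {
      if (src == nullptr || *src == '\0') {
        return std::vector<uint8_t>();            // failure: no out-parameter needed
      }
      return std::vector<uint8_t>(src, src + 3);  // pretend-decoded bytes
    }

    int main() {
      std::vector<uint8_t> dex_bytes = DecodeBase64("AAAA");
      if (dex_bytes.empty()) {
        return 1;
      }
      // data()/size() replace the old raw pointer plus length pair.
      std::printf("%zu bytes\n", dex_bytes.size());
      return 0;
    }
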
diff --git a/runtime/dex_instruction_utils.h b/runtime/dex_instruction_utils.h
index 2849cd8..72d8244 100644
--- a/runtime/dex_instruction_utils.h
+++ b/runtime/dex_instruction_utils.h
@@ -134,74 +134,54 @@
return Instruction::ADD_INT_2ADDR <= code && code <= Instruction::REM_DOUBLE_2ADDR;
}
-// TODO: Remove the #if guards below when we fully migrate to C++14.
-
constexpr bool IsInvokeInstructionRange(Instruction::Code opcode) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionInvoke(opcode));
-#endif
return opcode >= Instruction::INVOKE_VIRTUAL_RANGE;
}
constexpr DexInvokeType InvokeInstructionType(Instruction::Code opcode) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionInvoke(opcode));
-#endif
return static_cast<DexInvokeType>(IsInvokeInstructionRange(opcode)
? (opcode - Instruction::INVOKE_VIRTUAL_RANGE)
: (opcode - Instruction::INVOKE_VIRTUAL));
}
constexpr DexMemAccessType IGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionIGet(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::IGET);
}
constexpr DexMemAccessType IPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionIPut(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::IPUT);
}
constexpr DexMemAccessType SGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionSGet(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::SGET);
}
constexpr DexMemAccessType SPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionSPut(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::SPUT);
}
constexpr DexMemAccessType AGetMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionAGet(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::AGET);
}
constexpr DexMemAccessType APutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionAPut(code));
-#endif
return static_cast<DexMemAccessType>(code - Instruction::APUT);
}
constexpr DexMemAccessType IGetOrIPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionIGetOrIPut(code));
-#endif
return (code >= Instruction::IPUT) ? IPutMemAccessType(code) : IGetMemAccessType(code);
}
-static inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Code code) {
+inline DexMemAccessType IGetQuickOrIPutQuickMemAccessType(Instruction::Code code) {
DCHECK(IsInstructionIGetQuickOrIPutQuick(code));
switch (code) {
case Instruction::IGET_QUICK: case Instruction::IPUT_QUICK:
@@ -225,16 +205,12 @@
}
constexpr DexMemAccessType SGetOrSPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionSGetOrSPut(code));
-#endif
return (code >= Instruction::SPUT) ? SPutMemAccessType(code) : SGetMemAccessType(code);
}
constexpr DexMemAccessType AGetOrAPutMemAccessType(Instruction::Code code) {
-#if __cplusplus >= 201402 // C++14 allows the DCHECK() in constexpr functions.
DCHECK(IsInstructionAGetOrAPut(code));
-#endif
return (code >= Instruction::APUT) ? APutMemAccessType(code) : AGetMemAccessType(code);
}
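
The dex_instruction_utils.h hunks above drop the "#if __cplusplus >= 201402" guards because C++14 relaxed constexpr allows statements such as DCHECK inside constexpr function bodies. A minimal stand-alone sketch of the idiom follows, using plain assert and made-up names; compile with -std=c++14 or later.

    #include <cassert>

    // A runtime assertion may appear in a constexpr function since C++14. In
    // constant evaluation the assertion must hold; at run time it behaves normally.
    constexpr int MemAccessType(int code, int base) {
      assert(code >= base);  // allowed since C++14, like the DCHECKs above
      return code - base;
    }

    static_assert(MemAccessType(7, 5) == 2, "constant evaluation still works");

    int main() {
      return MemAccessType(7, 5) - 2;  // runtime use also fine; returns 0
    }
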
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 08fec91..d03a9d8 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -44,7 +44,7 @@
const InlineInfo& inline_info,
const InlineInfoEncoding& encoding,
uint8_t inlining_depth)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// This method is being used by artQuickResolutionTrampoline, before it sets up
// the passed parameters in a GC friendly way. Therefore we must never be
// suspended while executing it.
@@ -121,7 +121,7 @@
}
inline ArtMethod* GetCalleeSaveMethodCaller(Thread* self, Runtime::CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetCalleeSaveMethodCaller(
self->GetManagedStack()->GetTopQuickFrame(), type, true /* do_caller_check */);
}
@@ -457,7 +457,7 @@
// Explicit template declarations of FindFieldFromCode for all field access types.
#define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
-template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
+template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
ArtMethod* referrer, \
Thread* self, size_t expected_size) \
@@ -640,7 +640,7 @@
// Explicit template declarations of FindMethodFromCode for all invoke types.
#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE \
+ template REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE \
ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
mirror::Object** this_object, \
ArtMethod* referrer, \
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index fd1c02f..4056ec5 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -43,7 +43,7 @@
ArtMethod* referrer,
Thread* self,
bool access_check)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(component_count < 0)) {
ThrowNegativeArraySizeException(component_count);
return nullptr; // Failure
@@ -259,7 +259,7 @@
ArtMethod* GetCalleeSaveMethodCaller(ArtMethod** sp,
Runtime::CalleeSaveType type,
bool do_caller_check)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(*sp, Runtime::Current()->GetCalleeSaveMethod(type));
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, type);
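
The EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL and EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL hunks above show why the annotation rename also has to touch macro bodies: the attribute is stamped into every explicit instantiation the macro emits. A toy sketch of the explicit-instantiation-behind-a-macro pattern follows; the names are made up.

    // Toy template plus a macro that stamps out explicit instantiations for it.
    template <bool kAccessCheck>
    int FindThing(int idx) {
      return kAccessCheck ? idx : -idx;
    }

    #define EXPLICIT_FIND_THING_DECL(_access_check) \
      template int FindThing<_access_check>(int idx);

    EXPLICIT_FIND_THING_DECL(true)
    EXPLICIT_FIND_THING_DECL(false)
    #undef EXPLICIT_FIND_THING_DECL

    int main() { return FindThing<true>(0); }
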
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index a28376f..f88e81d 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -47,12 +47,12 @@
ALWAYS_INLINE inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
ArtMethod* method,
Thread* self, bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to a Class. If it
// cannot be resolved, throw an error. If it can, use it to create an instance.
@@ -63,21 +63,21 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kAccessCheck>
@@ -85,7 +85,7 @@
int32_t component_count,
ArtMethod* method,
bool* slow_path)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the context of a calling Method, use its DexCache to resolve a type to an array Class. If
// it cannot be resolved, throw an error. If it can, use it to create an array.
@@ -97,7 +97,7 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
@@ -105,13 +105,13 @@
ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, int32_t component_count,
ArtMethod* method, Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx,
int32_t component_count,
@@ -119,7 +119,7 @@
Thread* self,
bool access_check,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Type of find field operation for fast and slow case.
enum FindFieldType {
@@ -136,45 +136,45 @@
template<FindFieldType type, bool access_check>
inline ArtField* FindFieldFromCode(
uint32_t field_idx, ArtMethod* referrer, Thread* self, size_t expected_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<InvokeType type, bool access_check>
inline ArtMethod* FindMethodFromCode(
uint32_t method_idx, mirror::Object** this_object, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fast path field resolution that can't initialize classes or throw exceptions.
inline ArtField* FindFieldFast(
uint32_t field_idx, ArtMethod* referrer, FindFieldType type, size_t expected_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fast path method resolution that can't throw exceptions.
inline ArtMethod* FindMethodFast(
uint32_t method_idx, mirror::Object* this_object, ArtMethod* referrer, bool access_check,
InvokeType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline mirror::Class* ResolveVerifyAndClinit(
uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: annotalysis disabled as monitor semantics are maintained in Java code.
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self)
NO_THREAD_SAFETY_ANALYSIS;
void CheckReferenceResult(mirror::Object* o, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, const char* shorty,
jobject rcvr_jobj, jobject interface_art_method_jobj,
std::vector<jvalue>& args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <typename INT_TYPE, typename FLOAT_TYPE>
inline INT_TYPE art_float_to_integral(FLOAT_TYPE f);
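Context for the rename running through this change: SHARED_REQUIRES and REQUIRES_SHARED are two spellings of the same Clang -Wthread-safety annotation, so only the macro name at each declaration changes. A minimal sketch of how such a macro can sit on top of Clang's attributes (illustrative names only, not ART's actual macros.h definitions) follows:

// Illustrative only: a REQUIRES_SHARED-style macro wired to Clang's
// thread-safety attributes. SharedMutex and the function names are made up.
#if defined(__clang__)
#define CAPABILITY(x)        __attribute__((capability(x)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define ACQUIRE_SHARED(...)  __attribute__((acquire_shared_capability(__VA_ARGS__)))
#define RELEASE_SHARED(...)  __attribute__((release_shared_capability(__VA_ARGS__)))
#else
#define CAPABILITY(x)
#define REQUIRES_SHARED(...)
#define ACQUIRE_SHARED(...)
#define RELEASE_SHARED(...)
#endif

struct CAPABILITY("mutex") SharedMutex {
  void ReaderLock() ACQUIRE_SHARED();
  void ReaderUnlock() RELEASE_SHARED();
};

void SharedMutex::ReaderLock() {}    // real locking elided in this sketch
void SharedMutex::ReaderUnlock() {}

SharedMutex mutator_lock;

// clang -Wthread-safety warns if a caller reaches this without holding
// mutator_lock at least shared.
int ReadHeapField(const int* field) REQUIRES_SHARED(mutator_lock) {
  return *field;
}

int Caller(const int* field) {
  mutator_lock.ReaderLock();
  int value = ReadHeapField(field);
  mutator_lock.ReaderUnlock();
  return value;
}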
diff --git a/runtime/entrypoints/quick/callee_save_frame.h b/runtime/entrypoints/quick/callee_save_frame.h
index a81a7e7..df37f95 100644
--- a/runtime/entrypoints/quick/callee_save_frame.h
+++ b/runtime/entrypoints/quick/callee_save_frame.h
@@ -40,32 +40,32 @@
explicit ScopedQuickEntrypointChecks(Thread *self,
bool entry_check = kIsDebugBuild,
bool exit_check = kIsDebugBuild)
- SHARED_REQUIRES(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
+ REQUIRES_SHARED(Locks::mutator_lock_) : self_(self), exit_check_(exit_check) {
if (entry_check) {
TestsOnEntry();
}
}
- ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_)
+ ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_)
: self_(kIsDebugBuild ? Thread::Current() : nullptr), exit_check_(kIsDebugBuild) {
if (kIsDebugBuild) {
TestsOnEntry();
}
}
- ~ScopedQuickEntrypointChecks() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ~ScopedQuickEntrypointChecks() REQUIRES_SHARED(Locks::mutator_lock_) {
if (exit_check_) {
TestsOnExit();
}
}
private:
- void TestsOnEntry() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void TestsOnEntry() REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
- void TestsOnExit() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void TestsOnExit() REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(self_);
self_->VerifyStack();
}
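ScopedQuickEntrypointChecks, shown above, is the RAII guard each quick entrypoint constructs first; the annotations simply follow its constructor and destructor. A placeholder sketch of the same entry/exit-check shape (the invariant below is not ART's stack verification):

// Illustrative RAII guard: run checks when the scope is entered and again
// when it is left.
#include <cassert>

class ScopedChecks {
 public:
  explicit ScopedChecks(bool entry_check = true, bool exit_check = true)
      : exit_check_(exit_check) {
    if (entry_check) {
      CheckInvariants();
    }
  }
  ~ScopedChecks() {
    if (exit_check_) {
      CheckInvariants();
    }
  }

 private:
  static void CheckInvariants() { assert(true); }  // placeholder check
  const bool exit_check_;
};

void SomeEntrypoint() {
  ScopedChecks checks;  // entry checks here, exit checks when 'checks' is destroyed
  // ... entrypoint body ...
}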
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 4686a51..dc5fd07 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -31,7 +31,7 @@
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx, kRuntimePointerSize); \
@@ -58,7 +58,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -84,7 +84,7 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, ArtMethod* method ATTRIBUTE_UNUSED, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !(instrumented_bool) && (allocator_type) == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -108,34 +108,34 @@
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocObjectFromCode<true, instrumented_bool>(type_idx, method, self, allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<false, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
return AllocArrayFromCode<true, instrumented_bool>(type_idx, component_count, method, self, \
allocator_type); \
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCode##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, false, allocator_type); \
@@ -145,7 +145,7 @@
} \
extern "C" mirror::Array* artCheckAndAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, int32_t component_count, ArtMethod* method, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (!(instrumented_bool)) { \
return CheckAndAllocArrayFromCode(type_idx, component_count, method, self, true, allocator_type); \
@@ -156,7 +156,7 @@
extern "C" mirror::String* artAllocStringFromBytesFromCode##suffix##suffix2( \
mirror::ByteArray* byte_array, int32_t high, int32_t offset, int32_t byte_count, \
Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
StackHandleScope<1> hs(self); \
Handle<mirror::ByteArray> handle_array(hs.NewHandle(byte_array)); \
@@ -165,7 +165,7 @@
} \
extern "C" mirror::String* artAllocStringFromCharsFromCode##suffix##suffix2( \
int32_t offset, int32_t char_count, mirror::CharArray* char_array, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::CharArray> handle_array(hs.NewHandle(char_array)); \
return mirror::String::AllocFromCharArray<instrumented_bool>(self, char_count, handle_array, \
@@ -173,7 +173,7 @@
} \
extern "C" mirror::String* artAllocStringFromStringFromCode##suffix##suffix2( /* NOLINT */ \
mirror::String* string, Thread* self) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
StackHandleScope<1> hs(self); \
Handle<mirror::String> handle_string(hs.NewHandle(string)); \
return mirror::String::AllocFromString<instrumented_bool>(self, handle_string->GetLength(), \
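The allocator entrypoints above are generated by token-pasting macros so that each (allocator, instrumented) combination gets its own extern "C" symbol. A stripped-down sketch of that generation pattern, with invented names and no real allocation, is:

// Sketch of the ##suffix##suffix2 token-pasting technique; the names and
// printf body are illustrative, not ART's allocator entrypoints.
#include <cstddef>
#include <cstdio>

#define GENERATE_ALLOC_ENTRYPOINT(suffix, suffix2, instrumented_bool)     \
  extern "C" void* ExampleAlloc##suffix##suffix2(size_t byte_count) {     \
    std::printf(#suffix #suffix2 ": %zu bytes, instrumented=%d\n",        \
                byte_count, static_cast<int>(instrumented_bool));         \
    return nullptr; /* real allocation elided */                          \
  }

GENERATE_ALLOC_ENTRYPOINT(DlMalloc, Plain, false)
GENERATE_ALLOC_ENTRYPOINT(DlMalloc, Instrumented, true)
GENERATE_ALLOC_ENTRYPOINT(Tlab, Plain, false)

int main() {
  ExampleAllocDlMallocPlain(16);
  ExampleAllocDlMallocInstrumented(32);
  ExampleAllocTlabPlain(64);
  return 0;
}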
diff --git a/runtime/entrypoints/quick/quick_cast_entrypoints.cc b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
index 8db69a3..2732d68 100644
--- a/runtime/entrypoints/quick/quick_cast_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_cast_entrypoints.cc
@@ -21,7 +21,7 @@
// Assignable test for code, won't throw. Null and equality tests already performed
extern "C" size_t artIsAssignableFromCode(mirror::Class* klass, mirror::Class* ref_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(klass != nullptr);
DCHECK(ref_class != nullptr);
return klass->IsAssignableFrom(ref_class) ? 1 : 0;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index f35c2fe..5b9d03b 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -23,14 +23,13 @@
#include "mirror/object_array-inl.h"
#include "mirror/object-inl.h"
#include "quick_exception_handler.h"
-#include "stack.h"
#include "thread.h"
#include "verifier/method_verifier.h"
namespace art {
NO_RETURN static void artDeoptimizeImpl(Thread* self, bool single_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (VLOG_IS_ON(deopt)) {
if (single_frame) {
// Deopt logging will be in DeoptimizeSingleFrame. It is there to take advantage of the
@@ -60,14 +59,14 @@
}
}
-extern "C" NO_RETURN void artDeoptimize(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+extern "C" NO_RETURN void artDeoptimize(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
artDeoptimizeImpl(self, false);
}
// This is called directly from compiled code by an HDeoptimize.
extern "C" NO_RETURN void artDeoptimizeFromCompiledCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// Before deoptimizing to interpreter, we must push the deoptimization context.
JValue return_value;
diff --git a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
index c045e84..2cd0331 100644
--- a/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_dexcache_entrypoints.cc
@@ -26,7 +26,7 @@
namespace art {
extern "C" mirror::Class* artInitializeStaticStorageFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called to ensure static storage base is initialized for direct static field reads and writes.
// A class may be accessing another class' fields when it doesn't have access, as access has been
// given by inheritance.
@@ -36,7 +36,7 @@
}
extern "C" mirror::Class* artInitializeTypeFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when method->dex_cache_resolved_types_[] misses.
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
@@ -44,7 +44,7 @@
}
extern "C" mirror::Class* artInitializeTypeAndVerifyAccessFromCode(uint32_t type_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when caller isn't guaranteed to have access to a type and the dex cache may be
// unpopulated.
ScopedQuickEntrypointChecks sqec(self);
@@ -53,7 +53,7 @@
}
extern "C" mirror::String* artResolveStringFromCode(int32_t string_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
auto* caller = GetCalleeSaveMethodCaller(self, Runtime::kSaveRefsOnly);
return ResolveStringFromCode(caller, string_idx);
diff --git a/runtime/entrypoints/quick/quick_entrypoints.h b/runtime/entrypoints/quick/quick_entrypoints.h
index 08e0d6e..89712a3 100644
--- a/runtime/entrypoints/quick/quick_entrypoints.h
+++ b/runtime/entrypoints/quick/quick_entrypoints.h
@@ -86,7 +86,7 @@
// barrier fast path implementations generated by the compiler to mark
// an object that is referenced by a field of a gray object.
extern "C" mirror::Object* artReadBarrierMark(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
// Read barrier entrypoint for heap references.
// This is the read barrier slow path for instance and static fields
@@ -94,11 +94,11 @@
extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
mirror::Object* obj,
uint32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
// Read barrier entrypoint for GC roots.
extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) HOT_ATTR;
+ REQUIRES_SHARED(Locks::mutator_lock_) HOT_ATTR;
} // namespace art
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 1a12bd4..5b65029 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -44,7 +44,7 @@
size_t size,
mirror::Object** obj)
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
HandleWrapper<mirror::Object> h(hs.NewHandleWrapper(obj));
ArtField* field = FindFieldFromCode<type, kAccessCheck>(field_idx, referrer, self, size);
@@ -56,7 +56,7 @@
}
extern "C" ssize_t artGetByteStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -70,7 +70,7 @@
}
extern "C" size_t artGetBooleanStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -84,7 +84,7 @@
}
extern "C" ssize_t artGetShortStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -98,7 +98,7 @@
}
extern "C" size_t artGetCharStaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -112,7 +112,7 @@
}
extern "C" size_t artGet32StaticFromCode(uint32_t field_idx, ArtMethod* referrer, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -128,7 +128,7 @@
extern "C" uint64_t artGet64StaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -144,7 +144,7 @@
extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -167,7 +167,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -188,7 +188,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -208,7 +208,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -229,7 +229,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -250,7 +250,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -271,7 +271,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -292,7 +292,7 @@
mirror::Object* obj,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -316,7 +316,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr)) {
@@ -349,7 +349,7 @@
uint16_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr)) {
@@ -382,7 +382,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr)) {
@@ -403,7 +403,7 @@
ArtMethod* referrer,
uint64_t new_value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -424,7 +424,7 @@
mirror::Object* new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
@@ -459,7 +459,7 @@
uint8_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -496,7 +496,7 @@
uint16_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -534,7 +534,7 @@
uint32_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -560,7 +560,7 @@
uint64_t new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -586,7 +586,7 @@
mirror::Object* new_value,
ArtMethod* referrer,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx,
referrer,
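Each field get/set entrypoint above follows the same two-tier shape: a non-throwing fast lookup first, then a resolving slow path. The sketch below mirrors only that shape; the cache and resolver are placeholders, not ART's FindFieldFast/FindFieldFromCode:

// Generic fast-path / slow-path lookup sketch with stand-in types.
#include <cstdint>
#include <unordered_map>

struct FieldInfo { uint32_t offset; };  // byte offset of the field in the object

class FieldTable {
 public:
  // Fast path: cache hit or nullptr; never throws, never resolves.
  const FieldInfo* FindFast(uint32_t field_idx) const {
    auto it = cache_.find(field_idx);
    return it != cache_.end() ? &it->second : nullptr;
  }

  // Slow path: resolve and cache (a real resolver could fail and throw here).
  const FieldInfo& FindOrResolve(uint32_t field_idx) {
    if (const FieldInfo* hit = FindFast(field_idx)) {
      return *hit;
    }
    FieldInfo resolved{field_idx * 4u};  // stand-in "resolution"
    return cache_.emplace(field_idx, resolved).first->second;
  }

 private:
  std::unordered_map<uint32_t, FieldInfo> cache_;
};

int32_t GetInt32Field(FieldTable& table, const int32_t* object_words, uint32_t field_idx) {
  // Same shape as the entrypoints: try the fast path, fall back to the slow one.
  const FieldInfo* info = table.FindFast(field_idx);
  if (info == nullptr) {
    info = &table.FindOrResolve(field_idx);
  }
  return object_words[info->offset / sizeof(int32_t)];
}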
diff --git a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
index 22b2fa3..f63c9c2 100644
--- a/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_fillarray_entrypoints.cc
@@ -26,7 +26,7 @@
*/
extern "C" int artHandleFillArrayDataFromCode(uint32_t payload_offset, mirror::Array* array,
ArtMethod* method, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
const uint16_t* const insns = method->GetCodeItem()->insns_;
const Instruction::ArrayDataPayload* payload =
diff --git a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
index 82d5467..fec7373 100644
--- a/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_instrumentation_entrypoints.cc
@@ -29,7 +29,7 @@
mirror::Object* this_object,
Thread* self,
uintptr_t lr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Instrumentation changes the stack. Thus, when exiting, the stack cannot be verified, so skip
// that part.
ScopedQuickEntrypointChecks sqec(self, kIsDebugBuild, false);
@@ -51,7 +51,7 @@
extern "C" TwoWordReturn artInstrumentationMethodExitFromCode(Thread* self, ArtMethod** sp,
uint64_t gpr_result,
uint64_t fpr_result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Instrumentation exit stub must not be entered with a pending exception.
CHECK(!self->IsExceptionPending()) << "Enter instrumentation exit stub with pending exception "
<< self->GetException()->Dump();
diff --git a/runtime/entrypoints/quick/quick_jni_entrypoints.cc b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
index c06824c..64f19af 100644
--- a/runtime/entrypoints/quick/quick_jni_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_jni_entrypoints.cc
@@ -78,7 +78,7 @@
}
static void PopLocalReferences(uint32_t saved_local_ref_cookie, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
if (UNLIKELY(env->check_jni)) {
env->CheckNoHeldMonitors();
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 4adb39b..b4f945a 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -23,7 +23,7 @@
extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ REQUIRES_SHARED(Locks::mutator_lock_) /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-enter)");
@@ -44,7 +44,7 @@
extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
+ REQUIRES_SHARED(Locks::mutator_lock_) /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerException("Null reference used for synchronization (monitor-exit)");
diff --git a/runtime/entrypoints/quick/quick_thread_entrypoints.cc b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
index 47b3eff..0838059 100644
--- a/runtime/entrypoints/quick/quick_thread_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_thread_entrypoints.cc
@@ -19,7 +19,7 @@
namespace art {
-extern "C" void artTestSuspendFromCode(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+extern "C" void artTestSuspendFromCode(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
// Called when suspend count check value is 0 and thread->suspend_count_ != 0
ScopedQuickEntrypointChecks sqec(self);
self->CheckSuspend();
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index ea9f7b0..67cae8a 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -24,14 +24,14 @@
// Deliver an exception that's pending on thread helping set up a callee save frame on the way.
extern "C" NO_RETURN void artDeliverPendingExceptionFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->QuickDeliverException();
}
// Called by generated code to throw an exception.
extern "C" NO_RETURN void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
/*
* exception may be null, in which case this routine should
* throw NPE. NOTE: this is a convenience for generated code,
@@ -50,7 +50,7 @@
// Called by generated code to throw a NPE exception.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
// We come from an explicit check in the generated code. This path is triggered
// only if the object is indeed null.
@@ -60,7 +60,7 @@
// Installed by a signal handler to throw a NPE exception.
extern "C" NO_RETURN void artThrowNullPointerExceptionFromSignal(uintptr_t addr, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowNullPointerExceptionFromDexPC(/* check_address */ true, addr);
@@ -70,7 +70,7 @@
// Called by generated code to throw an arithmetic divide by zero exception.
extern "C" NO_RETURN void artThrowDivZeroFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArithmeticExceptionDivideByZero();
self->QuickDeliverException();
@@ -78,7 +78,7 @@
// Called by generated code to throw an array index out of bounds exception.
extern "C" NO_RETURN void artThrowArrayBoundsFromCode(int index, int length, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
@@ -86,14 +86,14 @@
// Called by generated code to throw a string index out of bounds exception.
extern "C" NO_RETURN void artThrowStringBoundsFromCode(int index, int length, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowStringIndexOutOfBoundsException(index, length);
self->QuickDeliverException();
}
extern "C" NO_RETURN void artThrowStackOverflowFromCode(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
ThrowStackOverflowError(self);
@@ -102,7 +102,7 @@
}
extern "C" NO_RETURN void artThrowNoSuchMethodFromCode(int32_t method_idx, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowNoSuchMethodError(method_idx);
self->QuickDeliverException();
@@ -111,7 +111,7 @@
extern "C" NO_RETURN void artThrowClassCastException(mirror::Class* dest_type,
mirror::Class* src_type,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
DCHECK(!dest_type->IsAssignableFrom(src_type));
ThrowClassCastException(dest_type, src_type);
@@ -120,7 +120,7 @@
extern "C" NO_RETURN void artThrowArrayStoreException(mirror::Object* array, mirror::Object* value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ThrowArrayStoreException(value->GetClass(), array->GetClass());
self->QuickDeliverException();
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index c67379a..3043c83 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -295,7 +295,7 @@
// kRefAndArgs runtime method. Since 'this' is a reference, it is located in the
// 1st GPR.
static mirror::Object* GetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK((*sp)->IsProxyMethod());
CHECK_GT(kNumQuickGprArgs, 0u);
constexpr uint32_t kThisGprIndex = 0u; // 'this' is in the 1st GPR.
@@ -305,19 +305,19 @@
return reinterpret_cast<StackReference<mirror::Object>*>(this_arg_address)->AsMirrorPtr();
}
- static ArtMethod* GetCallingMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ArtMethod* GetCallingMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
return GetCalleeSaveMethodCaller(sp, Runtime::kSaveRefsAndArgs);
}
- static ArtMethod* GetOuterMethod(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static ArtMethod* GetOuterMethod(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* previous_sp =
reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize;
return *reinterpret_cast<ArtMethod**>(previous_sp);
}
- static uint32_t GetCallingDexPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uint32_t GetCallingDexPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
const size_t callee_frame_size = GetCalleeSaveFrameSize(kRuntimeISA, Runtime::kSaveRefsAndArgs);
ArtMethod** caller_sp = reinterpret_cast<ArtMethod**>(
@@ -344,14 +344,14 @@
}
// For the given quick ref and args quick frame, return the caller's PC.
- static uintptr_t GetCallingPc(ArtMethod** sp) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uintptr_t GetCallingPc(ArtMethod** sp) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK((*sp)->IsCalleeSaveMethod());
uint8_t* lr = reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_LrOffset;
return *reinterpret_cast<uintptr_t*>(lr);
}
QuickArgumentVisitor(ArtMethod** sp, bool is_static, const char* shorty,
- uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) :
+ uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) :
is_static_(is_static), shorty_(shorty), shorty_len_(shorty_len),
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
@@ -436,7 +436,7 @@
}
}
- void VisitArguments() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitArguments() REQUIRES_SHARED(Locks::mutator_lock_) {
// (a) 'stack_args_' should point to the first method's argument
// (b) whatever the argument type it is, the 'stack_index_' should
// be moved forward along with every visiting.
@@ -589,7 +589,7 @@
// Returns the 'this' object of a proxy method. This function is only used by StackVisitor. It
// allows using the QuickArgumentVisitor constants without moving all the code into its own module.
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return QuickArgumentVisitor::GetProxyThisObject(sp);
}
@@ -600,7 +600,7 @@
uint32_t shorty_len, ShadowFrame* sf, size_t first_arg_reg) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sf_(sf), cur_reg_(first_arg_reg) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
private:
ShadowFrame* const sf_;
@@ -643,7 +643,7 @@
}
extern "C" uint64_t artQuickToInterpreterBridge(ArtMethod* method, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Ensure we don't get thread suspension until the object arguments are safely in the shadow
// frame.
ScopedQuickEntrypointChecks sqec(self);
@@ -698,10 +698,10 @@
if (kIsDebugBuild) {
class DummyStackVisitor : public StackVisitor {
public:
- explicit DummyStackVisitor(Thread* self_in) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit DummyStackVisitor(Thread* self_in) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// Nothing to do here. In a debug build, SanityCheckFrame will do the work in the walking
// logic. Just always say we want to continue.
return true;
@@ -782,9 +782,9 @@
ScopedObjectAccessUnchecked* soa, std::vector<jvalue>* args) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa), args_(args) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -843,7 +843,7 @@
// field within the proxy object, which will box the primitive arguments and deal with error cases.
extern "C" uint64_t artQuickProxyInvokeHandler(
ArtMethod* proxy_method, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(proxy_method->IsProxyMethod()) << PrettyMethod(proxy_method);
DCHECK(receiver->GetClass()->IsProxyClass()) << PrettyMethod(proxy_method);
// Ensure we don't get thread suspension until the object arguments are safely in jobjects.
@@ -899,9 +899,9 @@
uint32_t shorty_len, ScopedObjectAccessUnchecked* soa) :
QuickArgumentVisitor(sp, is_static, shorty, shorty_len), soa_(soa) {}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FixupReferences() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FixupReferences() REQUIRES_SHARED(Locks::mutator_lock_);
private:
ScopedObjectAccessUnchecked* const soa_;
@@ -932,7 +932,7 @@
// Lazily resolve a method for quick. Called by stub code.
extern "C" const void* artQuickResolutionTrampoline(
ArtMethod* called, mirror::Object* receiver, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// The resolution trampoline stashes the resolved method into the callee-save frame to transport
// it. Thus, when exiting, the stack cannot be verified (as the resolved method most likely
// does not have the same stack layout as the callee-save method).
@@ -1309,7 +1309,7 @@
return gpr_index_ > 0;
}
- void AdvanceHandleScope(mirror::Object* ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void AdvanceHandleScope(mirror::Object* ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t handle = PushHandle(ptr);
if (HaveHandleScopeGpr()) {
gpr_index_--;
@@ -1497,7 +1497,7 @@
void PushStack(uintptr_t val) {
delegate_->PushStack(val);
}
- uintptr_t PushHandle(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
+ uintptr_t PushHandle(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
return delegate_->PushHandle(ref);
}
@@ -1557,10 +1557,10 @@
virtual void WalkHeader(
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
}
- void Walk(const char* shorty, uint32_t shorty_len) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Walk(const char* shorty, uint32_t shorty_len) REQUIRES_SHARED(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
WalkHeader(&sm);
@@ -1632,7 +1632,7 @@
//
// Note: assumes ComputeAll() has been run before.
void LayoutCalleeSaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = **m;
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
@@ -1673,7 +1673,7 @@
// Re-layout the callee-save frame (insert a handle-scope). Then add space for the cookie.
// Returns the new bottom. Note: this may be unaligned.
uint8_t* LayoutJNISaveFrame(Thread* self, ArtMethod*** m, void* sp, HandleScope** handle_scope)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// First, fix up the layout of the callee-save frame.
// We have to squeeze in the HandleScope, and relocate the method pointer.
LayoutCalleeSaveFrame(self, m, sp, handle_scope);
@@ -1691,7 +1691,7 @@
uint8_t* ComputeLayout(Thread* self, ArtMethod*** m, const char* shorty, uint32_t shorty_len,
HandleScope** handle_scope, uintptr_t** start_stack, uintptr_t** start_gpr,
uint32_t** start_fpr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
// JNI part.
@@ -1707,7 +1707,7 @@
// Add JNIEnv* and jobj/jclass before the shorty-derived elements.
void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
uint32_t num_handle_scope_references_;
@@ -1763,7 +1763,7 @@
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object*) SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
UNREACHABLE();
}
@@ -1801,15 +1801,15 @@
}
}
- void Visit() SHARED_REQUIRES(Locks::mutator_lock_) OVERRIDE;
+ void Visit() REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE;
- void FinalizeHandleScope(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FinalizeHandleScope(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
StackReference<mirror::Object>* GetFirstHandleScopeEntry() {
return handle_scope_->GetHandle(0).GetReference();
}
- jobject GetFirstHandleScopeJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ jobject GetFirstHandleScopeJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
return handle_scope_->GetHandle(0).ToJObject();
}
@@ -1825,7 +1825,7 @@
HandleScope* handle_scope) : FillNativeCall(gpr_regs, fpr_regs, stack_args),
handle_scope_(handle_scope), cur_entry_(0) {}
- uintptr_t PushHandle(mirror::Object* ref) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t PushHandle(mirror::Object* ref) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
void Reset(uintptr_t* gpr_regs, uint32_t* fpr_regs, uintptr_t* stack_args, HandleScope* scope) {
FillNativeCall::Reset(gpr_regs, fpr_regs, stack_args);
@@ -1833,7 +1833,7 @@
cur_entry_ = 0U;
}
- void ResetRemainingScopeSlots() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ResetRemainingScopeSlots() REQUIRES_SHARED(Locks::mutator_lock_) {
// Initialize padding entries.
size_t expected_slots = handle_scope_->NumberOfReferences();
while (cur_entry_ < expected_slots) {
@@ -1953,7 +1953,7 @@
* 2) An error, if the value is negative.
*/
extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* called = *sp;
DCHECK(called->IsNative()) << PrettyMethod(called, true);
uint32_t shorty_len = 0;
@@ -2048,7 +2048,7 @@
// for the method pointer.
//
// It is valid to use this, as at the usage points here (returns from C functions) we are assuming
-// to hold the mutator lock (see SHARED_REQUIRES(Locks::mutator_lock_) annotations).
+// to hold the mutator lock (see REQUIRES_SHARED(Locks::mutator_lock_) annotations).
template<InvokeType type, bool access_check>
static TwoWordReturn artInvokeCommon(uint32_t method_idx, mirror::Object* this_object, Thread* self,
@@ -2090,7 +2090,7 @@
// Explicit artInvokeCommon template function declarations to please analysis tool.
#define EXPLICIT_INVOKE_COMMON_TEMPLATE_DECL(type, access_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
TwoWordReturn artInvokeCommon<type, access_check>( \
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
@@ -2109,31 +2109,31 @@
// See comments in runtime_support_asm.S
extern "C" TwoWordReturn artInvokeInterfaceTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kInterface, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeDirectTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kDirect, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeStaticTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kStatic, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeSuperTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kSuper, true>(method_idx, this_object, self, sp);
}
extern "C" TwoWordReturn artInvokeVirtualTrampolineWithAccessCheck(
uint32_t method_idx, mirror::Object* this_object, Thread* self, ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return artInvokeCommon<kVirtual, true>(method_idx, this_object, self, sp);
}
@@ -2144,7 +2144,7 @@
mirror::Object* this_object,
Thread* self,
ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
StackHandleScope<1> hs(self);
Handle<mirror::Class> cls(hs.NewHandle(this_object->GetClass()));
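The trampoline code above walks a method's arguments from its "shorty" signature string. A self-contained sketch of that kind of shorty-driven visitor (format assumed here: return type first, then one character per parameter) is:

// Illustrative shorty walker, not QuickArgumentVisitor itself.
#include <cstdio>

class ShortyVisitor {
 public:
  ShortyVisitor(const char* shorty, bool is_static)
      : shorty_(shorty), is_static_(is_static) {}

  void VisitArguments() {
    if (!is_static_) {
      VisitReference();  // implicit 'this' receiver
    }
    for (const char* p = shorty_ + 1; *p != '\0'; ++p) {  // shorty_[0] is the return type
      switch (*p) {
        case 'L': VisitReference(); break;
        case 'J': case 'D': VisitWide(); break;
        default: VisitNarrow(); break;
      }
    }
  }

 private:
  void VisitReference() { std::printf("reference argument\n"); }
  void VisitWide()      { std::printf("64-bit argument\n"); }
  void VisitNarrow()    { std::printf("32-bit argument\n"); }

  const char* const shorty_;
  const bool is_static_;
};

int main() {
  // "VLJI": returns void, takes (reference, long, int); non-static adds 'this'.
  ShortyVisitor visitor("VLJI", /*is_static=*/false);
  visitor.VisitArguments();
  return 0;
}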
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 45db500..db9568a 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -73,12 +73,12 @@
// Beware: Mixing atomic pushes and atomic pops will cause ABA problem.
// Returns false if we overflowed the stack.
- bool AtomicPushBackIgnoreGrowthLimit(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool AtomicPushBackIgnoreGrowthLimit(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
return AtomicPushBackInternal(value, capacity_);
}
// Returns false if we overflowed the stack.
- bool AtomicPushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool AtomicPushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
return AtomicPushBackInternal(value, growth_limit_);
}
@@ -86,7 +86,7 @@
// slots. Returns false if we overflowed the stack.
bool AtomicBumpBack(size_t num_slots, StackReference<T>** start_address,
StackReference<T>** end_address)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
@@ -112,7 +112,7 @@
return true;
}
- void AssertAllZero() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void AssertAllZero() REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
for (size_t i = 0; i < capacity_; ++i) {
DCHECK_EQ(begin_[i].AsMirrorPtr(), static_cast<T*>(nullptr)) << "i=" << i;
@@ -120,7 +120,7 @@
}
}
- void PushBack(T* value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void PushBack(T* value) REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
@@ -130,7 +130,7 @@
begin_[index].Assign(value);
}
- T* PopBack() SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
// Decrement the back index non atomically.
back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
@@ -193,12 +193,12 @@
}
}
- bool ContainsSorted(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ContainsSorted(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(debug_is_sorted_);
return std::binary_search(Begin(), End(), value, ObjectComparator());
}
- bool Contains(const T* value) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool Contains(const T* value) const REQUIRES_SHARED(Locks::mutator_lock_) {
for (auto cur = Begin(), end = End(); cur != end; ++cur) {
if (cur->AsMirrorPtr() == value) {
return true;
@@ -220,7 +220,7 @@
// Returns false if we overflowed the stack.
bool AtomicPushBackInternal(T* value, size_t limit) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
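The atomic stack above reserves a slot with a compare-and-swap on the back index. A minimal sketch of that push path, assuming a fixed-capacity slot array and leaving out the pop side, is:

// Bump-index atomic push in the spirit of AtomicPushBackInternal; this is
// not the ART implementation, and storage ownership is simplified.
#include <atomic>
#include <cstddef>
#include <vector>

template <typename T>
class BumpStack {
 public:
  explicit BumpStack(size_t capacity) : slots_(capacity, nullptr) {}

  // Multiple threads may push concurrently. Mixing this with concurrent
  // pops would reintroduce the ABA hazard called out in the header above.
  bool AtomicPushBack(T* value) {
    size_t index = back_.load(std::memory_order_relaxed);
    do {
      if (index >= slots_.size()) {
        return false;  // stack overflowed its capacity
      }
    } while (!back_.compare_exchange_weak(index, index + 1,
                                          std::memory_order_relaxed));
    slots_[index] = value;  // each successful CAS reserves a distinct slot
    return true;
  }

 private:
  std::vector<T*> slots_;
  std::atomic<size_t> back_{0};
};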
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index b6af908..969bfb7 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -108,7 +108,7 @@
const Visitor& visitor,
const uint8_t minimum_age = kCardDirty) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Assertion used to check the given address is covered by the card table
void CheckAddrIsInCardTable(const uint8_t* addr) const;
diff --git a/runtime/gc/accounting/heap_bitmap.h b/runtime/gc/accounting/heap_bitmap.h
index 0b96979..76247bc 100644
--- a/runtime/gc/accounting/heap_bitmap.h
+++ b/runtime/gc/accounting/heap_bitmap.h
@@ -35,26 +35,26 @@
class HeapBitmap {
public:
- bool Test(const mirror::Object* obj) SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ bool Test(const mirror::Object* obj) REQUIRES_SHARED(Locks::heap_bitmap_lock_);
void Clear(const mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_);
template<typename LargeObjectSetVisitor>
bool Set(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
template<typename LargeObjectSetVisitor>
bool AtomicTestAndSet(const mirror::Object* obj, const LargeObjectSetVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) ALWAYS_INLINE;
ContinuousSpaceBitmap* GetContinuousSpaceBitmap(const mirror::Object* obj) const;
LargeObjectBitmap* GetLargeObjectBitmap(const mirror::Object* obj) const;
void Walk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
template <typename Visitor>
void Visit(const Visitor& visitor)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find and replace a bitmap pointer, this is used by for the bitmap swapping in the GC.
void ReplaceBitmap(ContinuousSpaceBitmap* old_bitmap, ContinuousSpaceBitmap* new_bitmap)
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 35bcb18..24a2c17 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -98,24 +98,24 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MarkReference(obj->GetFieldObjectReferenceAddr(offset));
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VisitRoot(root);
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MarkReference(root);
}
private:
template<bool kPoisonReferences>
void MarkReference(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
mirror::Object* ref = obj_ptr->AsMirrorPtr();
if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
@@ -150,7 +150,7 @@
void operator()(mirror::Object* root) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_,
from_space_,
@@ -193,7 +193,7 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = ref_ptr->AsMirrorPtr();
// Only add the reference if it is non null and fits our criteria.
@@ -204,14 +204,14 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (mod_union_table_->ShouldAddReference(root->AsMirrorPtr())) {
*has_target_reference_ = true;
// TODO: Add MarkCompressedReference callback here.
@@ -242,7 +242,7 @@
has_target_reference_(has_target_reference) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
// We don't have an early exit since we use the visitor pattern, an early
// exit should significantly speed this up.
AddToReferenceArrayVisitor visitor(mod_union_table_,
@@ -268,7 +268,7 @@
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr &&
mod_union_table_->ShouldAddReference(ref) &&
@@ -289,14 +289,14 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsDebugBuild && !root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!mod_union_table_->ShouldAddReference(root->AsMirrorPtr()));
}
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 6aa2417..b6792c4 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -121,17 +121,17 @@
// Update table based on cleared cards and mark all references to the other spaces.
void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Exclusive lock is required since verify uses SpaceBitmap::VisitMarkedRange and
// VisitMarkedRange can't know if the callback will modify the bitmap or not.
void Verify() OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Function that tells whether or not to add a reference to the table.
@@ -139,7 +139,7 @@
virtual bool ContainsCardFor(uintptr_t addr) OVERRIDE;
- virtual void Dump(std::ostream& os) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ virtual void Dump(std::ostream& os) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
virtual void SetCards() OVERRIDE;
@@ -169,11 +169,11 @@
// Mark all references to the alloc space(s).
virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitObjects(ObjectCallback* callback, void* arg) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Nothing to verify.
virtual void Verify() OVERRIDE {}
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 349d6ff..2810f58 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -47,7 +47,7 @@
}
mirror::ObjectArray<mirror::Object>* AllocObjectArray(
Thread* self, space::ContinuousMemMapAllocSpace* space, size_t component_count)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto* klass = GetObjectArrayClass(self, space);
const size_t size = mirror::ComputeArraySize(component_count, 2);
size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
@@ -68,7 +68,7 @@
private:
mirror::Class* GetObjectArrayClass(Thread* self, space::ContinuousMemMapAllocSpace* space)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (java_lang_object_array_ == nullptr) {
java_lang_object_array_ =
Runtime::Current()->GetClassLinker()->GetClassRoot(ClassLinker::kObjectArrayClass);
@@ -98,12 +98,12 @@
public:
explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
MarkObject(ref->AsMirrorPtr());
}
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
out_->insert(obj);
return obj;
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index eb0852a..7229f76 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -67,7 +67,7 @@
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
@@ -78,7 +78,7 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
if (target_space_->HasAddress(ref->GetReferent())) {
*contains_reference_to_target_space_ = true;
collector_->DelayReferenceReferent(klass, ref);
@@ -86,14 +86,14 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (target_space_->HasAddress(root->AsMirrorPtr())) {
*contains_reference_to_target_space_ = true;
root->Assign(collector_->MarkObject(root->AsMirrorPtr()));
@@ -116,7 +116,7 @@
contains_reference_to_target_space_(contains_reference_to_target_space) {}
void operator()(mirror::Object* obj) const REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
collector_);
obj->VisitReferences(visitor, visitor);
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index 3a0dcf7..5594781 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -57,7 +57,7 @@
void UpdateAndMarkReferences(space::ContinuousSpace* target_space,
collector::GarbageCollector* collector)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os);
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 3df02ed..3649111 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -195,7 +195,7 @@
void SpaceBitmap<kAlignment>::WalkInstanceFields(SpaceBitmap<kAlignment>* visited,
ObjectCallback* callback, mirror::Object* obj,
mirror::Class* klass, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Visit fields of parent classes first.
mirror::Class* super = klass->GetSuperClass();
if (super != nullptr) {
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 829b1b1..576f9c7 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -123,7 +123,7 @@
// Visit the live objects in the range [visit_begin, visit_end).
// TODO: Use lock annotations when clang is fixed.
- // REQUIRES(Locks::heap_bitmap_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ // REQUIRES(Locks::heap_bitmap_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename Visitor>
void VisitMarkedRange(uintptr_t visit_begin, uintptr_t visit_end, const Visitor& visitor) const
NO_THREAD_SAFETY_ANALYSIS;
@@ -131,12 +131,12 @@
// Visits set bits in address order. The callback is not permitted to change the bitmap bits or
// max during the traversal.
void Walk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Visits set bits with an in order traversal. The callback is not permitted to change the bitmap
// bits or max during the traversal.
void InOrderWalk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Walk through the bitmaps in increasing address order, and find the object pointers that
// correspond to garbage objects. Call <callback> zero or more times with lists of these object
@@ -204,12 +204,12 @@
// For an unvisited object, visit it then all its children found via fields.
static void WalkFieldsInOrder(SpaceBitmap* visited, ObjectCallback* callback, mirror::Object* obj,
- void* arg) SHARED_REQUIRES(Locks::mutator_lock_);
+ void* arg) REQUIRES_SHARED(Locks::mutator_lock_);
// Walk instance fields of the given Class. Separate function to allow recursion on the super
// class.
static void WalkInstanceFields(SpaceBitmap<kAlignment>* visited, ObjectCallback* callback,
mirror::Object* obj, mirror::Class* klass, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Backing storage for bitmap.
std::unique_ptr<MemMap> mem_map_;
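WalkInstanceFields above recurses on the superclass before visiting the fields of the class itself, which is why the header keeps it as a separate function. A toy sketch of that superclass-first traversal, with stand-ins for mirror::Class and its field list:

#include <iostream>
#include <string>
#include <vector>

struct Class {
  const Class* super;                // superclass, or nullptr for the root
  std::vector<std::string> fields;   // instance fields declared by this class
};

// Visit fields of parent classes first, then the fields declared by klass itself.
void WalkInstanceFields(const Class* klass) {
  if (klass->super != nullptr) {
    WalkInstanceFields(klass->super);
  }
  for (const std::string& f : klass->fields) {
    std::cout << f << '\n';
  }
}

int main() {
  Class object{nullptr, {"klass_", "monitor_"}};
  Class string{&object, {"count_", "hash_code_"}};
  WalkInstanceFields(&string);  // prints Object's fields before String's
}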
diff --git a/runtime/gc/allocation_record.cc b/runtime/gc/allocation_record.cc
index 522f236..13ebb27 100644
--- a/runtime/gc/allocation_record.cc
+++ b/runtime/gc/allocation_record.cc
@@ -119,7 +119,7 @@
}
static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
// This does not need a read barrier because this is called by GC.
@@ -187,7 +187,7 @@
class AllocRecordStackVisitor : public StackVisitor {
public:
AllocRecordStackVisitor(Thread* thread, size_t max_depth, AllocRecordStackTrace* trace_out)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
max_depth_(max_depth),
trace_(trace_out) {}
diff --git a/runtime/gc/allocation_record.h b/runtime/gc/allocation_record.h
index a2d86cc..f1f013b 100644
--- a/runtime/gc/allocation_record.h
+++ b/runtime/gc/allocation_record.h
@@ -38,7 +38,7 @@
class AllocRecordStackTraceElement {
public:
- int32_t ComputeLineNumber() const SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t ComputeLineNumber() const REQUIRES_SHARED(Locks::mutator_lock_);
AllocRecordStackTraceElement() = default;
AllocRecordStackTraceElement(ArtMethod* method, uint32_t dex_pc)
@@ -174,14 +174,14 @@
return trace_.GetTid();
}
- mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
return klass_.Read();
}
const char* GetClassDescriptor(std::string* storage) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- GcRoot<mirror::Class>& GetClassGcRoot() SHARED_REQUIRES(Locks::mutator_lock_) {
+ GcRoot<mirror::Class>& GetClassGcRoot() REQUIRES_SHARED(Locks::mutator_lock_) {
return klass_;
}
@@ -213,7 +213,7 @@
mirror::Object** obj,
size_t byte_count)
REQUIRES(!Locks::alloc_tracker_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SetAllocTrackingEnabled(bool enabled) REQUIRES(!Locks::alloc_tracker_lock_);
@@ -221,7 +221,7 @@
~AllocRecordObjectMap();
void Put(mirror::Object* obj, AllocRecord&& record)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
if (entries_.size() == alloc_record_max_) {
entries_.pop_front();
@@ -229,22 +229,22 @@
entries_.push_back(EntryPair(GcRoot<mirror::Object>(obj), std::move(record)));
}
- size_t Size() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
+ size_t Size() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
return entries_.size();
}
- size_t GetRecentAllocationSize() const SHARED_REQUIRES(Locks::alloc_tracker_lock_) {
+ size_t GetRecentAllocationSize() const REQUIRES_SHARED(Locks::alloc_tracker_lock_) {
CHECK_LE(recent_record_max_, alloc_record_max_);
size_t sz = entries_.size();
return std::min(recent_record_max_, sz);
}
void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void SweepAllocationRecords(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
// Allocation tracking could be enabled by user in between DisallowNewAllocationRecords() and
@@ -254,36 +254,36 @@
// swept from the list. But missing the first few records is acceptable for using the button to
// enable allocation tracking.
void DisallowNewAllocationRecords()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void AllowNewAllocationRecords()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_);
// TODO: Is there a better way to hide the entries_'s type?
EntryList::iterator Begin()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.begin();
}
EntryList::iterator End()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.end();
}
EntryList::reverse_iterator RBegin()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.rbegin();
}
EntryList::reverse_iterator REnd()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::alloc_tracker_lock_) {
return entries_.rend();
}
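The Put() shown above keeps the allocation record list bounded: once entries_.size() reaches alloc_record_max_, the oldest entry is popped from the front before the new record is appended, so only the most recent records survive. A minimal sketch of that policy with a plain std::deque (toy types, not the ART AllocRecordObjectMap):

#include <cstddef>
#include <deque>
#include <iostream>
#include <utility>

template <typename Key, typename Record>
class BoundedRecordMap {
 public:
  explicit BoundedRecordMap(size_t max) : max_(max) {}

  void Put(Key key, Record record) {
    if (entries_.size() == max_) {
      entries_.pop_front();  // evict the oldest record
    }
    entries_.emplace_back(std::move(key), std::move(record));
  }

  size_t Size() const { return entries_.size(); }

 private:
  const size_t max_;
  std::deque<std::pair<Key, Record>> entries_;
};

int main() {
  BoundedRecordMap<int, const char*> map(2);
  map.Put(1, "a");
  map.Put(2, "b");
  map.Put(3, "c");                  // evicts key 1
  std::cout << map.Size() << '\n';  // prints 2
}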
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 375d869..a7f2aa0 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -133,7 +133,7 @@
DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
if (req_byte_size <= fpr_byte_size) {
// Found one.
- free_page_runs_.erase(it++);
+ it = free_page_runs_.erase(it);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
<< std::hex << reinterpret_cast<intptr_t>(fpr)
@@ -141,7 +141,8 @@
}
if (req_byte_size < fpr_byte_size) {
// Split.
- FreePageRun* remainder = reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
+ FreePageRun* remainder =
+ reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
if (kIsDebugBuild) {
remainder->magic_num_ = kMagicNumFree;
}
@@ -364,86 +365,74 @@
<< std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
<< (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
}
- auto higher_it = free_page_runs_.upper_bound(fpr);
- if (higher_it != free_page_runs_.end()) {
- for (auto it = higher_it; it != free_page_runs_.end(); ) {
- FreePageRun* h = *it;
- DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.end(); ) {
+ FreePageRun* h = *it;
+ DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
+ << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
+ << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
+ << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
+ }
+ if (fpr->End(this) == h->Begin()) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
- << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
- << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
- << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
+ LOG(INFO) << "Success";
}
- if (fpr->End(this) == h->Begin()) {
- if (kTraceRosAlloc) {
- LOG(INFO) << "Success";
- }
- // Clear magic num since this is no longer the start of a free page run.
- if (kIsDebugBuild) {
- h->magic_num_ = 0;
- }
- free_page_runs_.erase(it++);
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
- << reinterpret_cast<intptr_t>(h)
- << " from free_page_runs_";
- }
- fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
- DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
- } else {
- // Not adjacent. Stop.
- if (kTraceRosAlloc) {
- LOG(INFO) << "Fail";
- }
- break;
+ // Clear magic num since this is no longer the start of a free page run.
+ if (kIsDebugBuild) {
+ h->magic_num_ = 0;
}
+ it = free_page_runs_.erase(it);
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
+ << reinterpret_cast<intptr_t>(h)
+ << " from free_page_runs_";
+ }
+ fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
+ DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ } else {
+ // Not adjacent. Stop.
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "Fail";
+ }
+ break;
}
}
// Try to coalesce in the lower address direction.
- auto lower_it = free_page_runs_.upper_bound(fpr);
- if (lower_it != free_page_runs_.begin()) {
- --lower_it;
- for (auto it = lower_it; ; ) {
- // We want to try to coalesce with the first element but
- // there's no "<=" operator for the iterator.
- bool to_exit_loop = it == free_page_runs_.begin();
+ for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.begin(); ) {
+ --it;
- FreePageRun* l = *it;
- DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ FreePageRun* l = *it;
+ DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
+ << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
+ << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
+ << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
+ }
+ if (l->End(this) == fpr->Begin()) {
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
- << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
- << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
- << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
+ LOG(INFO) << "Success";
}
- if (l->End(this) == fpr->Begin()) {
- if (kTraceRosAlloc) {
- LOG(INFO) << "Success";
- }
- free_page_runs_.erase(it--);
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
- << reinterpret_cast<intptr_t>(l)
- << " from free_page_runs_";
- }
- l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
- DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
- // Clear magic num since this is no longer the start of a free page run.
- if (kIsDebugBuild) {
- fpr->magic_num_ = 0;
- }
- fpr = l;
- } else {
- // Not adjacent. Stop.
- if (kTraceRosAlloc) {
- LOG(INFO) << "Fail";
- }
- break;
+ it = free_page_runs_.erase(it);
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
+ << reinterpret_cast<intptr_t>(l)
+ << " from free_page_runs_";
}
- if (to_exit_loop) {
- break;
+ l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
+ DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
+ // Clear magic num since this is no longer the start of a free page run.
+ if (kIsDebugBuild) {
+ fpr->magic_num_ = 0;
}
+ fpr = l;
+ } else {
+ // Not adjacent. Stop.
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "Fail";
+ }
+ break;
}
}
}
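Besides the annotation rename, the rosalloc.cc hunk above replaces free_page_runs_.erase(it++) with it = free_page_runs_.erase(it) and flattens the two coalescing loops; since C++11, std::set::erase(iterator) returns the iterator following the erased element, which makes erase-while-iterating straightforward. A toy sketch of the idiom (illustrative container and values, not the ART free-page-run set):

#include <iostream>
#include <iterator>
#include <set>

int main() {
  std::set<int> runs = {1, 2, 3, 10, 11, 20};
  // Remove every element adjacent to its predecessor (toy "coalesce").
  int prev = *runs.begin();
  for (auto it = std::next(runs.begin()); it != runs.end(); ) {
    if (*it == prev + 1) {
      prev = *it;
      it = runs.erase(it);  // erase returns the iterator following the erased one
    } else {
      prev = *it;
      ++it;
    }
  }
  for (int v : runs) std::cout << v << ' ';  // prints: 1 10 20
  std::cout << '\n';
}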
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 85d307b..975ac36 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -241,7 +241,7 @@
: concurrent_copying_(concurrent_copying), use_tlab_(use_tlab) {
}
- virtual void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
// Note: self is not necessarily equal to thread since thread may be suspended.
Thread* self = Thread::Current();
CHECK(thread == self || thread->IsSuspended() || thread->GetState() == kWaitingPerformingGc)
@@ -271,7 +271,7 @@
void VisitRoots(mirror::Object*** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object** root = roots[i];
mirror::Object* ref = *root;
@@ -287,7 +287,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::CompressedReference<mirror::Object>* const root = roots[i];
if (!root->IsNull()) {
@@ -355,14 +355,14 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
- const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
CheckReference(obj->GetFieldObject<mirror::Object, kVerifyNone, kWithoutReadBarrier>(offset),
obj, offset);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
CheckReference(ref->GetReferent<kWithoutReadBarrier>(),
ref,
@@ -371,7 +371,7 @@
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -379,7 +379,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CheckReference(root->AsMirrorPtr(), nullptr, MemberOffset(0));
}
@@ -387,7 +387,7 @@
ConcurrentCopying* const collector_;
void CheckReference(mirror::Object* ref, mirror::Object* holder, MemberOffset offset) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (ref != nullptr) {
if (!collector_->immune_spaces_.ContainsObject(ref)) {
// Not immune, must be a zygote large object.
@@ -414,7 +414,7 @@
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->Limit()),
[&visitor](mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// If an object is not gray, it should only have references to things in the immune spaces.
if (obj->GetReadBarrierPointer() != ReadBarrier::GrayPtr()) {
obj->VisitReferences</*kVisitNativeRoots*/true,
@@ -456,7 +456,7 @@
public:
explicit GrayImmuneObjectVisitor() {}
- ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (kUseBakerReadBarrier) {
if (kIsDebugBuild) {
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
@@ -465,7 +465,7 @@
}
}
- static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<GrayImmuneObjectVisitor*>(arg)->operator()(obj);
}
};
@@ -540,7 +540,7 @@
explicit ImmuneSpaceScanObjVisitor(ConcurrentCopying* cc)
: collector_(cc) {}
- ALWAYS_INLINE void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (kUseBakerReadBarrier && kGrayDirtyImmuneObjects) {
if (obj->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
collector_->ScanImmuneObject(obj);
@@ -554,7 +554,7 @@
}
}
- static void Callback(mirror::Object* obj, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Callback(mirror::Object* obj, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
reinterpret_cast<ImmuneSpaceScanObjVisitor*>(arg)->operator()(obj);
}
@@ -922,7 +922,7 @@
: collector_(collector) {}
void operator()(mirror::Object* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
if (ref == nullptr) {
// OK.
return;
@@ -936,7 +936,7 @@
}
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(root != nullptr);
operator()(root);
}
@@ -951,27 +951,27 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
this->operator()(ref, mirror::Reference::ReferentOffset(), false);
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyNoFromSpaceRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -985,11 +985,11 @@
explicit VerifyNoFromSpaceRefsObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectCallback(obj, collector_);
}
static void ObjectCallback(mirror::Object* obj, void *arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(obj != nullptr);
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
@@ -1055,7 +1055,7 @@
: collector_(collector) {}
void operator()(mirror::Object* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
if (ref == nullptr) {
// OK.
return;
@@ -1073,26 +1073,26 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref =
obj->GetFieldObject<mirror::Object, kDefaultVerifyFlags, kWithoutReadBarrier>(offset);
AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AssertToSpaceInvariantRefsVisitor visitor(collector_);
visitor(root->AsMirrorPtr());
}
@@ -1106,11 +1106,11 @@
explicit AssertToSpaceInvariantObjectVisitor(ConcurrentCopying* collector)
: collector_(collector) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectCallback(obj, collector_);
}
static void ObjectCallback(mirror::Object* obj, void *arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(obj != nullptr);
ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
space::RegionSpace* region_space = collector->RegionSpace();
@@ -1474,7 +1474,7 @@
reinterpret_cast<uintptr_t>(los->End()),
[mark_bitmap, los, self](mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (los->IsZygoteLargeObject(self, obj)) {
mark_bitmap->Set(obj);
}
@@ -1600,7 +1600,7 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -1608,13 +1608,13 @@
template <class MirrorType>
void VisitRoot(mirror::Object** root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << *root;
}
template <class MirrorType>
void VisitRoot(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INTERNAL_FATAL) << "root=" << root << " ref=" << root->AsMirrorPtr();
}
};
@@ -1745,20 +1745,20 @@
: collector_(collector) {}
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */)
- const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
collector_->Process(obj, offset);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
CHECK(klass->IsTypeOfReferenceClass());
collector_->DelayReferenceReferent(klass, ref);
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -1766,7 +1766,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
collector_->MarkRoot</*kGrayImmuneObject*/false>(root);
}
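Several of the visitors in concurrent_copying.cc above pair an operator() with a static Callback(mirror::Object*, void* arg) that casts arg back to the visitor; this is the usual way to thread a stateful functor through an interface that only accepts a plain function pointer plus an opaque pointer. A hedged sketch with toy types in place of the ART ones:

#include <iostream>

using ObjectCallback = void (*)(int* obj, void* arg);

void WalkObjects(int* objs, int count, ObjectCallback cb, void* arg) {
  for (int i = 0; i < count; ++i) cb(&objs[i], arg);
}

struct CountingVisitor {
  int visited = 0;
  void operator()(int* obj) { ++visited; (void)obj; }
  // Trampoline: recover the visitor from the opaque pointer and forward.
  static void Callback(int* obj, void* arg) {
    reinterpret_cast<CountingVisitor*>(arg)->operator()(obj);
  }
};

int main() {
  int objs[3] = {1, 2, 3};
  CountingVisitor visitor;
  WalkObjects(objs, 3, &CountingVisitor::Callback, &visitor);
  std::cout << visitor.visited << " objects visited\n";  // 3 objects visited
}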
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 97f4555..1ef0aea 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -71,16 +71,16 @@
!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
!skipped_blocks_lock_);
- void InitializePhase() SHARED_REQUIRES(Locks::mutator_lock_)
+ void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
- void MarkingPhase() SHARED_REQUIRES(Locks::mutator_lock_)
+ void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void ReclaimPhase() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void FinishPhase() REQUIRES(!mark_stack_lock_,
!rb_slow_path_histogram_lock_,
!skipped_blocks_lock_);
- void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
virtual GcType GetGcType() const OVERRIDE {
return kGcTypePartial;
@@ -97,19 +97,19 @@
return region_space_;
}
void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsInToSpace(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
bool IsMarking() const {
return is_marking_;
@@ -123,37 +123,37 @@
bool IsWeakRefAccessEnabled() {
return weak_ref_access_enabled_.LoadRelaxed();
}
- void RevokeThreadLocalMarkStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
private:
- void PushOntoMarkStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- mirror::Object* Copy(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::Object* Copy(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
- void Scan(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void Process(mirror::Object* obj, MemberOffset offset)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_ , !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
template<bool kGrayImmuneObject>
void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
accounting::ObjectStack* GetAllocationStack();
accounting::ObjectStack* GetLiveStack();
- virtual void ProcessMarkStack() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- bool ProcessMarkStackOnce() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void ProcessMarkStackRef(mirror::Object* to_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void GrayAllDirtyImmuneObjects()
REQUIRES(Locks::mutator_lock_)
@@ -162,75 +162,75 @@
REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void SwitchToSharedMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- void SwitchToGcExclusiveMarkStackMode() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
- void ProcessReferences(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
void Sweep(bool swap_bitmaps)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
void SweepLargeObjects(bool swap_bitmaps)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
void MarkZygoteLargeObjects()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void CheckEmptyMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void IssueEmptyCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsOnAllocStack(mirror::Object* ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* GetFwdPtr(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
- void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
void RecordLiveStackFreezeSize(Thread* self);
void ComputeUnevacFromSpaceLiveRatio();
void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void ReenableWeakRefAccess(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void DisableMarking() SHARED_REQUIRES(Locks::mutator_lock_);
- void IssueDisableMarkingCheckpoint() SHARED_REQUIRES(Locks::mutator_lock_);
- void ExpandGcMarkStack() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Object* MarkNonMoving(mirror::Object* from_ref) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
+ void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
+ void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Object* MarkNonMoving(mirror::Object* from_ref) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
accounting::SpaceBitmap<kObjectAlignment>* bitmap)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
template<bool kGrayImmuneObject>
ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
- void PushOntoFalseGrayStack(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
+ void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
- void ProcessFalseGrayStack() SHARED_REQUIRES(Locks::mutator_lock_)
+ void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_);
void ScanImmuneObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index e0b71a7..6afe876 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -155,7 +155,7 @@
// this is the allocation space, for full GC then we swap the zygote bitmaps too.
void SwapBitmaps()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint64_t GetTotalPausedTimeNs() REQUIRES(!pause_histogram_lock_);
int64_t GetTotalFreedBytes() const {
return total_freed_bytes_;
@@ -186,18 +186,18 @@
// Helper functions for querying if objects are marked. These are used for processing references,
// and will be used for reading system weaks while the GC is running.
virtual mirror::Object* IsMarked(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Used by reference processor.
- virtual void ProcessMarkStack() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ virtual void ProcessMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Force mark an object.
virtual mirror::Object* MarkObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
protected:
// Run all of the GC phases.
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 43482eb..d866106 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -136,7 +136,7 @@
} else {
DCHECK(!space_->HasAddress(obj));
auto slow_path = [this](const mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Marking a large object, make sure its aligned as a sanity check.
if (!IsAligned<kPageSize>(ref)) {
Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
@@ -289,7 +289,7 @@
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object* obj = *roots[i];
mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -303,7 +303,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
OVERRIDE REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mirror::Object* obj = roots[i]->AsMirrorPtr();
mirror::Object* new_obj = collector_->GetMarkedForwardAddress(obj);
@@ -322,7 +322,7 @@
public:
explicit UpdateObjectReferencesVisitor(MarkCompact* collector) : collector_(collector) {}
- void operator()(mirror::Object* obj) const SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ void operator()(mirror::Object* obj) const REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
collector_->UpdateObjectReferences(obj);
}
@@ -509,7 +509,7 @@
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
[this](mirror::Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
MoveObject(obj, obj->SizeOf());
});
@@ -558,7 +558,7 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 16abfb7..a61646c 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -96,7 +96,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
void UnBindBitmaps()
@@ -112,7 +112,7 @@
void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
void SweepSystemWeaks()
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
OVERRIDE REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
@@ -123,33 +123,33 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if we should sweep the space.
bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MarkStackPush(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
void ProcessMarkStack()
@@ -173,20 +173,20 @@
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) REQUIRES(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
// Update a single heap reference.
void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
// Update all of the references of a single object.
void UpdateObjectReferences(mirror::Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_)
REQUIRES(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 3904160..cbc4dc1 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -275,7 +275,7 @@
void operator()(mirror::Object* obj) const
ALWAYS_INLINE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -541,7 +541,7 @@
explicit VerifyRootMarkedVisitor(MarkSweep* collector) : collector_(collector) { }
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
@@ -568,7 +568,7 @@
class MarkSweep::VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// See if the root is on any space bitmap.
auto* heap = Runtime::Current()->GetHeap();
if (heap->GetLiveBitmap()->GetContinuousSpaceBitmap(root) == nullptr) {
@@ -618,7 +618,7 @@
void operator()(mirror::Class* klass, mirror::Reference* ref) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
collector_->DelayReferenceReferent(klass, ref);
}
@@ -659,19 +659,19 @@
ALWAYS_INLINE void operator()(mirror::Object* obj,
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Mark(obj->GetFieldObject<mirror::Object>(offset));
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -680,7 +680,7 @@
}
private:
- ALWAYS_INLINE void Mark(mirror::Object* ref) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void Mark(mirror::Object* ref) const REQUIRES_SHARED(Locks::mutator_lock_) {
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
std::atomic_thread_fence(std::memory_order_seq_cst);
@@ -705,7 +705,7 @@
// No thread safety analysis since multiple threads will use this visitor.
void operator()(mirror::Object* obj) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
DelayReferenceReferentVisitor ref_visitor(mark_sweep);
@@ -732,7 +732,7 @@
size_t mark_stack_pos_;
ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
@@ -754,7 +754,7 @@
// Scans all of the objects
virtual void Run(Thread* self ATTRIBUTE_UNUSED)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
@@ -1069,7 +1069,7 @@
virtual mirror::Object* IsMarked(mirror::Object* obj)
OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
mark_sweep_->VerifyIsLive(obj);
return obj;
}
@@ -1102,7 +1102,7 @@
}
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(*roots[i]);
@@ -1112,7 +1112,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_)
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_) {
for (size_t i = 0; i < count; ++i) {
mark_sweep_->MarkObjectNonNullParallel(roots[i]->AsMirrorPtr());
@@ -1311,7 +1311,7 @@
MemberOffset offset,
bool is_static ATTRIBUTE_UNUSED) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
@@ -1321,7 +1321,7 @@
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
@@ -1329,7 +1329,7 @@
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 9747031..bbac9da 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -60,14 +60,14 @@
virtual void RunPhases() OVERRIDE REQUIRES(!mark_stack_lock_);
void InitializePhase();
- void MarkingPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MarkingPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void PausePhase() REQUIRES(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
- void ReclaimPhase() REQUIRES(!mark_stack_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ void ReclaimPhase() REQUIRES(!mark_stack_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void FinishPhase();
virtual void MarkReachableObjects()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsConcurrent() const {
return is_concurrent_;
@@ -85,71 +85,71 @@
void Init();
// Find the default mark bitmap.
- void FindDefaultSpaceBitmap() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FindDefaultSpaceBitmap() REQUIRES_SHARED(Locks::mutator_lock_);
// Marks all objects in the root set at the start of a garbage collection.
void MarkRoots(Thread* self)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkNonThreadRoots()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkConcurrentRoots(VisitRootFlags flags)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkRootsCheckpoint(Thread* self, bool revoke_ros_alloc_thread_local_buffers_at_checkpoint)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a mark stack and recursively mark until it empties.
void RecursiveMark()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_);
+ virtual void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_);
// Builds a mark stack with objects on dirty cards and recursively mark until it empties.
void RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Remarks the root set after completing the concurrent mark.
void ReMarkRoots()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessReferences(Thread* self)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update and mark references from immune spaces.
void UpdateAndMarkModUnion()
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Pre clean cards to reduce how much work is needed in the pause.
void PreCleanCards()
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection. Virtual as by default it sweeps
// all allocation spaces. Partial and sticky GCs want to just sweep a subset of the heap.
virtual void Sweep(bool swap_bitmaps)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
@@ -157,13 +157,13 @@
// Sweep only pointers within an array. WARNING: Trashes objects.
void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Blackens an object.
void ScanObject(mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// No thread safety analysis due to lambdas.
template<typename MarkVisitor, typename ReferenceVisitor>
@@ -172,53 +172,53 @@
const ReferenceVisitor& ref_visitor)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
REQUIRES(!Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static mirror::Object* VerifySystemWeakIsLiveCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void VerifySystemWeaks()
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Verify that an object is live, either in a live bitmap or in the allocation stack.
void VerifyIsLive(const mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
size_t count,
const RootInfo& info) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object.
virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Barrier& GetBarrier() {
return *gc_barrier_;
@@ -226,24 +226,24 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns object if the object is marked in the heap bitmap, otherwise null.
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void MarkObjectNonNull(mirror::Object* obj,
mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Marks an object atomically, safe to use from multiple threads.
void MarkObjectNonNullParallel(mirror::Object* obj)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if we need to add obj to a mark stack.
bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -251,16 +251,16 @@
// Verify the roots of the heap and print out information related to any invalid roots.
// Called in MarkObject, so we may or may not hold the mutator lock.
void VerifySuspendedThreadRoots()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
void ExpandMarkStack()
REQUIRES(mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ResizeMarkStack(size_t new_size)
REQUIRES(mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns how many threads we should use for the current GC phase based on whether we are paused
// and whether or not we care about pauses.
@@ -269,19 +269,19 @@
// Push a single reference on a mark stack.
void PushOnMarkStack(mirror::Object* obj)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Blackens objects grayed during a garbage collection.
void ScanGrayObjects(bool paused, uint8_t minimum_age)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual void ProcessMarkStack()
OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ProcessMarkStack(false);
}
@@ -289,12 +289,12 @@
void ProcessMarkStack(bool paused)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessMarkStackParallel(size_t thread_count)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!mark_stack_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used to Get around thread safety annotations. The call is from MarkingPhase and is guarded by
// IsExclusiveHeld.
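The bulk of this change is mechanical: every SHARED_REQUIRES(...) annotation becomes REQUIRES_SHARED(...), matching the attribute naming used by Clang's -Wthread-safety analysis. Below is a minimal, self-contained sketch of how such a macro pair can be defined and used; the attribute names follow the Clang documentation rather than ART's actual base/mutex.h, so treat it as illustrative only.

```cpp
// Sketch only: forwards REQUIRES/REQUIRES_SHARED to Clang's thread-safety
// attributes when building with clang, and compiles to nothing elsewhere.
#if defined(__clang__)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x)
#endif

#define CAPABILITY(x)        THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
#define REQUIRES(...)        THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))

// A stand-in for a reader-writer lock such as Locks::mutator_lock_.
struct CAPABILITY("mutex") FakeReaderWriterMutex {};
FakeReaderWriterMutex mutator_lock;

// Shared (reader) hold is enough for the first function; the second needs
// the lock held exclusively, mirroring the annotations in this patch.
void ReadHeapState() REQUIRES_SHARED(mutator_lock);
void MutateHeapState() REQUIRES(mutator_lock);
```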
diff --git a/runtime/gc/collector/partial_mark_sweep.h b/runtime/gc/collector/partial_mark_sweep.h
index e9b4f6f..8b0d3dd 100644
--- a/runtime/gc/collector/partial_mark_sweep.h
+++ b/runtime/gc/collector/partial_mark_sweep.h
@@ -37,7 +37,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that aren't collected for partial
// collections, ie the Zygote space. Also mark this space as immune. Virtual as overridden by
// StickyMarkSweep.
- virtual void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ virtual void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PartialMarkSweep);
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 7a4c025..fc04f30 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -265,16 +265,20 @@
RecordFree(ObjectBytePair(from_objects - to_objects, from_bytes - to_bytes));
// Clear and protect the from space.
from_space_->Clear();
- if (kProtectFromSpace && !from_space_->IsRosAllocSpace()) {
- // Protect with PROT_NONE.
- VLOG(heap) << "Protecting from_space_ : " << *from_space_;
- from_space_->GetMemMap()->Protect(PROT_NONE);
- } else {
- // If RosAllocSpace, we'll leave it as PROT_READ here so the
- // rosalloc verification can read the metadata magic number and
- // protect it with PROT_NONE later in FinishPhase().
- VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
- from_space_->GetMemMap()->Protect(PROT_READ);
+ // b/31172841. Temporarily disable the from-space protection under gcstress mode with debug build
+ // due to some protection issue in the build server.
+ if (kProtectFromSpace && !(kIsDebugBuild && heap_->gc_stress_mode_)) {
+ if (!from_space_->IsRosAllocSpace()) {
+ // Protect with PROT_NONE.
+ VLOG(heap) << "Protecting from_space_ : " << *from_space_;
+ from_space_->GetMemMap()->Protect(PROT_NONE);
+ } else {
+ // If RosAllocSpace, we'll leave it as PROT_READ here so the
+ // rosalloc verification can read the metadata magic number and
+ // protect it with PROT_NONE later in FinishPhase().
+ VLOG(heap) << "Protecting from_space_ with PROT_READ : " << *from_space_;
+ from_space_->GetMemMap()->Protect(PROT_READ);
+ }
}
heap_->PreSweepingGcVerification(this);
if (swap_semi_spaces_) {
@@ -289,7 +293,7 @@
: from_space_(from_space) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE {
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (from_space_->HasAddress(ref)) {
Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
@@ -382,7 +386,7 @@
live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
reinterpret_cast<uintptr_t>(space->End()),
[this](Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
DCHECK(obj != nullptr);
VerifyNoFromSpaceReferences(obj);
});
@@ -790,9 +794,13 @@
void SemiSpace::FinishPhase() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- if (kProtectFromSpace && from_space_->IsRosAllocSpace()) {
- VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
- from_space_->GetMemMap()->Protect(PROT_NONE);
+ // b/31172841. Temporarily disable the from-space protection under gcstress mode with debug build
+ // due to some protection issue in the build server.
+ if (kProtectFromSpace && !(kIsDebugBuild && heap_->gc_stress_mode_)) {
+ if (from_space_->IsRosAllocSpace()) {
+ VLOG(heap) << "Protecting from_space_ with PROT_NONE : " << *from_space_;
+ from_space_->GetMemMap()->Protect(PROT_NONE);
+ }
}
// Null the "to" and "from" spaces since compacting from one to the other isn't valid until
// further action is done by the heap.
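Beyond the annotation rename, the semi_space.cc hunks above gate the from-space protection behind a gcstress/debug check (b/31172841): ReclaimPhase still leaves RosAlloc spaces readable for verification and FinishPhase protects them afterwards, but both steps are skipped entirely when kIsDebugBuild and gc_stress_mode_ are set. A rough, self-contained sketch of the resulting decision logic follows; the in_finish_phase parameter is illustrative, and a raw mprotect call stands in for MemMap::Protect.

```cpp
#include <sys/mman.h>
#include <cstddef>

// Sketch of the protection policy after this change; not ART code.
void ProtectFromSpace(void* begin, size_t size, bool is_ros_alloc_space,
                      bool gc_stress_mode, bool in_finish_phase) {
  constexpr bool kProtectFromSpace = true;
  constexpr bool kIsDebugBuild = true;  // assume a debug build for the sketch
  // b/31172841: under gcstress in a debug build, skip protection entirely.
  if (!kProtectFromSpace || (kIsDebugBuild && gc_stress_mode)) {
    return;
  }
  if (!in_finish_phase) {
    // ReclaimPhase: keep RosAlloc spaces readable so rosalloc verification
    // can still read the metadata magic number; protect everything else.
    mprotect(begin, size, is_ros_alloc_space ? PROT_READ : PROT_NONE);
  } else if (is_ros_alloc_space) {
    // FinishPhase: verification is done, so the RosAlloc space can now be
    // fully protected as well.
    mprotect(begin, size, PROT_NONE);
  }
}
```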
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 694e536..4b63d9b 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -117,7 +117,7 @@
REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
void VerifyNoFromSpaceReferences(mirror::Object* obj)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Marks the root set at the start of a garbage collection.
void MarkRoots()
@@ -125,7 +125,7 @@
// Bind the live bits to the mark bits of bitmaps for spaces that are never collected, ie
// the image. Mark that portion of the heap as immune.
- virtual void BindBitmaps() SHARED_REQUIRES(Locks::mutator_lock_)
+ virtual void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_);
void UnBindBitmaps()
@@ -137,13 +137,13 @@
// Sweeps unmarked objects to complete the garbage collection.
virtual void Sweep(bool swap_bitmaps)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweeps unmarked objects to complete the garbage collection.
void SweepLargeObjects(bool swap_bitmaps) REQUIRES(Locks::heap_bitmap_lock_);
void SweepSystemWeaks()
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
@@ -157,44 +157,44 @@
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
REQUIRES(Locks::mutator_lock_)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Marks or unmarks a large object based on whether or not set is true. If set is true, then we
// mark, otherwise we unmark.
bool MarkLargeObject(const mirror::Object* obj)
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Expand mark stack to 2x its current size.
- void ResizeMarkStack(size_t new_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void ResizeMarkStack(size_t new_size) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if we should sweep the space.
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const;
// Push an object onto the mark stack.
- void MarkStackPush(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void MarkStackPush(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateAndMarkModUnion()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Recursively blackens objects on the mark stack.
void ProcessMarkStack()
REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
void RevokeAllThreadLocalBuffers();
diff --git a/runtime/gc/collector/sticky_mark_sweep.h b/runtime/gc/collector/sticky_mark_sweep.h
index abaf978..100ca64 100644
--- a/runtime/gc/collector/sticky_mark_sweep.h
+++ b/runtime/gc/collector/sticky_mark_sweep.h
@@ -36,17 +36,17 @@
protected:
// Bind the live bits to the mark bits of bitmaps for all spaces, all spaces other than the
// alloc space will be marked as immune.
- void BindBitmaps() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ void BindBitmaps() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
void MarkReachableObjects()
OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Sweep(bool swap_bitmaps)
OVERRIDE
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StickyMarkSweep);
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 4d16b6e..600aff1 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1704,7 +1704,7 @@
return nullptr;
}
-static inline bool EntrypointsInstrumented() SHARED_REQUIRES(Locks::mutator_lock_) {
+static inline bool EntrypointsInstrumented() REQUIRES_SHARED(Locks::mutator_lock_) {
instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
return instrumentation != nullptr && instrumentation->AllocEntrypointsInstrumented();
@@ -1930,11 +1930,11 @@
InstanceCounter(const std::vector<mirror::Class*>& classes,
bool use_is_assignable_from,
uint64_t* counts)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: classes_(classes), use_is_assignable_from_(use_is_assignable_from), counts_(counts) {}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
InstanceCounter* instance_counter = reinterpret_cast<InstanceCounter*>(arg);
mirror::Class* instance_class = obj->GetClass();
CHECK(instance_class != nullptr);
@@ -1966,11 +1966,11 @@
class InstanceCollector {
public:
InstanceCollector(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: class_(c), max_count_(max_count), instances_(instances) {
}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
DCHECK(arg != nullptr);
InstanceCollector* instance_collector = reinterpret_cast<InstanceCollector*>(arg);
if (obj->GetClass() == instance_collector->class_) {
@@ -2000,12 +2000,12 @@
ReferringObjectsFinder(mirror::Object* object,
int32_t max_count,
std::vector<mirror::Object*>& referring_objects)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: object_(object), max_count_(max_count), referring_objects_(referring_objects) {
}
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
reinterpret_cast<ReferringObjectsFinder*>(arg)->operator()(obj);
}
@@ -2018,7 +2018,7 @@
// For Object::VisitReferences.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
referring_objects_.push_back(obj);
@@ -2125,11 +2125,11 @@
}
void Heap::TransitionCollector(CollectorType collector_type) {
- // Collector transition must not happen with CC
- CHECK(!kUseReadBarrier);
if (collector_type == collector_type_) {
return;
}
+ // Collector transition must not happen with CC
+ CHECK(!kUseReadBarrier);
VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
@@ -2374,7 +2374,7 @@
const bool is_running_on_memory_tool_;
static void Callback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(arg != nullptr);
BinContext* context = reinterpret_cast<BinContext*>(arg);
ZygoteCompactingCollector* collector = context->collector_;
@@ -2571,7 +2571,7 @@
zygote_space_->GetLiveBitmap()->VisitMarkedRange(
reinterpret_cast<uintptr_t>(zygote_space_->Begin()),
reinterpret_cast<uintptr_t>(zygote_space_->Limit()),
- [](mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ [](mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(obj->AtomicSetMarkBit(0, 1));
});
}
@@ -2891,7 +2891,7 @@
explicit RootMatchesObjectVisitor(const mirror::Object* obj) : obj_(obj) { }
void VisitRoot(mirror::Object* root, const RootInfo& info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == obj_) {
LOG(INFO) << "Object " << obj_ << " is a root " << info.ToString();
}
@@ -2913,7 +2913,7 @@
class VerifyReferenceVisitor : public SingleRootVisitor {
public:
VerifyReferenceVisitor(Heap* heap, Atomic<size_t>* fail_count, bool verify_referent)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_)
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
size_t GetFailureCount() const {
@@ -2921,14 +2921,14 @@
}
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
}
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyReference(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
}
@@ -2937,19 +2937,19 @@
}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const_cast<VerifyReferenceVisitor*>(this)->VisitRoot(
root->AsMirrorPtr(), RootInfo(kRootVMInternal));
}
virtual void VisitRoot(mirror::Object* root, const RootInfo& root_info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (root == nullptr) {
LOG(ERROR) << "Root is null with info " << root_info.GetType();
} else if (!VerifyReference(nullptr, root, MemberOffset(0))) {
@@ -3066,7 +3066,7 @@
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
void operator()(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Note: we are verifying the references in obj but not obj itself, this is because obj must
// be live or else how did we find it in the live bitmap?
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
@@ -3075,12 +3075,12 @@
}
static void VisitCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyObjectVisitor* visitor = reinterpret_cast<VerifyObjectVisitor*>(arg);
visitor->operator()(obj);
}
- void VerifyRoots() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
+ void VerifyRoots() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_) {
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
VerifyReferenceVisitor visitor(heap_, fail_count_, verify_referent_);
Runtime::Current()->VisitRoots(&visitor);
@@ -3172,7 +3172,7 @@
class VerifyReferenceCardVisitor {
public:
VerifyReferenceCardVisitor(Heap* heap, bool* failed)
- SHARED_REQUIRES(Locks::mutator_lock_,
+ REQUIRES_SHARED(Locks::mutator_lock_,
Locks::heap_bitmap_lock_)
: heap_(heap), failed_(failed) {
}
@@ -3250,7 +3250,7 @@
failed_(false) {}
void operator()(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
obj->VisitReferences(visitor, VoidFunctor());
}
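One behavioural fix also rides along in the heap.cc diff: the TransitionCollector hunk moves the CHECK(!kUseReadBarrier) below the early return, so requesting a "transition" to the collector that is already active stays a no-op instead of aborting under the concurrent copying collector. A minimal illustration of the reordered control flow; the enum and the assert are stand-ins, not the runtime's real definitions.

```cpp
#include <cassert>

enum class CollectorType { kCMS, kSS, kCC };

// Illustrative only: mirrors the order of checks after the hunk above.
void TransitionCollector(CollectorType requested, CollectorType current,
                         bool use_read_barrier) {
  if (requested == current) {
    // A request for the already-active collector returns before the
    // read-barrier check, so it no longer aborts under CC.
    return;
  }
  // Only a real transition is forbidden while the CC collector is in use.
  assert(!use_read_barrier);
  // ... perform the actual transition ...
}
```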
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b357b87..10bebef 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -195,7 +195,7 @@
mirror::Class* klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
!Roles::uninterruptible_) {
return AllocObjectWithAllocator<kInstrumented, true>(
@@ -207,7 +207,7 @@
mirror::Class* klass,
size_t num_bytes,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
!Roles::uninterruptible_) {
return AllocObjectWithAllocator<kInstrumented, true>(
@@ -220,7 +220,7 @@
size_t byte_count,
AllocatorType allocator,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_,
!Roles::uninterruptible_);
@@ -234,13 +234,13 @@
// Visit all of the live objects in the heap.
void VisitObjects(ObjectCallback callback, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void VisitObjectsPaused(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void CheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
@@ -274,12 +274,12 @@
// A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
// and doesn't abort on error, allowing the caller to report more
// meaningful diagnostics.
- bool IsValidObjectAddress(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsValidObjectAddress(const mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Faster alternative to IsHeapAddress since finding if an object is in the large object space is
// very slow.
bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
// Requires the heap lock to be held.
@@ -287,10 +287,10 @@
bool search_allocation_stack = true,
bool search_live_stack = true,
bool sorted = false)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
// Returns true if there is any chance that the object (obj) will move.
- bool IsMovableObject(const mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsMovableObject(const mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Enables us to prevent compacting GC until objects are released.
void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
@@ -306,7 +306,7 @@
// Mutator lock is required for GetContinuousSpaces.
void ClearMarkedObjects()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Initiates an explicit garbage collection.
void CollectGarbage(bool clear_soft_references)
@@ -323,17 +323,17 @@
bool use_is_assignable_from,
uint64_t* counts)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implements JDWP RT_Instances.
void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implements JDWP OR_ReferringObjects.
void GetReferringObjects(mirror::Object* o,
int32_t max_count,
std::vector<mirror::Object*>& referring_objects)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
// implement dalvik.system.VMRuntime.clearGrowthLimit.
@@ -386,7 +386,7 @@
}
const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return continuous_spaces_;
}
@@ -527,12 +527,12 @@
// spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
// TODO: consider using faster data structure like binary tree.
space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
bool fail_ok) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
@@ -550,15 +550,15 @@
void RosAllocVerification(TimingLogger* timings, const char* name)
REQUIRES(Locks::mutator_lock_);
- accounting::HeapBitmap* GetLiveBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
return live_bitmap_.get();
}
- accounting::HeapBitmap* GetMarkBitmap() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
return mark_bitmap_.get();
}
- accounting::ObjectStack* GetLiveStack() SHARED_REQUIRES(Locks::heap_bitmap_lock_) {
+ accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
return live_stack_.get();
}
@@ -566,7 +566,7 @@
// Mark and empty stack.
void FlushAllocStack()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Revoke all the thread-local allocation stacks.
@@ -579,18 +579,18 @@
accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
accounting::ObjectStack* stack)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Mark the specified allocation stack as live.
void MarkAllocStackAsLive(accounting::ObjectStack* stack)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_);
// Unbind any bound bitmaps.
void UnBindBitmaps()
REQUIRES(Locks::heap_bitmap_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the boot image spaces. There may be multiple boot image spaces.
const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
@@ -598,10 +598,10 @@
}
bool ObjectIsInBootImageSpace(mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsInBootImageOatFile(const void* p) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void GetBootImagesSize(uint32_t* boot_image_begin,
uint32_t* boot_image_end,
@@ -621,7 +621,7 @@
// Return the corresponding rosalloc space.
space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
space::MallocSpace* GetNonMovingSpace() const {
return non_moving_space_;
@@ -646,8 +646,8 @@
}
}
- void DumpSpaces(std::ostream& stream) const SHARED_REQUIRES(Locks::mutator_lock_);
- std::string DumpSpaces() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
+ std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);
// Dump object should only be used by the signal handler.
void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
@@ -759,23 +759,23 @@
REQUIRES(Locks::alloc_tracker_lock_);
void VisitAllocationRecords(RootVisitor* visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void SweepAllocationRecords(IsMarkedVisitor* visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void DisallowNewAllocationRecords() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void AllowNewAllocationRecords() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void BroadcastForNewAllocationRecords() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::alloc_tracker_lock_);
void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
@@ -831,11 +831,11 @@
collector_type == kCollectorTypeHomogeneousSpaceCompact;
}
bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void CheckConcurrentGC(Thread* self,
size_t new_num_bytes_allocated,
mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
accounting::ObjectStack* GetMarkStack() {
@@ -848,7 +848,7 @@
mirror::Class** klass,
size_t byte_count,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
// Handles Allocate()'s slow allocation path with GC involved after
@@ -862,14 +862,14 @@
size_t* bytes_tl_bulk_allocated,
mirror::Class** klass)
REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate into a specific space.
mirror::Object* AllocateInto(Thread* self,
space::AllocSpace* space,
mirror::Class* c,
size_t bytes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
// wrong space.
@@ -884,17 +884,17 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kGrow>
ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
// Returns true if the address passed in is within the address range of a continuous space.
bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Run the finalizers. If timeout is non zero, then we use the VMRuntime version.
void RunFinalization(JNIEnv* env, uint64_t timeout);
@@ -908,7 +908,7 @@
REQUIRES(!*pending_task_lock_);
void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*pending_task_lock_);
bool IsGCRequestPending() const;
@@ -964,10 +964,10 @@
size_t GetPercentFree();
static void VerificationCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::heap_bitmap_lock_);
// Swap the allocation stack with the live stack.
- void SwapStacks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
// Clear cards and update the mod union table. When process_alloc_space_cards is true,
// if clear_alloc_space_cards is true, then we clear cards instead of ageing them. We do
@@ -976,17 +976,17 @@
bool use_rem_sets,
bool process_alloc_space_cards,
bool clear_alloc_space_cards)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Push an object onto the allocation stack.
void PushOnAllocationStack(Thread* self, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void PushOnAllocationStackWithInternalGC(Thread* self, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
void ClearConcurrentGCRequest();
@@ -1008,7 +1008,7 @@
void TrimIndirectReferenceTables(Thread* self);
void VisitObjectsInternal(ObjectCallback callback, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
void VisitObjectsInternalRegionSpace(ObjectCallback callback, void* arg)
REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
@@ -1017,7 +1017,7 @@
// GC stress mode attempts to do one GC per unique backtrace.
void CheckGcStressMode(Thread* self, mirror::Object** obj)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
// All-known continuous spaces, where objects lie within fixed bounds.
diff --git a/runtime/gc/reference_processor.h b/runtime/gc/reference_processor.h
index d9dfedb..4788f8a 100644
--- a/runtime/gc/reference_processor.h
+++ b/runtime/gc/reference_processor.h
@@ -48,34 +48,34 @@
explicit ReferenceProcessor();
void ProcessReferences(bool concurrent, TimingLogger* timings, bool clear_soft_references,
gc::collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::heap_bitmap_lock_)
REQUIRES(!Locks::reference_processor_lock_);
// The slow path bool is contained in the reference class object, can only be set once
// Only allow setting this with mutators suspended so that we can avoid using a lock in the
// GetReferent fast path as an optimization.
- void EnableSlowPath() SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnableSlowPath() REQUIRES_SHARED(Locks::mutator_lock_);
void BroadcastForSlowPath(Thread* self);
// Decode the referent, may block if references are being processed.
mirror::Object* GetReferent(Thread* self, mirror::Reference* reference)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::reference_processor_lock_);
void EnqueueClearedReferences(Thread* self) REQUIRES(!Locks::mutator_lock_);
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateRoots(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Make a circular list with reference if it is not enqueued. Uses the finalizer queue lock.
bool MakeCircularListIfUnenqueued(mirror::FinalizerReference* reference)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::reference_processor_lock_,
!Locks::reference_queue_finalizer_references_lock_);
private:
- bool SlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool SlowPathEnabled() REQUIRES_SHARED(Locks::mutator_lock_);
// Called by ProcessReferences.
void DisableSlowPath(Thread* self) REQUIRES(Locks::reference_processor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// If we are preserving references it means that some dead objects may become live, we use start
// and stop preserving to block mutators using GetReferent from getting access to these
// referents.
diff --git a/runtime/gc/reference_queue.h b/runtime/gc/reference_queue.h
index 04d3454..1de1aa1 100644
--- a/runtime/gc/reference_queue.h
+++ b/runtime/gc/reference_queue.h
@@ -55,35 +55,35 @@
// threads since it uses a lock to avoid a race between checking for the reference's presence and
// adding it.
void AtomicEnqueueIfNotEnqueued(Thread* self, mirror::Reference* ref)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*lock_);
// Enqueue a reference. The reference must be unprocessed.
// Not thread safe, used when mutators are paused to minimize lock overhead.
- void EnqueueReference(mirror::Reference* ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ void EnqueueReference(mirror::Reference* ref) REQUIRES_SHARED(Locks::mutator_lock_);
// Dequeue a reference from the queue and return that dequeued reference.
- mirror::Reference* DequeuePendingReference() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Reference* DequeuePendingReference() REQUIRES_SHARED(Locks::mutator_lock_);
// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
void EnqueueFinalizerReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
// biased toward saving are blackened and also removed from the list.
void ForwardSoftReferences(MarkObjectVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unlink the reference list clearing reference objects with white referents. Cleared references
// registered to a reference queue are scheduled for appending by the heap worker thread.
void ClearWhiteReferences(ReferenceQueue* cleared_references,
collector::GarbageCollector* collector)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
- size_t GetLength() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
+ size_t GetLength() const REQUIRES_SHARED(Locks::mutator_lock_);
bool IsEmpty() const {
return list_ == nullptr;
@@ -91,13 +91,13 @@
void Clear() {
list_ = nullptr;
}
- mirror::Reference* GetList() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Reference* GetList() REQUIRES_SHARED(Locks::mutator_lock_) {
return list_;
}
// Visits list_, currently only used for the mark compact GC.
void UpdateRoots(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Lock, used for parallel GC reference enqueuing. It allows for multiple threads simultaneously
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 2263797..45cea5a 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -87,7 +87,7 @@
}
inline size_t BumpPointerSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t num_bytes = obj->SizeOf();
if (usable_size != nullptr) {
*usable_size = RoundUp(num_bytes, kAlignment);
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 0e27d84..e9982e9 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -58,7 +58,7 @@
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
@@ -72,7 +72,7 @@
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Removes the fork time growth limit on capacity, allowing the application to allocate up to the
// maximum reserved size of the heap.
@@ -110,9 +110,9 @@
void AssertAllThreadLocalBuffersAreRevoked()
REQUIRES(!Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_, !block_lock_);
- uint64_t GetBytesAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+ uint64_t GetBytesAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
- uint64_t GetObjectsAllocated() SHARED_REQUIRES(Locks::mutator_lock_)
+ uint64_t GetObjectsAllocated() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*Locks::runtime_shutdown_lock_, !*Locks::thread_list_lock_, !block_lock_);
bool IsEmpty() const {
return Begin() == End();
@@ -132,7 +132,7 @@
// Return the object which comes after obj, while ensuring alignment.
static mirror::Object* GetNextObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Allocate a new TLAB, returns false if the allocation failed.
bool AllocNewTlab(Thread* self, size_t bytes) REQUIRES(!block_lock_);
@@ -143,7 +143,7 @@
// Go through all of the blocks and visit the continuous objects.
void Walk(ObjectCallback* callback, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!block_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!block_lock_);
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE;
@@ -154,7 +154,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Object alignment within the space.
static constexpr size_t kAlignment = 8;
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 455d28e..9282ec7 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -319,7 +319,7 @@
namespace allocator {
// Implement the dlmalloc morecore callback.
-void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) SHARED_REQUIRES(Locks::mutator_lock_) {
+void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
Heap* heap = runtime->GetHeap();
::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index eab757a..8fb2d76 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -65,11 +65,11 @@
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Virtual to allow MemoryToolMallocSpace to intercept.
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
return num_bytes;
@@ -136,7 +136,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
protected:
DlMallocSpace(MemMap* mem_map, size_t initial_size, const std::string& name, void* mspace,
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index c87312b..e41c532 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -456,7 +456,7 @@
bool is_global_cache,
bool validate_oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Note that we must not use the file descriptor associated with
// ScopedFlock::GetFile to Init the image file. We want the file
// descriptor (and the associated exclusive lock) to be released when
@@ -492,7 +492,7 @@
bool validate_oat_file,
const OatFile* oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(image_filename != nullptr);
CHECK(image_location != nullptr);
@@ -865,14 +865,14 @@
explicit FixupRootVisitor(Args... args) : FixupVisitor(args...) {}
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
ALWAYS_INLINE void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* ref = root->AsMirrorPtr();
mirror::Object* new_ref = ForwardObject(ref);
if (ref != new_ref) {
@@ -936,7 +936,7 @@
// java.lang.ref.Reference visitor.
void operator()(mirror::Class* klass ATTRIBUTE_UNUSED, mirror::Reference* ref) const
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
mirror::Object* obj = ref->GetReferent<kWithoutReadBarrier>();
ref->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(
mirror::Reference::ReferentOffset(),
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 534232d..c407259 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -47,13 +47,13 @@
InstructionSet image_isa,
bool secondary_image,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to open an existing app image space.
static std::unique_ptr<ImageSpace> CreateFromAppImage(const char* image,
const OatFile* oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Reads the image header from the specified image location for the
// instruction set image_isa. Returns null on failure, with
@@ -70,7 +70,7 @@
std::unique_ptr<const OatFile> ReleaseOatFile();
void VerifyImageAllocations()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ImageHeader& GetImageHeader() const {
return *reinterpret_cast<ImageHeader*>(Begin());
@@ -158,7 +158,7 @@
bool validate_oat_file,
const OatFile* oat_file,
std::string* error_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Atomic<uint32_t> bitmap_index_;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index c726998..0320e79 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -96,7 +96,7 @@
return Begin() <= byte_obj && byte_obj < End();
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return true if the large object is a zygote large object. Potentially slow.
virtual bool IsZygoteLargeObject(Thread* self, mirror::Object* obj) const = 0;
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index c6b2870..f85ea46 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -63,9 +63,9 @@
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
virtual size_t Free(Thread* self, mirror::Object* ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Returns the maximum bytes that could be allocated for the given
// size in bulk, that is the maximum value for the
@@ -160,7 +160,7 @@
size_t maximum_size, bool low_memory_mode) = 0;
virtual void RegisterRecentFree(mirror::Object* ptr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(lock_);
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
@@ -196,7 +196,7 @@
private:
static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(MallocSpace);
};
diff --git a/runtime/gc/space/memory_tool_malloc_space.h b/runtime/gc/space/memory_tool_malloc_space.h
index c081011..e53f009 100644
--- a/runtime/gc/space/memory_tool_malloc_space.h
+++ b/runtime/gc/space/memory_tool_malloc_space.h
@@ -43,10 +43,10 @@
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterRecentFree(mirror::Object* ptr ATTRIBUTE_UNUSED) OVERRIDE {}
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 823aa38..4e57a85 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -62,11 +62,11 @@
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_) {
return AllocationSizeNonvirtual(obj, usable_size);
}
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
size_t Free(Thread*, mirror::Object*) OVERRIDE {
UNIMPLEMENTED(FATAL);
@@ -163,7 +163,7 @@
return nullptr;
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!region_lock_);
// Object alignment within the space.
static constexpr size_t kAlignment = kObjectAlignment;
@@ -503,7 +503,7 @@
}
mirror::Object* GetNextObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Mutex region_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index b016095..8ccbfaa 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -379,7 +379,7 @@
// Callback from rosalloc when it needs to increase the footprint.
void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Heap* heap = Runtime::Current()->GetHeap();
art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc);
DCHECK(rosalloc_space != nullptr);
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index b175fbf..f9c7dbc 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -64,9 +64,9 @@
return AllocationSizeNonvirtual<true>(obj, usable_size);
}
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 23e937d..bd600fe 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -52,7 +52,7 @@
heap->SetSpaceAsDefault(space);
}
- mirror::Class* GetByteArrayClass(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetByteArrayClass(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
auto null_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
if (byte_array_class_ == nullptr) {
@@ -71,7 +71,7 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->Alloc(self,
@@ -91,7 +91,7 @@
size_t* bytes_allocated,
size_t* usable_size,
size_t* bytes_tl_bulk_allocated)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
@@ -103,7 +103,7 @@
}
void InstallClass(mirror::Object* o, mirror::Class* byte_array_class, size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Note the minimum size, which is the size of a zero-length byte array.
EXPECT_GE(size, SizeOfZeroLengthByteArray());
EXPECT_TRUE(byte_array_class != nullptr);
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index f2889e2..4d10de8 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -33,7 +33,7 @@
static ZygoteSpace* Create(const std::string& name, MemMap* mem_map,
accounting::ContinuousSpaceBitmap* live_bitmap,
accounting::ContinuousSpaceBitmap* mark_bitmap)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os) const;
@@ -77,7 +77,7 @@
}
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
protected:
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() {
diff --git a/runtime/gc_root.h b/runtime/gc_root.h
index 0304d0d..0a98f55 100644
--- a/runtime/gc_root.h
+++ b/runtime/gc_root.h
@@ -91,24 +91,24 @@
// Single root version, not overridable.
ALWAYS_INLINE void VisitRoot(mirror::Object** root, const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
VisitRoots(&root, 1, info);
}
// Single root version, not overridable.
ALWAYS_INLINE void VisitRootIfNonNull(mirror::Object** root, const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (*root != nullptr) {
VisitRoot(root, info);
}
}
virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
// Only visits roots one at a time, doesn't handle updating roots. Used when performance isn't
@@ -116,7 +116,7 @@
class SingleRootVisitor : public RootVisitor {
private:
void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(*roots[i], info);
}
@@ -124,7 +124,7 @@
void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
const RootInfo& info) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < count; ++i) {
VisitRoot(roots[i]->AsMirrorPtr(), info);
}
@@ -169,10 +169,10 @@
public:
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE MirrorType* Read(GcRootSource* gc_root_source = nullptr) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoot(RootVisitor* visitor, const RootInfo& info) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!IsNull());
mirror::CompressedReference<mirror::Object>* roots[1] = { &root_ };
visitor->VisitRoots(roots, 1u, info);
@@ -180,7 +180,7 @@
}
void VisitRootIfNonNull(RootVisitor* visitor, const RootInfo& info) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsNull()) {
VisitRoot(visitor, info);
}
@@ -196,7 +196,7 @@
}
ALWAYS_INLINE GcRoot() {}
- explicit ALWAYS_INLINE GcRoot(MirrorType* ref) SHARED_REQUIRES(Locks::mutator_lock_);
+ explicit ALWAYS_INLINE GcRoot(MirrorType* ref) REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Root visitors take pointers to root_ and place them in CompressedReference** arrays. We use a
@@ -223,7 +223,7 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(GcRoot<MirrorType>& root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root.IsNull()) {
VisitRoot(root);
}
@@ -231,27 +231,27 @@
template <class MirrorType>
ALWAYS_INLINE void VisitRootIfNonNull(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
template <class MirrorType>
- void VisitRoot(GcRoot<MirrorType>& root) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitRoot(GcRoot<MirrorType>& root) REQUIRES_SHARED(Locks::mutator_lock_) {
VisitRoot(root.AddressWithoutBarrier());
}
template <class MirrorType>
void VisitRoot(mirror::CompressedReference<MirrorType>* root)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(buffer_pos_ >= kBufferSize)) {
Flush();
}
roots_[buffer_pos_++] = root;
}
- void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
visitor_->VisitRoots(roots_, buffer_pos_, root_info_);
buffer_pos_ = 0;
}
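For context on the RootVisitor interface annotated above, a minimal hypothetical visitor could implement only the two pure-virtual VisitRoots overloads; CountingRootVisitor and count_ below are illustrative names, not part of this change:
class CountingRootVisitor : public RootVisitor {
 public:
  void VisitRoots(mirror::Object*** roots ATTRIBUTE_UNUSED,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    count_ += count;  // Count root slots without dereferencing them.
  }
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots ATTRIBUTE_UNUSED,
                  size_t count,
                  const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    count_ += count;
  }
  size_t count_ = 0;
};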
diff --git a/runtime/generated/asm_support_gen.h b/runtime/generated/asm_support_gen.h
index 5c98ea6..03f5bf6 100644
--- a/runtime/generated/asm_support_gen.h
+++ b/runtime/generated/asm_support_gen.h
@@ -80,8 +80,8 @@
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_SIZE_MINUS_ONE), (static_cast<int32_t>(art::mirror::DexCache::kDexCacheStringCacheSize - 1)))
#define STRING_DEX_CACHE_HASH_BITS 10
DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_HASH_BITS), (static_cast<int32_t>(art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))))
-#define STRING_DEX_CACHE_ELEMENT_SIZE 0x8
-DEFINE_CHECK_EQ(static_cast<size_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<size_t>(sizeof(art::mirror::StringDexCachePair))))
+#define STRING_DEX_CACHE_ELEMENT_SIZE 8
+DEFINE_CHECK_EQ(static_cast<int32_t>(STRING_DEX_CACHE_ELEMENT_SIZE), (static_cast<int32_t>(sizeof(art::mirror::StringDexCachePair))))
#define MIN_LARGE_OBJECT_THRESHOLD 0x3000
DEFINE_CHECK_EQ(static_cast<size_t>(MIN_LARGE_OBJECT_THRESHOLD), (static_cast<size_t>(art::gc::Heap::kMinLargeObjectThreshold)))
#define LOCK_WORD_STATE_SHIFT 30
diff --git a/runtime/handle.h b/runtime/handle.h
index a415373..d4c13d4 100644
--- a/runtime/handle.h
+++ b/runtime/handle.h
@@ -20,8 +20,10 @@
#include "base/casts.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/mutex.h"
#include "base/value_object.h"
-#include "stack.h"
+#include "jni.h"
+#include "stack_reference.h"
namespace art {
@@ -50,19 +52,19 @@
ALWAYS_INLINE explicit Handle(StackReference<T>* reference) : reference_(reference) {
}
- ALWAYS_INLINE T& operator*() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T& operator*() const REQUIRES_SHARED(Locks::mutator_lock_) {
return *Get();
}
- ALWAYS_INLINE T* operator->() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* operator->() const REQUIRES_SHARED(Locks::mutator_lock_) {
return Get();
}
- ALWAYS_INLINE T* Get() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* Get() const REQUIRES_SHARED(Locks::mutator_lock_) {
return down_cast<T*>(reference_->AsMirrorPtr());
}
- ALWAYS_INLINE jobject ToJObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE jobject ToJObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(reference_->AsMirrorPtr() == nullptr)) {
// Special case so that we work with null handles.
return nullptr;
@@ -106,22 +108,22 @@
}
ALWAYS_INLINE MutableHandle(const MutableHandle<T>& handle)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(handle.reference_) {
}
ALWAYS_INLINE MutableHandle<T>& operator=(const MutableHandle<T>& handle)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Handle<T>::operator=(handle);
return *this;
}
ALWAYS_INLINE explicit MutableHandle(StackReference<T>* reference)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(reference) {
}
- ALWAYS_INLINE T* Assign(T* reference) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE T* Assign(T* reference) REQUIRES_SHARED(Locks::mutator_lock_) {
StackReference<mirror::Object>* ref = Handle<T>::GetReference();
T* old = down_cast<T*>(ref->AsMirrorPtr());
ref->Assign(reference);
@@ -129,12 +131,12 @@
}
template<typename S>
- explicit MutableHandle(const MutableHandle<S>& handle) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit MutableHandle(const MutableHandle<S>& handle) REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(handle) {
}
template<typename S>
- explicit MutableHandle(StackReference<S>* reference) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit MutableHandle(StackReference<S>* reference) REQUIRES_SHARED(Locks::mutator_lock_)
: Handle<T>(reference) {
}
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index 67d7054..37eed99 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -22,8 +22,9 @@
#include "base/enums.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/mutex.h"
#include "handle.h"
-#include "stack.h"
+#include "stack_reference.h"
#include "verify_object.h"
namespace art {
@@ -61,15 +62,15 @@
}
ALWAYS_INLINE mirror::Object* GetReference(size_t i) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE Handle<mirror::Object> GetHandle(size_t i);
ALWAYS_INLINE MutableHandle<mirror::Object> GetMutableHandle(size_t i)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool Contains(StackReference<mirror::Object>* handle_scope_entry) const;
@@ -150,14 +151,14 @@
ALWAYS_INLINE ~StackHandleScope();
template<class T>
- ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_);
template<class T>
ALWAYS_INLINE HandleWrapper<T> NewHandleWrapper(T** object)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetReference(size_t i, mirror::Object* object)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Thread* Self() const {
return self_;
@@ -165,7 +166,7 @@
private:
template<class T>
- ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE MutableHandle<T> GetHandle(size_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, kNumReferences);
return MutableHandle<T>(&GetReferences()[i]);
}
@@ -209,7 +210,7 @@
}
template<class T>
- MutableHandle<T> NewHandle(T* object) SHARED_REQUIRES(Locks::mutator_lock_) {
+ MutableHandle<T> NewHandle(T* object) REQUIRES_SHARED(Locks::mutator_lock_) {
if (scopes_.empty() || current_scope_num_refs_ >= kNumReferencesPerScope) {
StackHandleScope<kNumReferencesPerScope>* scope =
new StackHandleScope<kNumReferencesPerScope>(self_);
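The handle-scope API above is used in the same way as in the space_test.h hunks earlier in this change; as an illustrative sketch only (some_obj is a placeholder):
// Pin a Class reference across possible suspend points.
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<mirror::Class> klass(hs.NewHandle(some_obj->GetClass()));
// klass.Get() stays valid even if the GC moves the underlying object.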
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index 4005f05..921dde1 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -246,7 +246,7 @@
}
void AddIdList(mirror::ObjectArray<mirror::Object>* values)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int32_t length = values->GetLength();
for (int32_t i = 0; i < length; ++i) {
AddObjectId(values->GetWithoutChecks(i));
@@ -489,23 +489,23 @@
private:
static void VisitObjectCallback(mirror::Object* obj, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(obj != nullptr);
DCHECK(arg != nullptr);
reinterpret_cast<Hprof*>(arg)->DumpHeapObject(obj);
}
void DumpHeapObject(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpHeapClass(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpHeapArray(mirror::Array* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ProcessHeap(bool header_first)
REQUIRES(Locks::mutator_lock_) {
@@ -555,7 +555,7 @@
output_->EndRecord();
}
- void WriteClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void WriteClassTable() REQUIRES_SHARED(Locks::mutator_lock_) {
for (const auto& p : classes_) {
mirror::Class* c = p.first;
HprofClassSerialNumber sn = p.second;
@@ -604,11 +604,11 @@
}
void VisitRoot(mirror::Object* obj, const RootInfo& root_info)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
void MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeapTag heap_tag,
uint32_t thread_serial);
- HprofClassObjectId LookupClassId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
+ HprofClassObjectId LookupClassId(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
if (c != nullptr) {
auto it = classes_.find(c);
if (it == classes_.end()) {
@@ -623,7 +623,7 @@
}
HprofStackTraceSerialNumber LookupStackTraceSerialNumber(const mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto r = allocation_records_.find(obj);
if (r == allocation_records_.end()) {
return kHprofNullStackTrace;
@@ -635,7 +635,7 @@
}
}
- HprofStringId LookupStringId(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_) {
+ HprofStringId LookupStringId(mirror::String* string) REQUIRES_SHARED(Locks::mutator_lock_) {
return LookupStringId(string->ToModifiedUtf8());
}
@@ -653,7 +653,7 @@
return id;
}
- HprofStringId LookupClassNameId(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
+ HprofStringId LookupClassNameId(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
return LookupStringId(PrettyDescriptor(c));
}
@@ -681,7 +681,7 @@
__ AddU4(static_cast<uint32_t>(nowMs & 0xFFFFFFFF));
}
- void WriteStackTraces() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void WriteStackTraces() REQUIRES_SHARED(Locks::mutator_lock_) {
// Write a dummy stack trace record so the analysis tools don't freak out.
output_->StartNewRecord(HPROF_TAG_STACK_TRACE, kHprofTime);
__ AddStackTraceSerialNumber(kHprofNullStackTrace);
@@ -1072,14 +1072,14 @@
// Note that these don't have read barriers. It's OK, however, since the GC is guaranteed to not be
// running during the hprof dumping process.
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = root->AsMirrorPtr();
// The two cases are either classes or dex cache arrays. If it is a dex cache array, then use
// VM internal. Otherwise the object is a declaring class of an ArtField or ArtMethod or a
diff --git a/runtime/image-inl.h b/runtime/image-inl.h
index 28620db..669649e 100644
--- a/runtime/image-inl.h
+++ b/runtime/image-inl.h
@@ -21,6 +21,7 @@
#include "art_method.h"
#include "imtable.h"
+#include "read_barrier-inl.h"
namespace art {
diff --git a/runtime/image.h b/runtime/image.h
index 9ff18d6..8cd94bb 100644
--- a/runtime/image.h
+++ b/runtime/image.h
@@ -229,11 +229,11 @@
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetImageRoot(ImageRoot image_root) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::ObjectArray<mirror::Object>* GetImageRoots() const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RelocateImage(off_t delta);
void RelocateImageMethods(off_t delta);
diff --git a/runtime/indirect_reference_table.cc b/runtime/indirect_reference_table.cc
index 8e49492..4f81b59 100644
--- a/runtime/indirect_reference_table.cc
+++ b/runtime/indirect_reference_table.cc
@@ -36,10 +36,10 @@
class MutatorLockedDumpable {
public:
explicit MutatorLockedDumpable(T& value)
- SHARED_REQUIRES(Locks::mutator_lock_) : value_(value) {
+ REQUIRES_SHARED(Locks::mutator_lock_) : value_(value) {
}
- void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_) {
value_.Dump(os);
}
@@ -51,7 +51,7 @@
template<typename T>
std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
-// TODO: should be SHARED_REQUIRES(Locks::mutator_lock_) however annotalysis
+// TODO: should be REQUIRES_SHARED(Locks::mutator_lock_) however annotalysis
// currently fails for this.
NO_THREAD_SAFETY_ANALYSIS {
rhs.Dump(os);
diff --git a/runtime/indirect_reference_table.h b/runtime/indirect_reference_table.h
index 2d0ae63..13c6225 100644
--- a/runtime/indirect_reference_table.h
+++ b/runtime/indirect_reference_table.h
@@ -199,7 +199,7 @@
static const size_t kIRTPrevCount = kIsDebugBuild ? 7 : 3;
class IrtEntry {
public:
- void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Add(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
++serial_;
if (serial_ == kIRTPrevCount) {
serial_ = 0;
@@ -227,11 +227,11 @@
class IrtIterator {
public:
- IrtIterator(IrtEntry* table, size_t i, size_t capacity) SHARED_REQUIRES(Locks::mutator_lock_)
+ IrtIterator(IrtEntry* table, size_t i, size_t capacity) REQUIRES_SHARED(Locks::mutator_lock_)
: table_(table), i_(i), capacity_(capacity) {
}
- IrtIterator& operator++() SHARED_REQUIRES(Locks::mutator_lock_) {
+ IrtIterator& operator++() REQUIRES_SHARED(Locks::mutator_lock_) {
++i_;
return *this;
}
@@ -277,7 +277,7 @@
* failed during expansion).
*/
IndirectRef Add(uint32_t cookie, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Given an IndirectRef in the table, return the Object it refers to.
@@ -285,12 +285,12 @@
* Returns kInvalidIndirectRefObject if iref is invalid.
*/
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* Get(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::Object* Get(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_)
ALWAYS_INLINE;
// Synchronized get which reads a reference, acquiring a lock if necessary.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* SynchronizedGet(IndirectRef iref) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* SynchronizedGet(IndirectRef iref) const REQUIRES_SHARED(Locks::mutator_lock_) {
return Get<kReadBarrierOption>(iref);
}
@@ -299,7 +299,7 @@
*
* Updates an existing indirect reference to point to a new object.
*/
- void Update(IndirectRef iref, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Update(IndirectRef iref, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Remove an existing entry.
@@ -314,7 +314,7 @@
void AssertEmpty();
- void Dump(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Return the #of entries in the entire table. This includes holes, and
@@ -334,7 +334,7 @@
}
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetSegmentState() const {
return segment_state_.all;
@@ -352,7 +352,7 @@
}
// Release pages past the end of the table that may have previously held references.
- void Trim() SHARED_REQUIRES(Locks::mutator_lock_);
+ void Trim() REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Extract the table index from an indirect reference.
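The Add/Get/Update entry points annotated above are exercised as in the test file that follows; the sketch below is hypothetical (table, cookie, obj and new_obj are placeholders) and assumes the shared mutator lock is held:
IndirectRef ref = table->Add(cookie, obj);   // May trigger table expansion.
mirror::Object* resolved = table->Get(ref);  // Applies a read barrier by default.
table->Update(ref, new_obj);                 // Repoint an existing entry.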
diff --git a/runtime/indirect_reference_table_test.cc b/runtime/indirect_reference_table_test.cc
index f376ec0..61bcadd 100644
--- a/runtime/indirect_reference_table_test.cc
+++ b/runtime/indirect_reference_table_test.cc
@@ -26,7 +26,7 @@
class IndirectReferenceTableTest : public CommonRuntimeTest {};
static void CheckDump(IndirectReferenceTable* irt, size_t num_objects, size_t num_unique)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostringstream oss;
irt->Dump(oss);
if (num_objects == 0) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 4a86e36..388561b 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -101,12 +101,12 @@
}
static void UpdateEntrypoints(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
method->SetEntryPointFromQuickCompiledCode(quick_code);
}
bool Instrumentation::NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::IsDebuggerActive() &&
Runtime::Current()->GetHeap()->IsInBootImageOatFile(code) &&
!method->IsNative() &&
@@ -169,7 +169,7 @@
// Since we may already have done this previously, we need to push new instrumentation frame before
// existing instrumentation frames.
static void InstrumentationInstallStack(Thread* thread, void* arg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
struct InstallStackVisitor FINAL : public StackVisitor {
InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc)
: StackVisitor(thread_in, context, kInstrumentationStackWalk),
@@ -179,7 +179,7 @@
last_return_pc_(0) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr) {
if (kVerboseInstrumentation) {
@@ -329,7 +329,7 @@
instrumentation_stack_(thread_in->GetInstrumentationStack()),
frames_removed_(0) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (instrumentation_stack_->size() == 0) {
return false; // Stop.
}
@@ -1019,7 +1019,7 @@
static void CheckStackDepth(Thread* self, const InstrumentationStackFrame& instrumentation_frame,
int delta)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t frame_id = StackVisitor::ComputeNumFrames(self, kInstrumentationStackWalk) + delta;
if (frame_id != instrumentation_frame.frame_id_) {
LOG(ERROR) << "Expected frame_id=" << frame_id << " but found "
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 757be8e..1e5fcf2 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -64,24 +64,24 @@
// Call-back for when a method is entered.
virtual void MethodEntered(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
- uint32_t dex_pc) SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when a method is exited.
virtual void MethodExited(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when a method is popped due to an exception throw. A method will either cause a
// MethodExited call-back or a MethodUnwind call-back when its activation is removed.
virtual void MethodUnwind(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when the dex pc moves in a method.
virtual void DexPcMoved(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we read from a field.
virtual void FieldRead(Thread* thread, mirror::Object* this_object, ArtMethod* method,
@@ -93,14 +93,14 @@
// Call-back when an exception is caught.
virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we execute a branch.
virtual void Branch(Thread* thread,
ArtMethod* method,
uint32_t dex_pc,
int32_t dex_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Call-back for when we get an invokevirtual or an invokeinterface.
virtual void InvokeVirtualOrInterface(Thread* thread,
@@ -109,7 +109,7 @@
uint32_t dex_pc,
ArtMethod* callee)
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
};
// Instrumentation is a catch-all for when extra information is required from the runtime. The
@@ -161,7 +161,7 @@
bool AreAllMethodsDeoptimized() const {
return interpreter_stubs_installed_;
}
- bool ShouldNotifyMethodEnterExitEvents() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ShouldNotifyMethodEnterExitEvents() const REQUIRES_SHARED(Locks::mutator_lock_);
// Executes everything with interpreter.
void DeoptimizeEverything(const char* key)
@@ -192,7 +192,7 @@
// Indicates whether the method has been deoptimized so it is executed with the interpreter.
bool IsDeoptimized(ArtMethod* method)
- REQUIRES(!deoptimized_methods_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!deoptimized_methods_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Enable method tracing by installing instrumentation entry/exit stubs or interpreter.
void EnableMethodTracing(const char* key,
@@ -210,7 +210,7 @@
!deoptimized_methods_lock_);
InterpreterHandlerTable GetInterpreterHandlerTable() const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return interpreter_handler_table_;
}
@@ -226,17 +226,17 @@
// Update the code of a method respecting any installed stubs.
void UpdateMethodsCode(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Update the code of a method respecting any installed stubs from debugger.
void UpdateMethodsCodeFromDebugger(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Get the quick code for the given method. More efficient than asking the class linker as it
// will short-cut to GetCode if instrumentation and static method resolution stubs aren't
// installed.
const void* GetQuickCodeFor(ArtMethod* method, PointerSize pointer_size) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ForceInterpretOnly() {
interpret_only_ = true;
@@ -255,49 +255,49 @@
// Code is in boot image oat file which isn't compiled as debuggable.
// Need debug version (interpreter or jitted) if that's the case.
bool NeedDebugVersionForBootImageCode(ArtMethod* method, const void* code) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool AreExitStubsInstalled() const {
return instrumentation_stubs_installed_;
}
- bool HasMethodEntryListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasMethodEntryListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_method_entry_listeners_;
}
- bool HasMethodExitListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasMethodExitListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_method_exit_listeners_;
}
- bool HasMethodUnwindListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasMethodUnwindListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_method_unwind_listeners_;
}
- bool HasDexPcListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasDexPcListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_dex_pc_listeners_;
}
- bool HasFieldReadListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasFieldReadListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_field_read_listeners_;
}
- bool HasFieldWriteListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasFieldWriteListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_field_write_listeners_;
}
- bool HasExceptionCaughtListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasExceptionCaughtListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_exception_caught_listeners_;
}
- bool HasBranchListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasBranchListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_branch_listeners_;
}
- bool HasInvokeVirtualOrInterfaceListeners() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasInvokeVirtualOrInterfaceListeners() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_invoke_virtual_or_interface_listeners_;
}
- bool IsActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsActive() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_entry_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_caught_listeners_ || have_method_unwind_listeners_ ||
@@ -305,7 +305,7 @@
}
// Any instrumentation *other* than what is needed for Jit profiling active?
- bool NonJitProfilingActive() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool NonJitProfilingActive() const REQUIRES_SHARED(Locks::mutator_lock_) {
return have_dex_pc_listeners_ || have_method_exit_listeners_ ||
have_field_read_listeners_ || have_field_write_listeners_ ||
have_exception_caught_listeners_ || have_method_unwind_listeners_ ||
@@ -316,7 +316,7 @@
// listeners into executing code and get method enter events for methods already on the stack.
void MethodEnterEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodEntryListeners())) {
MethodEnterEventImpl(thread, this_object, method, dex_pc);
}
@@ -326,7 +326,7 @@
void MethodExitEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasMethodExitListeners())) {
MethodExitEventImpl(thread, this_object, method, dex_pc, return_value);
}
@@ -335,12 +335,12 @@
// Inform listeners that a method has been exited due to an exception.
void MethodUnwindEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Inform listeners that the dex pc has moved (only supported by the interpreter).
void DexPcMovedEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasDexPcListeners())) {
DexPcMovedEventImpl(thread, this_object, method, dex_pc);
}
@@ -348,7 +348,7 @@
// Inform listeners that a branch has been taken (only supported by the interpreter).
void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasBranchListeners())) {
BranchImpl(thread, method, dex_pc, offset);
}
@@ -358,7 +358,7 @@
void FieldReadEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldReadListeners())) {
FieldReadEventImpl(thread, this_object, method, dex_pc, field);
}
@@ -368,7 +368,7 @@
void FieldWriteEvent(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasFieldWriteListeners())) {
FieldWriteEventImpl(thread, this_object, method, dex_pc, field, field_value);
}
@@ -379,7 +379,7 @@
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(HasInvokeVirtualOrInterfaceListeners())) {
InvokeVirtualOrInterfaceImpl(thread, this_object, caller, dex_pc, callee);
}
@@ -387,48 +387,48 @@
// Inform listeners that an exception was caught.
void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
// that returning causes a branch to the method exit stub. Generates method enter events.
void PushInstrumentationStackFrame(Thread* self, mirror::Object* this_object,
ArtMethod* method, uintptr_t lr,
bool interpreter_entry)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Called when an instrumented method is exited. Removes the pushed instrumentation frame
// returning the intended link register. Generates method exit events.
TwoWordReturn PopInstrumentationStackFrame(Thread* self, uintptr_t* return_pc,
uint64_t gpr_result, uint64_t fpr_result)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Pops an instrumentation frame from the current thread and generates an unwind event.
// Returns the return pc for the instrumentation frame that's popped.
uintptr_t PopMethodForUnwind(Thread* self, bool is_deoptimization) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Call back for configure stubs.
- void InstallStubsForClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_)
+ void InstallStubsForClass(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!deoptimized_methods_lock_);
void InstallStubsForMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Install instrumentation exit stub on every method of the stack of the given thread.
// This is used by the debugger to cause a deoptimization of the thread's stack after updating
// local variable(s).
void InstrumentThreadStack(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_);
static size_t ComputeFrameId(Thread* self,
size_t frame_depth,
size_t inlined_frames_before_frame)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Does not hold lock, used to check if someone changed from not instrumented to instrumented
// during a GC suspend point.
- bool AllocEntrypointsInstrumented() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool AllocEntrypointsInstrumented() const REQUIRES_SHARED(Locks::mutator_lock_) {
return alloc_entrypoints_instrumented_;
}
@@ -463,44 +463,44 @@
void MethodEnterEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MethodExitEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method,
uint32_t dex_pc, const JValue& return_value) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void BranchImpl(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t offset) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InvokeVirtualOrInterfaceImpl(Thread* thread,
mirror::Object* this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
ArtField* field, const JValue& field_value) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Read barrier-aware utility functions for accessing deoptimized_methods_
bool AddDeoptimizedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
bool IsDeoptimizedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
bool RemoveDeoptimizedMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(deoptimized_methods_lock_);
ArtMethod* BeginDeoptimizedMethod()
- SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
bool IsDeoptimizedMethodsEmpty() const
- SHARED_REQUIRES(Locks::mutator_lock_, deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_, deoptimized_methods_lock_);
void UpdateMethodsCodeImpl(ArtMethod* method, const void* quick_code)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!deoptimized_methods_lock_);
// Have we hijacked ArtMethod::code_ so that it calls instrumentation/interpreter code?
@@ -610,7 +610,7 @@
interpreter_entry_(interpreter_entry) {
}
- std::string Dump() const SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* this_object_;
ArtMethod* method_;
diff --git a/runtime/instrumentation_test.cc b/runtime/instrumentation_test.cc
index 2cc35cf..abe3184 100644
--- a/runtime/instrumentation_test.cc
+++ b/runtime/instrumentation_test.cc
@@ -47,7 +47,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_enter_event = true;
}
@@ -56,7 +56,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
const JValue& return_value ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_exit_event = true;
}
@@ -64,7 +64,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_method_unwind_event = true;
}
@@ -72,7 +72,7 @@
mirror::Object* this_object ATTRIBUTE_UNUSED,
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t new_dex_pc ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_dex_pc_moved_event = true;
}
@@ -81,7 +81,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_read_event = true;
}
@@ -91,13 +91,13 @@
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_field_written_event = true;
}
void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_exception_caught_event = true;
}
@@ -105,7 +105,7 @@
ArtMethod* method ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
int32_t dex_pc_offset ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_branch_event = true;
}
@@ -114,7 +114,7 @@
ArtMethod* caller ATTRIBUTE_UNUSED,
uint32_t dex_pc ATTRIBUTE_UNUSED,
ArtMethod* callee ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
received_invoke_virtual_or_interface_event = true;
}
@@ -205,7 +205,7 @@
}
void DeoptimizeMethod(Thread* self, ArtMethod* method, bool enable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -221,7 +221,7 @@
void UndeoptimizeMethod(Thread* self, ArtMethod* method,
const char* key, bool disable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -236,7 +236,7 @@
}
void DeoptimizeEverything(Thread* self, const char* key, bool enable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -251,7 +251,7 @@
}
void UndeoptimizeEverything(Thread* self, const char* key, bool disable_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -266,7 +266,7 @@
}
void EnableMethodTracing(Thread* self, const char* key, bool needs_interpreter)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -278,7 +278,7 @@
}
void DisableMethodTracing(Thread* self, const char* key)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
instrumentation::Instrumentation* instrumentation = runtime->GetInstrumentation();
ScopedThreadSuspension sts(self, kSuspended);
@@ -291,7 +291,7 @@
private:
static bool HasEventListener(const instrumentation::Instrumentation* instr, uint32_t event_type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (event_type) {
case instrumentation::Instrumentation::kMethodEntered:
return instr->HasMethodEntryListeners();
@@ -320,7 +320,7 @@
static void ReportEvent(const instrumentation::Instrumentation* instr, uint32_t event_type,
Thread* self, ArtMethod* method, mirror::Object* obj,
uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
switch (event_type) {
case instrumentation::Instrumentation::kMethodEntered:
instr->MethodEnterEvent(self, obj, method, dex_pc);
diff --git a/runtime/intern_table.h b/runtime/intern_table.h
index f845de5..184fbdc 100644
--- a/runtime/intern_table.h
+++ b/runtime/intern_table.h
@@ -58,44 +58,44 @@
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
mirror::String* InternStrong(int32_t utf16_length, const char* utf8_data)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Only used by image writer. Special version that may not cause thread suspension since the GC
// cannot be running while we are doing image writing. May be called while holding a
// lock since there will not be thread suspension.
mirror::String* InternStrongImageString(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
- mirror::String* InternStrong(const char* utf8_data) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* InternStrong(const char* utf8_data) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Interns a potentially new string in the 'strong' table. May cause thread suspension.
- mirror::String* InternStrong(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* InternStrong(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
// Interns a potentially new string in the 'weak' table. May cause thread suspension.
- mirror::String* InternWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* InternWeak(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- void SweepInternTableWeaks(IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+ void SweepInternTableWeaks(IsMarkedVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::intern_table_lock_);
- bool ContainsWeak(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ bool ContainsWeak(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::intern_table_lock_);
// Lookup a strong intern, returns null if not found.
mirror::String* LookupStrong(Thread* self, mirror::String* s)
REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::String* LookupStrong(Thread* self, uint32_t utf16_length, const char* utf8_data)
REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Lookup a weak intern, returns null if not found.
mirror::String* LookupWeak(Thread* self, mirror::String* s)
REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Total number of interned strings.
size_t Size() const REQUIRES(!Locks::intern_table_lock_);
@@ -107,31 +107,31 @@
size_t WeakSize() const REQUIRES(!Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
void DumpForSigQuit(std::ostream& os) const REQUIRES(!Locks::intern_table_lock_);
- void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_);
+ void BroadcastForNewInterns() REQUIRES_SHARED(Locks::mutator_lock_);
// Adds all of the resolved image strings from the image spaces into the intern table. The
// advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets
// images_added_to_intern_table_ to true.
void AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Add a new intern table for inserting to, previous intern tables are still there but no
// longer inserted into and ideally unmodified. This is done to prevent dirty pages.
void AddNewTable()
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_);
// Read the intern table from memory. The elements aren't copied, the intern hash set data will
// point to somewhere within ptr. Only reads the strong interns.
size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Write the post zygote intern table to a pointer. Only writes the strong interns since it is
// expected that there are no weak interns since this is called from the image writer.
- size_t WriteToMemory(uint8_t* ptr) SHARED_REQUIRES(Locks::mutator_lock_)
+ size_t WriteToMemory(uint8_t* ptr) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::intern_table_lock_);
// Change the weak root state. May broadcast to waiters.
@@ -181,18 +181,18 @@
class Table {
public:
Table();
- mirror::String* Find(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* Find(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
- mirror::String* Find(const Utf8String& string) SHARED_REQUIRES(Locks::mutator_lock_)
+ mirror::String* Find(const Utf8String& string) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
- void Insert(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_)
+ void Insert(mirror::String* s) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
void Remove(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void SweepWeaks(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Add a new intern table that will only be inserted into from now on.
void AddNewTable() REQUIRES(Locks::intern_table_lock_);
size_t Size() const REQUIRES(Locks::intern_table_lock_);
@@ -200,18 +200,18 @@
// Tables read are inserted at the front of the table array. Only checks for conflicts in
// debug builds. Returns how many bytes were read.
size_t AddTableFromMemory(const uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Write the intern tables to ptr, if there are multiple tables they are combined into a single
// one. Returns how many bytes were written.
size_t WriteToMemory(uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
private:
typedef HashSet<GcRoot<mirror::String>, GcRootEmptyFn, StringHashEquals, StringHashEquals,
TrackingAllocator<GcRoot<mirror::String>, kAllocatorTagInternTable>> UnorderedSet;
void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// We call AddNewTable when we create the zygote to reduce private dirty pages caused by
// modifying the zygote intern table. The back of table is modified when strings are interned.
@@ -222,35 +222,35 @@
// If holding_locks is true, then we may also hold other locks. If holding_locks is true, then we
// require GC is not running since it is not safe to wait while holding locks.
mirror::String* Insert(mirror::String* s, bool is_strong, bool holding_locks)
- REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
mirror::String* LookupStrongLocked(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* LookupWeakLocked(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrong(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeak(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveStrong(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveWeak(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
// Transaction rollback access.
mirror::String* LookupStringFromImage(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertStrongFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
mirror::String* InsertWeakFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveStrongFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
void RemoveWeakFromTransaction(mirror::String* s)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_);
size_t AddTableFromMemoryLocked(const uint8_t* ptr)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
// Change the weak root state. May broadcast to waiters.
void ChangeWeakRootStateLocked(gc::WeakRootState new_state)
@@ -258,7 +258,7 @@
// Wait until we can read weak roots.
void WaitUntilAccessible(Thread* self)
- REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(Locks::intern_table_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_);
bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_);
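Context for the SHARED_REQUIRES -> REQUIRES_SHARED rename that runs through the rest of this patch: both spellings mark a function as needing the named capability (here Locks::mutator_lock_) held in shared mode for Clang's -Wthread-safety analysis; only the macro name changes. A minimal sketch of how such a macro is typically wired up, assuming Clang's capability attributes (illustrative only, not ART's actual definition):

#if defined(__clang__)
// Hypothetical definition: forwards to Clang's shared-capability attribute.
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
// The analysis is Clang-only; the annotation compiles away on other compilers.
#define REQUIRES_SHARED(...)
#endif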
diff --git a/runtime/intern_table_test.cc b/runtime/intern_table_test.cc
index fe78bf2..620e15b 100644
--- a/runtime/intern_table_test.cc
+++ b/runtime/intern_table_test.cc
@@ -64,7 +64,7 @@
class TestPredicate : public IsMarkedVisitor {
public:
- mirror::Object* IsMarked(mirror::Object* s) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* IsMarked(mirror::Object* s) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
bool erased = false;
for (auto it = expected_.begin(), end = expected_.end(); it != end; ++it) {
if (*it == s) {
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 101c9a1..277bda4 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -37,7 +37,7 @@
static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
Object* receiver, uint32_t* args, JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// TODO: The following enters JNI code using a typedef-ed function rather than the JNI compiler,
// it should be removed and JNI compiled stubs used instead.
ScopedObjectAccessUnchecked soa(self);
@@ -250,7 +250,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
JValue result_register,
- bool stay_in_interpreter = false) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool stay_in_interpreter = false) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!shadow_frame.GetMethod()->IsAbstract());
DCHECK(!shadow_frame.GetMethod()->IsNative());
if (LIKELY(shadow_frame.GetDexPC() == 0)) { // Entering the method, but not via deoptimization.
@@ -466,7 +466,7 @@
}
static bool IsStringInit(const Instruction* instr, ArtMethod* caller)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (instr->Opcode() == Instruction::INVOKE_DIRECT ||
instr->Opcode() == Instruction::INVOKE_DIRECT_RANGE) {
// Instead of calling ResolveMethod() which has suspend point and can trigger
@@ -499,7 +499,7 @@
ShadowFrame* shadow_frame,
bool from_code,
JValue* ret_val)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue value;
// Set value to last known result in case the shadow frame chain is empty.
value.SetJ(ret_val->GetJ());
diff --git a/runtime/interpreter/interpreter.h b/runtime/interpreter/interpreter.h
index bf4bcff..38ce851 100644
--- a/runtime/interpreter/interpreter.h
+++ b/runtime/interpreter/interpreter.h
@@ -38,20 +38,20 @@
extern void EnterInterpreterFromInvoke(Thread* self, ArtMethod* method,
mirror::Object* receiver, uint32_t* args, JValue* result,
bool stay_in_interpreter = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// 'from_code' denotes whether the deoptimization was explicitly triggered by compiled code.
extern void EnterInterpreterFromDeoptimize(Thread* self, ShadowFrame* shadow_frame, bool from_code,
JValue* ret_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
extern JValue EnterInterpreterFromEntryPoint(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ArtInterpreterToInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// One-time sanity check.
void CheckInterpreterAsmConstants();
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index ac146b3..77c3f0f 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -196,7 +196,7 @@
template<Primitive::Type field_type>
static JValue GetFieldValue(const ShadowFrame& shadow_frame, uint32_t vreg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue field_value;
switch (field_type) {
case Primitive::kPrimBoolean:
@@ -456,7 +456,7 @@
// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
size_t dest_reg, size_t src_reg)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Uint required, so that sign extension does not make this wrong on 64b systems
uint32_t src_value = shadow_frame.GetVReg(src_reg);
mirror::Object* o = shadow_frame.GetVRegReference<kVerifyNone>(src_reg);
@@ -491,7 +491,7 @@
template <bool is_range,
bool do_assignability_check,
size_t kVarArgMax>
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
static inline bool DoCallCommon(ArtMethod* called_method,
Thread* self,
ShadowFrame& shadow_frame,
@@ -505,7 +505,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
// Ensure static methods are initialized.
if (method->IsStatic()) {
@@ -541,7 +541,7 @@
void SetStringInitValueToAllAliases(ShadowFrame* shadow_frame,
uint16_t this_obj_vreg,
JValue result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Object* existing = shadow_frame->GetVRegReference(this_obj_vreg);
if (existing == nullptr) {
// If it's null, we come from compiled code that was deoptimized. Nothing to do,
@@ -854,7 +854,7 @@
return true;
}
-// TODO fix thread analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+// TODO fix thread analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<typename T>
static void RecordArrayElementsInTransactionImpl(mirror::PrimitiveArray<T>* array, int32_t count)
NO_THREAD_SAFETY_ANALYSIS {
@@ -865,7 +865,7 @@
}
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(Runtime::Current()->IsActiveTransaction());
DCHECK(array != nullptr);
DCHECK_LE(count, array->GetLength());
@@ -904,7 +904,7 @@
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoCall<_is_range, _do_assignability_check>(ArtMethod* method, Thread* self, \
ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
@@ -917,7 +917,7 @@
// Explicit DoFilledNewArray template function declarations.
#define EXPLICIT_DO_FILLED_NEW_ARRAY_TEMPLATE_DECL(_is_range_, _check, _transaction_active) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoFilledNewArray<_is_range_, _check, _transaction_active>(const Instruction* inst, \
const ShadowFrame& shadow_frame, \
Thread* self, JValue* result)
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 7b38473..9d76685 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -66,7 +66,7 @@
namespace interpreter {
void ThrowNullPointerExceptionFromInterpreter()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kMonitorCounting>
static inline void DoMonitorEnter(Thread* self,
@@ -108,13 +108,13 @@
void AbortTransactionF(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AbortTransactionV(Thread* self, const char* fmt, va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Invokes the given method. This is part of the invocation support and is used by DoInvoke and
// DoInvokeVirtualQuick functions.
@@ -213,32 +213,32 @@
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iget-quick, iget-wide-quick and iget-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type>
bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iput-XXX and sput-XXX instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check,
bool transaction_active>
bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction* inst,
- uint16_t inst_data) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t inst_data) REQUIRES_SHARED(Locks::mutator_lock_);
// Handles iput-quick, iput-wide-quick and iput-object-quick instructions.
// Returns true on success, otherwise throws an exception and returns false.
template<Primitive::Type field_type, bool transaction_active>
bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Handles string resolution for const-string and const-string-jumbo instructions. Also ensures the
// java.lang.String class is initialized.
static inline String* ResolveString(Thread* self, ShadowFrame& shadow_frame, uint32_t string_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* java_lang_string_class = String::GetJavaLangString();
if (UNLIKELY(!java_lang_string_class->IsInitialized())) {
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -272,7 +272,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoIntDivide(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -290,7 +290,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoIntRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int32_t dividend, int32_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr int32_t kMinInt = std::numeric_limits<int32_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -308,7 +308,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoLongDivide(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -326,7 +326,7 @@
// Returns true on success, otherwise throws a java.lang.ArithmeticException and return false.
static inline bool DoLongRemainder(ShadowFrame& shadow_frame, size_t result_reg,
int64_t dividend, int64_t divisor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const int64_t kMinLong = std::numeric_limits<int64_t>::min();
if (UNLIKELY(divisor == 0)) {
ThrowArithmeticExceptionDivideByZero();
@@ -350,7 +350,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoPackedSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::PACKED_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -378,7 +378,7 @@
// Returns the branch offset to the next instruction to execute.
static inline int32_t DoSparseSwitch(const Instruction* inst, const ShadowFrame& shadow_frame,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(inst->Opcode() == Instruction::SPARSE_SWITCH);
const uint16_t* switch_data = reinterpret_cast<const uint16_t*>(inst) + inst->VRegB_31t();
int32_t test_val = shadow_frame.GetVReg(inst->VRegA_31t(inst_data));
@@ -411,18 +411,18 @@
uint32_t FindNextInstructionFollowingException(Thread* self, ShadowFrame& shadow_frame,
uint32_t dex_pc, const instrumentation::Instrumentation* instrumentation)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
NO_RETURN void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
__attribute__((cold))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set true if you want TraceExecution invocation before each bytecode execution.
constexpr bool kTraceExecutionEnabled = false;
static inline void TraceExecution(const ShadowFrame& shadow_frame, const Instruction* inst,
const uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kTraceExecutionEnabled) {
#define TRACE_LOG std::cerr
std::ostringstream oss;
@@ -465,7 +465,7 @@
// Explicitly instantiate all DoInvoke functions.
#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
@@ -486,7 +486,7 @@
// Explicitly instantiate all DoInvokeVirtualQuick functions.
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
- template SHARED_REQUIRES(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
diff --git a/runtime/interpreter/interpreter_goto_table_impl.h b/runtime/interpreter/interpreter_goto_table_impl.h
index bb9be88..c54746d 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.h
+++ b/runtime/interpreter/interpreter_goto_table_impl.h
@@ -33,7 +33,7 @@
JValue ExecuteGotoImpl(Thread* self,
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
- JValue result_register) SHARED_REQUIRES(Locks::mutator_lock_);
+ JValue result_register) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_mterp_impl.h b/runtime/interpreter/interpreter_mterp_impl.h
index 322df4e..90d9f89 100644
--- a/runtime/interpreter/interpreter_mterp_impl.h
+++ b/runtime/interpreter/interpreter_mterp_impl.h
@@ -33,7 +33,7 @@
extern "C" bool ExecuteMterpImpl(Thread* self,
const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame,
- JValue* result_register) SHARED_REQUIRES(Locks::mutator_lock_);
+ JValue* result_register) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/interpreter_switch_impl.h b/runtime/interpreter/interpreter_switch_impl.h
index 90ec908..d0c9386 100644
--- a/runtime/interpreter/interpreter_switch_impl.h
+++ b/runtime/interpreter/interpreter_switch_impl.h
@@ -34,7 +34,7 @@
const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame,
JValue result_register,
- bool interpret_one_instruction) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool interpret_one_instruction) REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace interpreter
} // namespace art
diff --git a/runtime/interpreter/mterp/mips64/footer.S b/runtime/interpreter/mterp/mips64/footer.S
index 9994169..4063162 100644
--- a/runtime/interpreter/mterp/mips64/footer.S
+++ b/runtime/interpreter/mterp/mips64/footer.S
@@ -217,7 +217,8 @@
b MterpDone
/*
* Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be 0.
+ * significant bits of a0 must hold the zero-extension or sign-extension
+ * of the value, depending on the return type.
*/
MterpReturn:
ld a2, OFF_FP_RESULT_REGISTER(rFP)
diff --git a/runtime/interpreter/mterp/mips64/op_return.S b/runtime/interpreter/mterp/mips64/op_return.S
index ec986b8..b10c03f 100644
--- a/runtime/interpreter/mterp/mips64/op_return.S
+++ b/runtime/interpreter/mterp/mips64/op_return.S
@@ -1,7 +1,8 @@
+%default {"instr":"GET_VREG"}
/*
* Return a 32-bit value.
*
- * for: return, return-object
+ * for: return (sign-extend), return-object (zero-extend)
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
@@ -14,5 +15,5 @@
jal MterpSuspendCheck # (self)
1:
srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
+ $instr a0, a2 # a0 <- vAA
b MterpReturn
diff --git a/runtime/interpreter/mterp/mips64/op_return_object.S b/runtime/interpreter/mterp/mips64/op_return_object.S
index 67f1871..b69b880 100644
--- a/runtime/interpreter/mterp/mips64/op_return_object.S
+++ b/runtime/interpreter/mterp/mips64/op_return_object.S
@@ -1 +1 @@
-%include "mips64/op_return.S"
+%include "mips64/op_return.S" {"instr":"GET_VREG_U"}
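The two files above parameterize the vreg load for 32-bit returns: plain return now uses GET_VREG (sign-extend into a0), while return-object keeps GET_VREG_U (zero-extend). A short C++ sketch of the intended widening semantics, with hypothetical helper names (illustrative only, not ART code):

#include <cassert>
#include <cstdint>

// Hypothetical stand-ins for GET_VREG (sign-extend) and GET_VREG_U
// (zero-extend) when a 32-bit vreg is widened into the 64-bit a0 register.
int64_t WidenSigned(uint32_t vreg)    { return static_cast<int32_t>(vreg); }
uint64_t WidenUnsigned(uint32_t vreg) { return static_cast<uint64_t>(vreg); }

int main() {
  uint32_t vreg = 0xFFFFFFFFu;                   // e.g. the int value -1
  assert(WidenSigned(vreg) == -1);               // return: callers expect a sign-extended int in a0
  assert(WidenUnsigned(vreg) == 0xFFFFFFFFull);  // return-object: reference bits must stay zero-extended
  return 0;
}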
diff --git a/runtime/interpreter/mterp/mterp.cc b/runtime/interpreter/mterp/mterp.cc
index 20a0753..a8c7d15 100644
--- a/runtime/interpreter/mterp/mterp.cc
+++ b/runtime/interpreter/mterp/mterp.cc
@@ -143,7 +143,7 @@
}
extern "C" size_t MterpShouldSwitchInterpreters()
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
return instrumentation->NonJitProfilingActive() || Dbg::IsDebuggerActive();
@@ -154,7 +154,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kVirtual, false, false>(
@@ -165,7 +165,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kSuper, false, false>(
@@ -176,7 +176,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kInterface, false, false>(
@@ -187,7 +187,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kDirect, false, false>(
@@ -198,7 +198,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kStatic, false, false>(
@@ -209,7 +209,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kVirtual, true, false>(
@@ -220,7 +220,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kSuper, true, false>(
@@ -231,7 +231,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kInterface, true, false>(
@@ -242,7 +242,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kDirect, true, false>(
@@ -253,7 +253,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvoke<kStatic, true, false>(
@@ -264,7 +264,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvokeVirtualQuick<false>(
@@ -275,7 +275,7 @@
ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint16_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue* result_register = shadow_frame->GetResultRegister();
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoInvokeVirtualQuick<true>(
@@ -290,7 +290,7 @@
uint32_t tgt_vreg,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
String* s = ResolveString(self, *shadow_frame, index);
if (UNLIKELY(s == nullptr)) {
return true;
@@ -303,7 +303,7 @@
uint32_t tgt_vreg,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, shadow_frame->GetMethod(), self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
@@ -316,7 +316,7 @@
StackReference<mirror::Object>* vreg_addr,
art::ArtMethod* method,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return true;
@@ -334,7 +334,7 @@
StackReference<mirror::Object>* vreg_addr,
art::ArtMethod* method,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Class* c = ResolveVerifyAndClinit(index, method, self, false, false);
if (UNLIKELY(c == nullptr)) {
return false; // Caller will check for pending exception. Return value unimportant.
@@ -345,12 +345,12 @@
}
extern "C" size_t MterpFillArrayData(Object* obj, const Instruction::ArrayDataPayload* payload)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return FillArrayData(obj, payload);
}
extern "C" size_t MterpNewInstance(ShadowFrame* shadow_frame, Thread* self, uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
Object* obj = nullptr;
Class* c = ResolveVerifyAndClinit(inst->VRegB_21c(), shadow_frame->GetMethod(),
@@ -375,7 +375,7 @@
extern "C" size_t MterpSputObject(ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFieldPut<StaticObjectWrite, Primitive::kPrimNot, false, false>
(self, *shadow_frame, inst, inst_data);
@@ -385,7 +385,7 @@
uint16_t* dex_pc_ptr,
uint32_t inst_data,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFieldPut<InstanceObjectWrite, Primitive::kPrimNot, false, false>
(self, *shadow_frame, inst, inst_data);
@@ -394,7 +394,7 @@
extern "C" size_t MterpIputObjectQuick(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoIPutQuick<Primitive::kPrimNot, false>(*shadow_frame, inst, inst_data);
}
@@ -402,7 +402,7 @@
extern "C" size_t MterpAputObject(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
Object* a = shadow_frame->GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == nullptr)) {
@@ -421,7 +421,7 @@
extern "C" size_t MterpFilledNewArray(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<false, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
@@ -430,7 +430,7 @@
extern "C" size_t MterpFilledNewArrayRange(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
return DoFilledNewArray<true, false, false>(inst, *shadow_frame, self,
shadow_frame->GetResultRegister());
@@ -439,7 +439,7 @@
extern "C" size_t MterpNewArray(ShadowFrame* shadow_frame,
uint16_t* dex_pc_ptr,
uint32_t inst_data, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
int32_t length = shadow_frame->GetVReg(inst->VRegB_22c(inst_data));
Object* obj = AllocArrayFromCode<false, true>(
@@ -453,7 +453,7 @@
}
extern "C" size_t MterpHandleException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(self->IsExceptionPending());
const instrumentation::Instrumentation* const instrumentation =
Runtime::Current()->GetInstrumentation();
@@ -469,7 +469,7 @@
}
extern "C" void MterpCheckBefore(Thread* self, ShadowFrame* shadow_frame, uint16_t* dex_pc_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const Instruction* inst = Instruction::At(dex_pc_ptr);
uint16_t inst_data = inst->Fetch16(0);
if (inst->Opcode(inst_data) == Instruction::MOVE_EXCEPTION) {
@@ -488,7 +488,7 @@
}
extern "C" void MterpLogDivideByZeroException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -496,7 +496,7 @@
}
extern "C" void MterpLogArrayIndexException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -504,7 +504,7 @@
}
extern "C" void MterpLogNegativeArraySizeException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -512,7 +512,7 @@
}
extern "C" void MterpLogNoSuchMethodException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -520,7 +520,7 @@
}
extern "C" void MterpLogExceptionThrownException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -528,7 +528,7 @@
}
extern "C" void MterpLogNullObjectException(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -536,7 +536,7 @@
}
extern "C" void MterpLogFallback(Thread* self, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -545,7 +545,7 @@
}
extern "C" void MterpLogOSR(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -553,7 +553,7 @@
}
extern "C" void MterpLogSuspendFallback(Thread* self, ShadowFrame* shadow_frame, uint32_t flags)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self);
const Instruction* inst = Instruction::At(shadow_frame->GetDexPCPtr());
uint16_t inst_data = inst->Fetch16(0);
@@ -565,7 +565,7 @@
}
extern "C" size_t MterpSuspendCheck(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
self->AllowThreadSuspension();
return MterpShouldSwitchInterpreters();
}
@@ -574,7 +574,7 @@
ArtMethod* referrer,
uint64_t* new_value,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite, sizeof(int64_t));
if (LIKELY(field != nullptr)) {
@@ -595,7 +595,7 @@
mirror::Object* obj,
uint8_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite, sizeof(int8_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
Primitive::Type type = field->GetTypeAsPrimitiveType();
@@ -614,7 +614,7 @@
mirror::Object* obj,
uint16_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int16_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -634,7 +634,7 @@
mirror::Object* obj,
uint32_t new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int32_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -648,7 +648,7 @@
mirror::Object* obj,
uint64_t* new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
sizeof(int64_t));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -662,7 +662,7 @@
mirror::Object* obj,
mirror::Object* new_value,
ArtMethod* referrer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = FindFieldFast(field_idx, referrer, InstanceObjectWrite,
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr && obj != nullptr)) {
@@ -673,7 +673,7 @@
}
extern "C" mirror::Object* artAGetObjectFromMterp(mirror::Object* arr, int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(arr == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
return nullptr;
@@ -687,7 +687,7 @@
}
extern "C" mirror::Object* artIGetObjectFromMterp(mirror::Object* obj, uint32_t field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (UNLIKELY(obj == nullptr)) {
ThrowNullPointerExceptionFromInterpreter();
return nullptr;
@@ -702,7 +702,7 @@
* and regenerated following batch updates.
*/
extern "C" ssize_t MterpSetUpHotnessCountdown(ArtMethod* method, ShadowFrame* shadow_frame)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint16_t hotness_count = method->GetCounter();
int32_t countdown_value = jit::kJitHotnessDisabled;
jit::Jit* jit = Runtime::Current()->GetJit();
@@ -742,7 +742,7 @@
extern "C" ssize_t MterpAddHotnessBatch(ArtMethod* method,
ShadowFrame* shadow_frame,
Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
jit::Jit* jit = Runtime::Current()->GetJit();
if (jit != nullptr) {
int16_t count = shadow_frame->GetCachedHotnessCountdown() - shadow_frame->GetHotnessCountdown();
@@ -753,7 +753,7 @@
// TUNING: Unused by arm/arm64/x86/x86_64. Remove when mips/mips64 mterps support batch updates.
extern "C" size_t MterpProfileBranch(Thread* self, ShadowFrame* shadow_frame, int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
uint32_t dex_pc = shadow_frame->GetDexPC();
@@ -772,7 +772,7 @@
extern "C" size_t MterpMaybeDoOnStackReplacement(Thread* self,
ShadowFrame* shadow_frame,
int32_t offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = shadow_frame->GetMethod();
JValue* result = shadow_frame->GetResultRegister();
uint32_t dex_pc = shadow_frame->GetDexPC();
diff --git a/runtime/interpreter/mterp/mterp_stub.cc b/runtime/interpreter/mterp/mterp_stub.cc
index 7e7337e..35f8f1c 100644
--- a/runtime/interpreter/mterp/mterp_stub.cc
+++ b/runtime/interpreter/mterp/mterp_stub.cc
@@ -40,7 +40,7 @@
*/
extern "C" bool ExecuteMterpImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame* shadow_frame, JValue* result_register)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(self); UNUSED(shadow_frame); UNUSED(code_item); UNUSED(result_register);
UNIMPLEMENTED(art::FATAL);
return false;
diff --git a/runtime/interpreter/mterp/out/mterp_mips64.S b/runtime/interpreter/mterp/out/mterp_mips64.S
index a061f1e..88e972f 100644
--- a/runtime/interpreter/mterp/out/mterp_mips64.S
+++ b/runtime/interpreter/mterp/out/mterp_mips64.S
@@ -651,7 +651,7 @@
/*
* Return a 32-bit value.
*
- * for: return, return-object
+ * for: return (sign-extend), return-object (zero-extend)
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
@@ -664,7 +664,7 @@
jal MterpSuspendCheck # (self)
1:
srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
+ GET_VREG a0, a2 # a0 <- vAA
b MterpReturn
/* ------------------------------ */
@@ -697,7 +697,7 @@
/*
* Return a 32-bit value.
*
- * for: return, return-object
+ * for: return (sign-extend), return-object (zero-extend)
*/
/* op vAA */
.extern MterpThreadFenceForConstructor
@@ -710,7 +710,7 @@
jal MterpSuspendCheck # (self)
1:
srl a2, rINST, 8 # a2 <- AA
- GET_VREG_U a0, a2 # a0 <- vAA
+ GET_VREG_U a0, a2 # a0 <- vAA
b MterpReturn
@@ -12298,7 +12298,8 @@
b MterpDone
/*
* Returned value is expected in a0 and if it's not 64-bit, the 32 most
- * significant bits of a0 must be 0.
+ * significant bits of a0 must hold the zero-extension or sign-extension
+ * of the value, depending on the return type.
*/
MterpReturn:
ld a2, OFF_FP_RESULT_REGISTER(rFP)
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
index a0e0e62..c614408 100644
--- a/runtime/interpreter/unstarted_runtime.cc
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -57,7 +57,7 @@
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
__attribute__((__format__(__printf__, 2, 3)))
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void AbortTransactionOrFail(Thread* self, const char* fmt, ...) {
va_list args;
@@ -81,7 +81,7 @@
ShadowFrame* shadow_frame,
JValue* result,
size_t arg_offset,
- bool to_lower_case) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool to_lower_case) REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t int_value = static_cast<uint32_t>(shadow_frame->GetVReg(arg_offset));
// Only ASCII (7-bit).
@@ -117,7 +117,7 @@
Handle<mirror::ClassLoader> class_loader, JValue* result,
const std::string& method_name, bool initialize_class,
bool abort_if_not_found)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(className.Get() != nullptr);
std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -147,7 +147,7 @@
// actually the transaction abort exception. This must not be wrapped, as it signals an
// initialization abort.
static void CheckExceptionGenerateClassNotFound(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (self->IsExceptionPending()) {
// If it is not the transaction abort exception, wrap it.
std::string type(PrettyTypeOf(self->GetException()));
@@ -159,7 +159,7 @@
}
static mirror::String* GetClassName(Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* param = shadow_frame->GetVRegReference(arg_offset);
if (param == nullptr) {
AbortTransactionOrFail(self, "Null-pointer in Class.forName.");
@@ -442,7 +442,7 @@
static void GetResourceAsStream(Thread* self,
ShadowFrame* shadow_frame,
JValue* result,
- size_t arg_offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t arg_offset) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* resource_obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (resource_obj == nullptr) {
AbortTransactionOrFail(self, "null name for getResourceAsStream");
@@ -604,7 +604,7 @@
mirror::Array* src_array, int32_t src_pos,
mirror::Array* dst_array, int32_t dst_pos,
int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (src_array->GetClass()->GetComponentType() != dst_array->GetClass()->GetComponentType()) {
AbortTransactionOrFail(self, "Types mismatched in arraycopy: %s vs %s.",
PrettyDescriptor(src_array->GetClass()->GetComponentType()).c_str(),
@@ -748,7 +748,7 @@
JValue* result,
size_t arg_offset,
bool is_default_version)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<4> hs(self);
Handle<mirror::String> h_key(
hs.NewHandle(reinterpret_cast<mirror::String*>(shadow_frame->GetVRegReference(arg_offset))));
@@ -915,7 +915,7 @@
}
static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile* dex_file = dex_cache->GetDexFile();
if (dex_file == nullptr) {
return nullptr;
@@ -1026,7 +1026,7 @@
static void UnstartedMemoryPeekArray(
Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
int64_t address_long = shadow_frame->GetVRegLong(arg_offset);
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2);
if (obj == nullptr) {
@@ -1173,7 +1173,7 @@
// This allows getting the char array for new style of String objects during compilation.
void UnstartedRuntime::UnstartedStringToCharArray(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = shadow_frame->GetVRegReference(arg_offset)->AsString();
if (string == nullptr) {
AbortTransactionOrFail(self, "String.charAt with null object");
@@ -1299,7 +1299,7 @@
void UnstartedRuntime::UnstartedUnsafeGetObjectVolatile(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1313,7 +1313,7 @@
void UnstartedRuntime::UnstartedUnsafePutObjectVolatile(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1331,7 +1331,7 @@
void UnstartedRuntime::UnstartedUnsafePutOrderedObject(
Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Argument 0 is the Unsafe instance, skip.
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 1);
if (obj == nullptr) {
@@ -1352,7 +1352,7 @@
// of correctly handling the corner cases.
void UnstartedRuntime::UnstartedIntegerParseInt(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
if (obj == nullptr) {
AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
@@ -1396,7 +1396,7 @@
// well.
void UnstartedRuntime::UnstartedLongParseLong(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
if (obj == nullptr) {
AbortTransactionOrFail(self, "Cannot parse null string, retry at runtime.");
@@ -1437,7 +1437,7 @@
void UnstartedRuntime::UnstartedMethodInvoke(
Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JNIEnvExt* env = self->GetJniEnv();
ScopedObjectAccessUnchecked soa(self);
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
index 03d7026..3f36a27 100644
--- a/runtime/interpreter/unstarted_runtime.h
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -52,14 +52,14 @@
ShadowFrame* shadow_frame,
JValue* result,
size_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void Jni(Thread* self,
ArtMethod* method,
mirror::Object* receiver,
uint32_t* args,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Methods that intercept available libcore implementations.
@@ -68,7 +68,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_DIRECT_LIST(UNSTARTED_DIRECT)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
@@ -82,7 +82,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
#include "unstarted_runtime_list.h"
UNSTARTED_RUNTIME_JNI_LIST(UNSTARTED_JNI)
#undef UNSTARTED_RUNTIME_DIRECT_LIST
diff --git a/runtime/interpreter/unstarted_runtime_test.cc b/runtime/interpreter/unstarted_runtime_test.cc
index c324600..ba751ec 100644
--- a/runtime/interpreter/unstarted_runtime_test.cc
+++ b/runtime/interpreter/unstarted_runtime_test.cc
@@ -49,7 +49,7 @@
ShadowFrame* shadow_frame, \
JValue* result, \
size_t arg_offset) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::Unstarted ## Name(self, shadow_frame, result, arg_offset); \
}
#include "unstarted_runtime_list.h"
@@ -65,7 +65,7 @@
mirror::Object* receiver, \
uint32_t* args, \
JValue* result) \
- SHARED_REQUIRES(Locks::mutator_lock_) { \
+ REQUIRES_SHARED(Locks::mutator_lock_) { \
interpreter::UnstartedRuntime::UnstartedJNI ## Name(self, method, receiver, args, result); \
}
#include "unstarted_runtime_list.h"
@@ -83,7 +83,7 @@
Thread* self,
mirror::Class* component_type,
const StackHandleScope<3>& data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime* runtime = Runtime::Current();
mirror::Class* array_type = runtime->GetClassLinker()->FindArrayClass(self, &component_type);
CHECK(array_type != nullptr);
@@ -99,7 +99,7 @@
static void CheckObjectArray(mirror::ObjectArray<mirror::Object>* array,
const StackHandleScope<3>& data)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK_EQ(array->GetLength(), 3);
CHECK_EQ(data.NumberOfReferences(), 3U);
for (size_t i = 0; i < 3; ++i) {
@@ -115,7 +115,7 @@
mirror::ObjectArray<mirror::Object>* dst,
int32_t dst_pos,
int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JValue result;
tmp->SetVRegReference(0, src);
tmp->SetVReg(1, src_pos);
@@ -141,7 +141,7 @@
int32_t dst_pos,
int32_t length,
const StackHandleScope<3>& expected_result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<3> hs_misc(self);
Handle<mirror::Class> dst_component_handle(hs_misc.NewHandle(dst_component_class));
@@ -167,7 +167,7 @@
ShadowFrame* tmp,
double const test_pairs[][2],
size_t num_pairs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0; i < num_pairs; ++i) {
tmp->SetVRegDouble(0, test_pairs[i][0]);
@@ -189,7 +189,7 @@
// Prepare for aborts. Aborts assume that the exception class is already resolved, as the
// loading code doesn't work under transactions.
- void PrepareForAborts() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void PrepareForAborts() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* result = Runtime::Current()->GetClassLinker()->FindClass(
Thread::Current(),
Transaction::kAbortExceptionSignature,
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 2401bec..979495a 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -234,7 +234,7 @@
// See section 11.3 "Linking Native Methods" of the JNI spec.
void* FindNativeMethod(ArtMethod* m, std::string& detail)
REQUIRES(Locks::jni_libraries_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string jni_short_name(JniShortName(m));
std::string jni_long_name(JniLongName(m));
mirror::ClassLoader* const declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
@@ -273,7 +273,7 @@
// Unload native libraries with cleared class loaders.
void UnloadNativeLibraries()
REQUIRES(!Locks::jni_libraries_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedObjectAccessUnchecked soa(Thread::Current());
std::vector<SharedLibrary*> unload_libraries;
{
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
index ed9d3ab..a10a72f 100644
--- a/runtime/java_vm_ext.h
+++ b/runtime/java_vm_ext.h
@@ -81,7 +81,7 @@
// such as NewByteArray.
// If -verbose:third-party-jni is on, we want to log any JNI function calls
// made by a third-party native method.
- bool ShouldTrace(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ShouldTrace(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
/**
* Loads the given shared library. 'path' is an absolute pathname.
@@ -98,67 +98,67 @@
// Unload native libraries with cleared class loaders.
void UnloadNativeLibraries()
REQUIRES(!Locks::jni_libraries_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/**
* Returns a pointer to the code for the native method 'm', found
* using dlsym(3) on every native library that's been loaded so far.
*/
void* FindCodeForNativeMethod(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpForSigQuit(std::ostream& os)
REQUIRES(!Locks::jni_libraries_lock_, !globals_lock_, !weak_globals_lock_);
void DumpReferenceTables(std::ostream& os)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_, !weak_globals_lock_);
bool SetCheckJniEnabled(bool enabled);
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_)
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!globals_lock_);
- void DisallowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void AllowNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
- void BroadcastForNewWeakGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+ void DisallowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ void AllowNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ void BroadcastForNewWeakGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
jobject AddGlobalRef(Thread* self, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
void DeleteGlobalRef(Thread* self, jobject obj) REQUIRES(!globals_lock_);
void DeleteWeakGlobalRef(Thread* self, jweak obj) REQUIRES(!weak_globals_lock_);
void SweepJniWeakGlobals(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
mirror::Object* DecodeGlobal(IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!globals_lock_);
mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
mirror::Object* DecodeWeakGlobalLocked(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(weak_globals_lock_);
// Like DecodeWeakGlobal() but to be used only during a runtime shutdown where self may be
// null.
mirror::Object* DecodeWeakGlobalDuringShutdown(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
// Checks if the weak global ref has been cleared by the GC without decode (read barrier.)
bool IsWeakGlobalCleared(Thread* self, IndirectRef ref)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!weak_globals_lock_);
Mutex& WeakGlobalsLock() RETURN_CAPABILITY(weak_globals_lock_) {
@@ -166,13 +166,13 @@
}
void UpdateWeakGlobal(Thread* self, IndirectRef ref, mirror::Object* result)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!weak_globals_lock_);
const JNIInvokeInterface* GetUncheckedFunctions() const {
return unchecked_functions_;
}
- void TrimGlobals() SHARED_REQUIRES(Locks::mutator_lock_)
+ void TrimGlobals() REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!globals_lock_);
jint HandleGetEnv(/*out*/void** env, jint version);
@@ -183,9 +183,9 @@
private:
// Return true if self can currently access weak globals.
- bool MayAccessWeakGlobalsUnlocked(Thread* self) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool MayAccessWeakGlobalsUnlocked(Thread* self) const REQUIRES_SHARED(Locks::mutator_lock_);
bool MayAccessWeakGlobals(Thread* self) const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(weak_globals_lock_);
Runtime* const runtime_;
diff --git a/runtime/jdwp/jdwp.h b/runtime/jdwp/jdwp.h
index ae02fe6..e5d34e1 100644
--- a/runtime/jdwp/jdwp.h
+++ b/runtime/jdwp/jdwp.h
@@ -88,7 +88,7 @@
uint64_t dex_pc;
};
std::ostream& operator<<(std::ostream& os, const JdwpLocation& rhs)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool operator==(const JdwpLocation& lhs, const JdwpLocation& rhs);
bool operator!=(const JdwpLocation& lhs, const JdwpLocation& rhs);
@@ -186,7 +186,7 @@
* The VM has finished initializing. Only called when the debugger is
* connected at the time initialization completes.
*/
- void PostVMStart() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
+ void PostVMStart() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
/*
* A location of interest has been reached. This is used for breakpoints,
@@ -202,7 +202,7 @@
*/
void PostLocationEvent(const EventLocation* pLoc, mirror::Object* thisPtr, int eventFlags,
const JValue* returnValue)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A field of interest has been accessed or modified. This is used for field access and field
@@ -213,7 +213,7 @@
*/
void PostFieldEvent(const EventLocation* pLoc, ArtField* field, mirror::Object* thisPtr,
const JValue* fieldValue, bool is_modification)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* An exception has been thrown.
@@ -222,19 +222,19 @@
*/
void PostException(const EventLocation* pThrowLoc, mirror::Throwable* exception_object,
const EventLocation* pCatchLoc, mirror::Object* thisPtr)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* A thread has started or stopped.
*/
void PostThreadChange(Thread* thread, bool start)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Class has been prepared.
*/
void PostClassPrepare(mirror::Class* klass)
- REQUIRES(!event_list_lock_, !jdwp_token_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_, !jdwp_token_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* The VM is about to stop.
@@ -242,7 +242,7 @@
bool PostVMDeath();
// Called if/when we realize we're talking to DDMS.
- void NotifyDdmsActive() SHARED_REQUIRES(Locks::mutator_lock_);
+ void NotifyDdmsActive() REQUIRES_SHARED(Locks::mutator_lock_);
void SetupChunkHeader(uint32_t type, size_t data_len, size_t header_size, uint8_t* out_header);
@@ -251,7 +251,7 @@
* Send up a chunk of DDM data.
*/
void DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HandlePacket() REQUIRES(!shutdown_lock_, !jdwp_token_lock_);
@@ -259,7 +259,7 @@
void ResetState()
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/* atomic ops to get next serial number */
uint32_t NextRequestSerial();
@@ -277,21 +277,21 @@
*/
JdwpError RegisterEvent(JdwpEvent* pEvent)
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister an event, given the requestId.
*/
void UnregisterEventById(uint32_t requestId)
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Unregister all events.
*/
void UnregisterAll()
REQUIRES(!event_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
explicit JdwpState(const JdwpOptions* options);
@@ -303,18 +303,18 @@
REQUIRES(!Locks::mutator_lock_);
void SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy suspend_policy,
ObjectId threadId)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!jdwp_token_lock_);
void CleanupMatchList(const std::vector<JdwpEvent*>& match_list)
- REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void EventFinish(ExpandBuf* pReq);
bool FindMatchingEvents(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(!event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void FindMatchingEventsLocked(JdwpEventKind eventKind, const ModBasket& basket,
std::vector<JdwpEvent*>* match_list)
- REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void UnregisterEvent(JdwpEvent* pEvent)
- REQUIRES(event_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(event_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void SendBufferedRequest(uint32_t type, const std::vector<iovec>& iov);
/*
@@ -410,9 +410,9 @@
bool processing_request_ GUARDED_BY(shutdown_lock_);
};
-std::string DescribeField(const FieldId& field_id) SHARED_REQUIRES(Locks::mutator_lock_);
-std::string DescribeMethod(const MethodId& method_id) SHARED_REQUIRES(Locks::mutator_lock_);
-std::string DescribeRefTypeId(const RefTypeId& ref_type_id) SHARED_REQUIRES(Locks::mutator_lock_);
+std::string DescribeField(const FieldId& field_id) REQUIRES_SHARED(Locks::mutator_lock_);
+std::string DescribeMethod(const MethodId& method_id) REQUIRES_SHARED(Locks::mutator_lock_);
+std::string DescribeRefTypeId(const RefTypeId& ref_type_id) REQUIRES_SHARED(Locks::mutator_lock_);
class Request {
public:
@@ -428,9 +428,9 @@
uint32_t ReadUnsigned32(const char* what);
- FieldId ReadFieldId() SHARED_REQUIRES(Locks::mutator_lock_);
+ FieldId ReadFieldId() REQUIRES_SHARED(Locks::mutator_lock_);
- MethodId ReadMethodId() SHARED_REQUIRES(Locks::mutator_lock_);
+ MethodId ReadMethodId() REQUIRES_SHARED(Locks::mutator_lock_);
ObjectId ReadObjectId(const char* specific_kind);
@@ -442,7 +442,7 @@
ObjectId ReadThreadGroupId();
- RefTypeId ReadRefTypeId() SHARED_REQUIRES(Locks::mutator_lock_);
+ RefTypeId ReadRefTypeId() REQUIRES_SHARED(Locks::mutator_lock_);
FrameId ReadFrameId();
@@ -456,7 +456,7 @@
JdwpTypeTag ReadTypeTag();
- JdwpLocation ReadLocation() SHARED_REQUIRES(Locks::mutator_lock_);
+ JdwpLocation ReadLocation() REQUIRES_SHARED(Locks::mutator_lock_);
JdwpModKind ReadModKind();
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 06b67b3..e2d29fe 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -447,7 +447,7 @@
* need to do this even if later mods cause us to ignore the event.
*/
static bool ModsMatch(JdwpEvent* pEvent, const ModBasket& basket)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JdwpEventMod* pMod = pEvent->mods;
for (int i = pEvent->modCount; i > 0; i--, pMod++) {
@@ -783,7 +783,7 @@
static void LogMatchingEventsAndThread(const std::vector<JdwpEvent*> match_list,
ObjectId thread_id)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
for (size_t i = 0, e = match_list.size(); i < e; ++i) {
JdwpEvent* pEvent = match_list[i];
VLOG(jdwp) << "EVENT #" << i << ": " << pEvent->eventKind
@@ -799,7 +799,7 @@
static void SetJdwpLocationFromEventLocation(const JDWP::EventLocation* event_location,
JDWP::JdwpLocation* jdwp_location)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(event_location != nullptr);
DCHECK(jdwp_location != nullptr);
Dbg::SetJdwpLocation(jdwp_location, event_location->method, event_location->dex_pc);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 6278ef0..f6008ac 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -54,7 +54,7 @@
}
static JdwpError WriteTaggedObject(ExpandBuf* reply, ObjectId object_id)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint8_t tag;
JdwpError rc = Dbg::GetObjectTag(object_id, &tag);
if (rc == ERR_NONE) {
@@ -65,7 +65,7 @@
}
static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<ObjectId>& objects)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAdd4BE(reply, objects.size());
for (size_t i = 0; i < objects.size(); ++i) {
JdwpError rc = WriteTaggedObject(reply, objects[i]);
@@ -85,7 +85,7 @@
static JdwpError RequestInvoke(JdwpState*, Request* request,
ObjectId thread_id, ObjectId object_id,
RefTypeId class_id, MethodId method_id, bool is_constructor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(!is_constructor || object_id != 0);
int32_t arg_count = request->ReadSigned32("argument count");
@@ -124,7 +124,7 @@
}
static JdwpError VM_Version(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Text information on runtime version.
std::string version(StringPrintf("Android Runtime %s", Runtime::Current()->GetVersion()));
expandBufAddUtf8String(pReply, version);
@@ -148,7 +148,7 @@
* been loaded by multiple class loaders.
*/
static JdwpError VM_ClassesBySignature(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string classDescriptor(request->ReadUtf8String());
std::vector<RefTypeId> ids;
@@ -180,7 +180,7 @@
* to be suspended, and that violates some JDWP expectations.
*/
static JdwpError VM_AllThreads(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::vector<ObjectId> thread_ids;
Dbg::GetThreads(nullptr /* all thread groups */, &thread_ids);
@@ -196,7 +196,7 @@
* List all thread groups that do not have a parent.
*/
static JdwpError VM_TopLevelThreadGroups(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
/*
* TODO: maintain a list of parentless thread groups in the VM.
*
@@ -215,7 +215,7 @@
* Respond with the sizes of the basic debugger types.
*/
static JdwpError VM_IDSizes(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAdd4BE(pReply, sizeof(FieldId));
expandBufAdd4BE(pReply, sizeof(MethodId));
expandBufAdd4BE(pReply, sizeof(ObjectId));
@@ -225,7 +225,7 @@
}
static JdwpError VM_Dispose(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::Dispose();
return ERR_NONE;
}
@@ -237,7 +237,7 @@
* This needs to increment the "suspend count" on all threads.
*/
static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Thread* self = Thread::Current();
ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
Dbg::SuspendVM();
@@ -248,13 +248,13 @@
* Resume execution. Decrements the "suspend count" of all threads.
*/
static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Dbg::ResumeVM();
return ERR_NONE;
}
static JdwpError VM_Exit(JdwpState* state, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t exit_status = request->ReadUnsigned32("exit_status");
state->ExitAfterReplying(exit_status);
return ERR_NONE;
@@ -267,7 +267,7 @@
* string "java.util.Arrays".)
*/
static JdwpError VM_CreateString(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string str(request->ReadUtf8String());
ObjectId string_id;
JdwpError status = Dbg::CreateString(str, &string_id);
@@ -279,7 +279,7 @@
}
static JdwpError VM_ClassPaths(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAddUtf8String(pReply, "/");
std::vector<std::string> class_path;
@@ -300,7 +300,7 @@
}
static JdwpError VM_DisposeObjects(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
size_t object_count = request->ReadUnsigned32("object_count");
for (size_t i = 0; i < object_count; ++i) {
ObjectId object_id = request->ReadObjectId();
@@ -311,7 +311,7 @@
}
static JdwpError VM_Capabilities(JdwpState*, Request*, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
expandBufAdd1(reply, true); // canWatchFieldModification
expandBufAdd1(reply, true); // canWatchFieldAccess
expandBufAdd1(reply, true); // canGetBytecodes
@@ -323,7 +323,7 @@
}
static JdwpError VM_CapabilitiesNew(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// The first few capabilities are the same as those reported by the older call.
VM_Capabilities(nullptr, request, reply);
@@ -350,7 +350,7 @@
}
static JdwpError VM_AllClassesImpl(ExpandBuf* pReply, bool descriptor_and_status, bool generic)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::vector<JDWP::RefTypeId> classes;
Dbg::GetClassList(&classes);
@@ -381,17 +381,17 @@
}
static JdwpError VM_AllClasses(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return VM_AllClassesImpl(pReply, true, false);
}
static JdwpError VM_AllClassesWithGeneric(JdwpState*, Request*, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return VM_AllClassesImpl(pReply, true, true);
}
static JdwpError VM_InstanceCounts(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
int32_t class_count = request->ReadSigned32("class count");
if (class_count < 0) {
return ERR_ILLEGAL_ARGUMENT;
@@ -415,7 +415,7 @@
}
static JdwpError RT_Modifiers(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::GetModifiers(refTypeId, pReply);
}
@@ -424,7 +424,7 @@
* Get values from static fields in a reference type.
*/
static JdwpError RT_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
int32_t field_count = request->ReadSigned32("field count");
expandBufAdd4BE(pReply, field_count);
@@ -442,7 +442,7 @@
* Get the name of the source file in which a reference type was declared.
*/
static JdwpError RT_SourceFile(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
std::string source_file;
JdwpError status = Dbg::GetSourceFile(refTypeId, &source_file);
@@ -457,7 +457,7 @@
* Return the current status of the reference type.
*/
static JdwpError RT_Status(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
JDWP::JdwpTypeTag type_tag;
uint32_t class_status;
@@ -473,7 +473,7 @@
* Return interfaces implemented directly by this class.
*/
static JdwpError RT_Interfaces(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredInterfaces(refTypeId, pReply);
}
@@ -482,7 +482,7 @@
* Return the class object corresponding to this type.
*/
static JdwpError RT_ClassObject(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
ObjectId class_object_id;
JdwpError status = Dbg::GetClassObject(refTypeId, &class_object_id);
@@ -500,13 +500,13 @@
* JDB seems interested, but DEX files don't currently support this.
*/
static JdwpError RT_SourceDebugExtension(JdwpState*, Request*, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
/* referenceTypeId in, string out */
return ERR_ABSENT_INFORMATION;
}
static JdwpError RT_Signature(JdwpState*, Request* request, ExpandBuf* pReply, bool with_generic)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
std::string signature;
@@ -522,12 +522,12 @@
}
static JdwpError RT_Signature(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return RT_Signature(state, request, pReply, false);
}
static JdwpError RT_SignatureWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return RT_Signature(state, request, pReply, true);
}
@@ -536,7 +536,7 @@
* reference type, or null if it was loaded by the system loader.
*/
static JdwpError RT_ClassLoader(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::GetClassLoader(refTypeId, pReply);
}
@@ -546,14 +546,14 @@
* fields declared by a class.
*/
static JdwpError RT_FieldsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredFields(refTypeId, true, pReply);
}
// Obsolete equivalent of FieldsWithGeneric, without the generic type information.
static JdwpError RT_Fields(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredFields(refTypeId, false, pReply);
}
@@ -563,20 +563,20 @@
* methods declared by a class.
*/
static JdwpError RT_MethodsWithGeneric(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredMethods(refTypeId, true, pReply);
}
// Obsolete equivalent of MethodsWithGeneric, without the generic type information.
static JdwpError RT_Methods(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
return Dbg::OutputDeclaredMethods(refTypeId, false, pReply);
}
static JdwpError RT_Instances(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
int32_t max_count = request->ReadSigned32("max count");
if (max_count < 0) {
@@ -596,7 +596,7 @@
* Return the immediate superclass of a class.
*/
static JdwpError CT_Superclass(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
RefTypeId superClassId;
JdwpError status = Dbg::GetSuperclass(class_id, &superClassId);
@@ -611,7 +611,7 @@
* Set static class values.
*/
static JdwpError CT_SetValues(JdwpState* , Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
int32_t values_count = request->ReadSigned32("values count");
@@ -641,7 +641,7 @@
*/
static JdwpError CT_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -658,7 +658,7 @@
*/
static JdwpError CT_NewInstance(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -675,7 +675,7 @@
* Create a new array object of the requested type and length.
*/
static JdwpError AT_newInstance(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId arrayTypeId = request->ReadRefTypeId();
int32_t length = request->ReadSigned32("length");
@@ -694,7 +694,7 @@
*/
static JdwpError IT_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
@@ -706,7 +706,7 @@
* Return line number information for the method, if present.
*/
static JdwpError M_LineTable(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId refTypeId = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -717,7 +717,7 @@
static JdwpError M_VariableTable(JdwpState*, Request* request, ExpandBuf* pReply,
bool generic)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -730,17 +730,17 @@
}
static JdwpError M_VariableTable(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return M_VariableTable(state, request, pReply, false);
}
static JdwpError M_VariableTableWithGeneric(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return M_VariableTable(state, request, pReply, true);
}
static JdwpError M_Bytecodes(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
@@ -760,7 +760,7 @@
// Default implementation for IDEs relying on this command.
static JdwpError M_IsObsolete(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadRefTypeId(); // unused reference type ID
request->ReadMethodId(); // unused method ID
expandBufAdd1(reply, false); // a method is never obsolete.
@@ -775,7 +775,7 @@
* passed in here.
*/
static JdwpError OR_ReferenceType(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::GetReferenceType(object_id, pReply);
}
@@ -784,7 +784,7 @@
* Get values from the fields of an object.
*/
static JdwpError OR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t field_count = request->ReadSigned32("field count");
@@ -804,7 +804,7 @@
* Set values in the fields of an object.
*/
static JdwpError OR_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t field_count = request->ReadSigned32("field count");
@@ -826,7 +826,7 @@
}
static JdwpError OR_MonitorInfo(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::GetMonitorInfo(object_id, reply);
}
@@ -844,7 +844,7 @@
*/
static JdwpError OR_InvokeMethod(JdwpState* state, Request* request,
ExpandBuf* pReply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
ObjectId thread_id = request->ReadThreadId();
RefTypeId class_id = request->ReadRefTypeId();
@@ -854,19 +854,19 @@
}
static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::DisableCollection(object_id);
}
static JdwpError OR_EnableCollection(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
return Dbg::EnableCollection(object_id);
}
static JdwpError OR_IsCollected(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
bool is_collected;
JdwpError rc = Dbg::IsCollected(object_id, &is_collected);
@@ -875,7 +875,7 @@
}
static JdwpError OR_ReferringObjects(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId object_id = request->ReadObjectId();
int32_t max_count = request->ReadSigned32("max count");
if (max_count < 0) {
@@ -895,7 +895,7 @@
* Return the string value in a string object.
*/
static JdwpError SR_Value(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId stringObject = request->ReadObjectId();
std::string str;
JDWP::JdwpError error = Dbg::StringToUtf8(stringObject, &str);
@@ -914,7 +914,7 @@
* Return a thread's name.
*/
static JdwpError TR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
std::string name;
@@ -935,7 +935,7 @@
* resume it; only the JDI is allowed to resume it.
*/
static JdwpError TR_Suspend(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
if (thread_id == Dbg::GetThreadSelfId()) {
@@ -953,7 +953,7 @@
* Resume the specified thread.
*/
static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
if (thread_id == Dbg::GetThreadSelfId()) {
@@ -969,7 +969,7 @@
* Return status of specified thread.
*/
static JdwpError TR_Status(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
JDWP::JdwpThreadStatus threadStatus;
@@ -991,7 +991,7 @@
* Return the thread group that the specified thread is a member of.
*/
static JdwpError TR_ThreadGroup(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::GetThreadGroup(thread_id, pReply);
}
@@ -1003,7 +1003,7 @@
* be THREAD_NOT_SUSPENDED.
*/
static JdwpError TR_Frames(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
uint32_t start_frame = request->ReadUnsigned32("start frame");
uint32_t length = request->ReadUnsigned32("length");
@@ -1035,7 +1035,7 @@
* Returns the #of frames on the specified thread, which must be suspended.
*/
static JdwpError TR_FrameCount(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
size_t frame_count;
@@ -1049,7 +1049,7 @@
}
static JdwpError TR_OwnedMonitors(Request* request, ExpandBuf* reply, bool with_stack_depths)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
std::vector<ObjectId> monitors;
@@ -1073,17 +1073,17 @@
}
static JdwpError TR_OwnedMonitors(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return TR_OwnedMonitors(request, reply, false);
}
static JdwpError TR_OwnedMonitorsStackDepthInfo(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return TR_OwnedMonitors(request, reply, true);
}
static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
ObjectId contended_monitor;
@@ -1095,7 +1095,7 @@
}
static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
@@ -1107,7 +1107,7 @@
* its suspend count recently.)
*/
static JdwpError TR_DebugSuspendCount(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
return Dbg::GetThreadDebugSuspendCount(thread_id, pReply);
}
@@ -1118,7 +1118,7 @@
* The Eclipse debugger recognizes "main" and "system" as special.
*/
static JdwpError TGR_Name(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupName(thread_group_id, pReply);
}
@@ -1128,7 +1128,7 @@
* thread group.
*/
static JdwpError TGR_Parent(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupParent(thread_group_id, pReply);
}
@@ -1138,7 +1138,7 @@
* specified thread group.
*/
static JdwpError TGR_Children(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_group_id = request->ReadThreadGroupId();
return Dbg::GetThreadGroupChildren(thread_group_id, pReply);
}
@@ -1147,7 +1147,7 @@
* Return the #of components in the array.
*/
static JdwpError AR_Length(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
int32_t length;
@@ -1166,7 +1166,7 @@
* Return the values from an array.
*/
static JdwpError AR_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
uint32_t offset = request->ReadUnsigned32("offset");
uint32_t length = request->ReadUnsigned32("length");
@@ -1177,7 +1177,7 @@
* Set values in an array.
*/
static JdwpError AR_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId array_id = request->ReadArrayId();
uint32_t offset = request->ReadUnsigned32("offset");
uint32_t count = request->ReadUnsigned32("count");
@@ -1185,7 +1185,7 @@
}
static JdwpError CLR_VisibleClasses(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadObjectId(); // classLoaderObject
// TODO: we should only return classes which have the given class loader as a defining or
// initiating loader. The former would be easy; the latter is hard, because we don't have
@@ -1206,7 +1206,7 @@
* Reply with a requestID.
*/
static JdwpError ER_Set(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
JdwpEventKind event_kind = request->ReadEnum1<JdwpEventKind>("event kind");
JdwpSuspendPolicy suspend_policy = request->ReadEnum1<JdwpSuspendPolicy>("suspend policy");
int32_t modifier_count = request->ReadSigned32("modifier count");
@@ -1348,7 +1348,7 @@
}
static JdwpError ER_Clear(JdwpState* state, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
request->ReadEnum1<JdwpEventKind>("event kind");
uint32_t requestId = request->ReadUnsigned32("request id");
@@ -1362,7 +1362,7 @@
* Return the values of arguments and local variables.
*/
static JdwpError SF_GetValues(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::GetLocalValues(request, pReply);
}
@@ -1370,12 +1370,12 @@
* Set the values of arguments and local variables.
*/
static JdwpError SF_SetValues(JdwpState*, Request* request, ExpandBuf*)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return Dbg::SetLocalValues(request);
}
static JdwpError SF_ThisObject(JdwpState*, Request* request, ExpandBuf* reply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ObjectId thread_id = request->ReadThreadId();
FrameId frame_id = request->ReadFrameId();
@@ -1396,7 +1396,7 @@
* that, or I have no idea what this is for.)
*/
static JdwpError COR_ReflectedType(JdwpState*, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
RefTypeId class_object_id = request->ReadRefTypeId();
return Dbg::GetReflectedType(class_object_id, pReply);
}
@@ -1405,7 +1405,7 @@
* Handle a DDM packet with a single chunk in it.
*/
static JdwpError DDM_Chunk(JdwpState* state, Request* request, ExpandBuf* pReply)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
state->NotifyDdmsActive();
uint8_t* replyBuf = nullptr;
int replyLen = -1;
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 3fbad36..5989b61 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -63,12 +63,12 @@
// Explicit template instantiation.
template
-SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Object> obj_h);
template
-SHARED_REQUIRES(Locks::mutator_lock_)
+REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
JDWP::ObjectId ObjectRegistry::Add(Handle<mirror::Throwable> obj_h);
diff --git a/runtime/jdwp/object_registry.h b/runtime/jdwp/object_registry.h
index 17490f4..7fa57c6 100644
--- a/runtime/jdwp/object_registry.h
+++ b/runtime/jdwp/object_registry.h
@@ -63,24 +63,24 @@
ObjectRegistry();
JDWP::ObjectId Add(mirror::Object* o)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
JDWP::RefTypeId AddRefType(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
template<class T>
JDWP::ObjectId Add(Handle<T> obj_h)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
JDWP::RefTypeId AddRefType(Handle<mirror::Class> c_h)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !lock_);
template<typename T> T Get(JDWP::ObjectId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_) {
if (id == 0) {
*error = JDWP::ERR_NONE;
return nullptr;
@@ -88,42 +88,42 @@
return down_cast<T>(InternalGet(id, error));
}
- void Clear() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ void Clear() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void DisableCollection(JDWP::ObjectId id)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void EnableCollection(JDWP::ObjectId id)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
bool IsCollected(JDWP::ObjectId id)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void DisposeObject(JDWP::ObjectId id, uint32_t reference_count)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
// This is needed to get the jobject instead of the Object*.
// Avoid using this and use standard Get when possible.
- jobject GetJObject(JDWP::ObjectId id) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ jobject GetJObject(JDWP::ObjectId id) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
private:
template<class T>
JDWP::ObjectId InternalAdd(Handle<T> obj_h)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
mirror::Object* InternalGet(JDWP::ObjectId id, JDWP::JdwpError* error)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_);
void Demote(ObjectRegistryEntry& entry)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(lock_);
void Promote(ObjectRegistryEntry& entry)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(lock_);
bool ContainsLocked(Thread* self, mirror::Object* o, int32_t identity_hash_code,
ObjectRegistryEntry** out_entry)
- REQUIRES(lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(lock_) REQUIRES_SHARED(Locks::mutator_lock_);
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::multimap<int32_t, ObjectRegistryEntry*> object_to_entry_ GUARDED_BY(lock_);
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 2aa6f3d..417a185 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -31,6 +31,7 @@
class ArtMethod;
struct RuntimeArgumentMap;
+union JValue;
namespace jit {
@@ -50,7 +51,7 @@
virtual ~Jit();
static Jit* Create(JitOptions* options, std::string* error_msg);
bool CompileMethod(ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CreateThreadPool();
const JitCodeCache* GetCodeCache() const {
@@ -70,7 +71,7 @@
void AddMemoryUsage(ArtMethod* method, size_t bytes)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t OSRMethodThreshold() const {
return osr_method_threshold_;
@@ -102,25 +103,25 @@
// Profiling methods.
void MethodEntered(Thread* thread, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AddSamples(Thread* self, ArtMethod* method, uint16_t samples, bool with_backedges)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InvokeVirtualOrInterface(Thread* thread,
mirror::Object* this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AddSamples(self, caller, invoke_transition_weight_, false);
}
void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AddSamples(self, callee, invoke_transition_weight_, false);
}
@@ -140,7 +141,7 @@
void DumpForSigQuit(std::ostream& os) REQUIRES(!lock_);
static void NewTypeLoadedIfUsingJit(mirror::Class* type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// If debug info generation is turned on then write the type information for types already loaded
// into the specified class linker to the jit debug interface,
@@ -164,7 +165,7 @@
uint32_t dex_pc,
int32_t dex_pc_offset,
JValue* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool LoadCompilerLibrary(std::string* error_msg);
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index b1079dd..c9227b1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -446,7 +446,7 @@
code_cache_(code_cache_in),
bitmap_(code_cache_->GetLiveBitmap()) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
if (method_header == nullptr) {
return true;
@@ -469,7 +469,7 @@
MarkCodeClosure(JitCodeCache* code_cache, Barrier* barrier)
: code_cache_(code_cache), barrier_(barrier) {}
- void Run(Thread* thread) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Run(Thread* thread) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ScopedTrace trace(__PRETTY_FUNCTION__);
DCHECK(thread == Thread::Current() || thread->IsSuspended());
MarkCodeVisitor visitor(thread, code_cache_);
diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h
index 1938221..e15c93a 100644
--- a/runtime/jit/jit_code_cache.h
+++ b/runtime/jit/jit_code_cache.h
@@ -70,7 +70,7 @@
size_t DataCacheSize() REQUIRES(!lock_);
bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Notify to the code cache that the compiler wants to use the
@@ -78,15 +78,15 @@
// and therefore ensure the returned profiling info object is not
// collected.
ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
void DoneCompilerUse(ArtMethod* method, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Allocate and write code and its metadata to the code cache.
@@ -99,7 +99,7 @@
const uint8_t* code,
size_t code_size,
bool osr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Return true if the code cache contains this pc.
@@ -110,12 +110,12 @@
// Reserve a region of data of size at least "size". Returns null if there is no more room.
uint8_t* ReserveData(Thread* self, size_t size, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
// Clear data from the data portion of the code cache.
void ClearData(Thread* self, void* data)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!lock_);
CodeCacheBitmap* GetLiveBitmap() const {
@@ -125,28 +125,28 @@
// Return whether we should do a full collection given the current state of the cache.
bool ShouldDoFullCollection()
REQUIRES(lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Perform a collection on the code cache.
void GarbageCollectCache(Thread* self)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given the 'pc', try to find the JIT compiled code associated with it.
// Return null if 'pc' is not in the code cache. 'method' is passed for
// sanity check.
OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Remove all methods in our cache that were allocated by 'alloc'.
void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ClearGcRootsInInlineCaches(Thread* self) REQUIRES(!lock_);
@@ -157,7 +157,7 @@
const std::vector<uint32_t>& entries,
bool retry_allocation)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
return mspace == code_mspace_ || mspace == data_mspace_;
@@ -169,7 +169,7 @@
void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
std::vector<MethodReference>& methods)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint64_t GetLastUpdateTimeNs() const;
@@ -182,7 +182,7 @@
void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Dump(std::ostream& os) REQUIRES(!lock_);
@@ -209,13 +209,13 @@
size_t code_size,
bool osr)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ProfilingInfo* AddProfilingInfoInternal(Thread* self,
ArtMethod* method,
const std::vector<uint32_t>& entries)
REQUIRES(lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// If a collection is in progress, wait for it to finish. Return
// whether the thread actually waited.
@@ -243,15 +243,15 @@
void DoCollection(Thread* self, bool collect_profiling_info)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RemoveUnmarkedCode(Thread* self)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void MarkCompiledCodeOnThreadStacks(Thread* self)
REQUIRES(!lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CheckLiveCompiledCodeHasProfilingInfo()
REQUIRES(lock_);
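These annotations only have an effect when Clang's -Wthread-safety analysis is enabled for the build. A standalone sketch (hypothetical names, not ART code) of the kind of misuse the renamed macro lets the analysis catch:

    // Compile-check with: clang++ -std=c++14 -Wthread-safety -fsyntax-only sketch.cc
    // Hypothetical, self-contained example; not part of the ART sources.
    struct __attribute__((capability("mutex"))) MutatorLock {};

    #define REQUIRES_SHARED(...) \
        __attribute__((requires_shared_capability(__VA_ARGS__)))

    MutatorLock mutator_lock;

    // May only be called with mutator_lock held for reading (shared).
    int ReadHeapState() REQUIRES_SHARED(mutator_lock) { return 0; }

    int Caller() {
      // -Wthread-safety warns here: mutator_lock must be held (shared)
      // when calling ReadHeapState().
      return ReadHeapState();
    }
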
diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc
index 927681c..42916c3 100644
--- a/runtime/jit/profile_saver.cc
+++ b/runtime/jit/profile_saver.cc
@@ -189,7 +189,7 @@
: methods_(methods),
startup_method_samples_(startup_method_samples) {}
- virtual bool operator()(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) {
+ virtual bool operator()(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) {
return true;
}
diff --git a/runtime/jit/profiling_info.h b/runtime/jit/profiling_info.h
index a890fbb..1056fac 100644
--- a/runtime/jit/profiling_info.h
+++ b/runtime/jit/profiling_info.h
@@ -53,7 +53,7 @@
return true;
}
- mirror::Class* GetMonomorphicType() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetMonomorphicType() const REQUIRES_SHARED(Locks::mutator_lock_) {
// Note that we cannot ensure the inline cache is actually monomorphic
// at this point, as other threads may have updated it.
DCHECK(!classes_[0].IsNull());
@@ -69,7 +69,7 @@
return !classes_[1].IsNull() && classes_[kIndividualCacheSize - 1].IsNull();
}
- mirror::Class* GetTypeAt(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetTypeAt(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
return classes_[i].Read();
}
@@ -93,14 +93,14 @@
// Create a ProfilingInfo for 'method'. Return whether it succeeded, or if it is
// not needed in case the method does not have virtual/interface invocations.
static bool Create(Thread* self, ArtMethod* method, bool retry_allocation)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Add information from an executed INVOKE instruction to the profile.
void AddInvokeInfo(uint32_t dex_pc, mirror::Class* cls)
// Method should not be interruptible, as it manipulates the ProfilingInfo
// which can be concurrently collected.
REQUIRES(Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS since we don't know what the callback requires.
template<typename RootVisitorType>
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
index 40efc89..0358494 100644
--- a/runtime/jni_env_ext.cc
+++ b/runtime/jni_env_ext.cc
@@ -154,7 +154,7 @@
}
// Use some defining part of the caller's frame as the identifying mark for the JNI segment.
-static uintptr_t GetJavaCallFrame(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) {
+static uintptr_t GetJavaCallFrame(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) {
NthCallerVisitor zeroth_caller(self, 0, false);
zeroth_caller.WalkStack();
if (zeroth_caller.caller == nullptr) {
@@ -175,7 +175,7 @@
}
static std::string ComputeMonitorDescription(Thread* self,
- jobject obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ jobject obj) REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* o = self->DecodeJObject(obj);
if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
Locks::mutator_lock_->IsExclusiveHeld(self)) {
@@ -196,12 +196,12 @@
uintptr_t frame,
ReferenceTable* monitors,
std::vector<std::pair<uintptr_t, jobject>>* locked_objects)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto kept_end = std::remove_if(
locked_objects->begin(),
locked_objects->end(),
[self, frame, monitors](const std::pair<uintptr_t, jobject>& pair)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (frame == pair.first) {
mirror::Object* o = self->DecodeJObject(pair.second);
monitors->Remove(o);
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
index ac287d4..79dfb0d 100644
--- a/runtime/jni_env_ext.h
+++ b/runtime/jni_env_ext.h
@@ -39,16 +39,16 @@
~JNIEnvExt();
void DumpReferenceTables(std::ostream& os)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetCheckJniEnabled(bool enabled);
- void PushFrame(int capacity) SHARED_REQUIRES(Locks::mutator_lock_);
- void PopFrame() SHARED_REQUIRES(Locks::mutator_lock_);
+ void PushFrame(int capacity) REQUIRES_SHARED(Locks::mutator_lock_);
+ void PopFrame() REQUIRES_SHARED(Locks::mutator_lock_);
template<typename T>
T AddLocalReference(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Offset SegmentStateOffset(size_t pointer_size);
static Offset LocalRefCookieOffset(size_t pointer_size);
@@ -56,8 +56,8 @@
static jint GetEnvHandler(JavaVMExt* vm, /*out*/void** out, jint version);
- jobject NewLocalRef(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void DeleteLocalRef(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ jobject NewLocalRef(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void DeleteLocalRef(jobject obj) REQUIRES_SHARED(Locks::mutator_lock_);
Thread* const self;
JavaVMExt* const vm;
@@ -92,13 +92,13 @@
// rules in CheckJNI mode.
// Record locking of a monitor.
- void RecordMonitorEnter(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void RecordMonitorEnter(jobject obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Check the release, that is, that the release is performed in the same JNI "segment."
- void CheckMonitorRelease(jobject obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckMonitorRelease(jobject obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Check that no monitors are held that have been acquired in this JNI "segment."
- void CheckNoHeldMonitors() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckNoHeldMonitors() REQUIRES_SHARED(Locks::mutator_lock_);
// Set the functions to the runtime shutdown functions.
void SetFunctionsToRuntimeShutdownFunctions();
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 7bcadd8..a434442 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -90,7 +90,7 @@
static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
const char* name, const char* sig, const char* kind)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string temp;
soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
@@ -99,7 +99,7 @@
static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::Class* c,
const char* kind, jint idx, bool return_errors)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in "
<< PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
@@ -108,7 +108,7 @@
}
static mirror::Class* EnsureInitialized(Thread* self, mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (LIKELY(klass->IsInitialized())) {
return klass;
}
@@ -122,7 +122,7 @@
static jmethodID FindMethodID(ScopedObjectAccess& soa, jclass jni_class,
const char* name, const char* sig, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class));
if (c == nullptr) {
return nullptr;
@@ -149,7 +149,7 @@
}
static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
// If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
@@ -180,7 +180,7 @@
static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, const char* name,
const char* sig, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<2> hs(soa.Self());
Handle<mirror::Class> c(
hs.NewHandle(EnsureInitialized(soa.Self(), soa.Decode<mirror::Class*>(jni_class))));
@@ -228,7 +228,7 @@
static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
jsize length, const char* identifier)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string type(PrettyTypeOf(array));
soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"%s offset=%d length=%d %s.length=%d",
@@ -237,7 +237,7 @@
static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
jsize array_length)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
"offset=%d length=%d string.length()=%d", start, length,
array_length);
@@ -315,7 +315,7 @@
template <bool kNative>
static ArtMethod* FindMethod(mirror::Class* c, const StringPiece& name, const StringPiece& sig)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
auto pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
for (auto& method : c->GetMethods(pointer_size)) {
if (kNative == method.IsNative() && name == method.GetName() && method.GetSignature() == sig) {
@@ -2372,7 +2372,7 @@
private:
static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
const char* caller)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// TODO: we should try to expand the table if necessary.
if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
@@ -2401,7 +2401,7 @@
template <typename JArrayT, typename ElementT, typename ArtArrayT>
static ArtArrayT* DecodeAndCheckArrayType(ScopedObjectAccess& soa, JArrayT java_array,
const char* fn_name, const char* operation)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
soa.Vm()->JniAbortF(fn_name,
@@ -2458,7 +2458,7 @@
static void ReleasePrimitiveArray(ScopedObjectAccess& soa, mirror::Array* array,
size_t component_size, void* elements, jint mode)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
void* array_data = array->GetRawData(component_size, 0);
gc::Heap* heap = Runtime::Current()->GetHeap();
bool is_copy = array_data != elements;
diff --git a/runtime/mirror/abstract_method.h b/runtime/mirror/abstract_method.h
index 4f714a6..22a3ea8 100644
--- a/runtime/mirror/abstract_method.h
+++ b/runtime/mirror/abstract_method.h
@@ -35,14 +35,14 @@
public:
// Called from Constructor::CreateFromArtMethod, Method::CreateFromArtMethod.
template <PointerSize kPointerSize, bool kTransactionActive>
- bool CreateFromArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_)
+ bool CreateFromArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
- ArtMethod* GetArtMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetArtMethod() REQUIRES_SHARED(Locks::mutator_lock_);
// Only used by the image writer.
template <bool kTransactionActive = false>
- void SetArtMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetArtMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
private:
static MemberOffset ArtMethodOffset() {
diff --git a/runtime/mirror/accessible_object.h b/runtime/mirror/accessible_object.h
index dcf5118..1d934a8 100644
--- a/runtime/mirror/accessible_object.h
+++ b/runtime/mirror/accessible_object.h
@@ -36,12 +36,12 @@
}
template<bool kTransactionActive>
- void SetAccessible(bool value) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAccessible(bool value) REQUIRES_SHARED(Locks::mutator_lock_) {
UNUSED(padding_);
return SetFieldBoolean<kTransactionActive>(FlagOffset(), value ? 1u : 0u);
}
- bool IsAccessible() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsAccessible() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldBoolean(FlagOffset());
}
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 014e54b..9d7f98f 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -101,7 +101,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
@@ -125,7 +125,7 @@
}
void operator()(Object* obj, size_t usable_size) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsArray as object is not yet in live bitmap or allocation stack.
Array* array = down_cast<Array*>(obj);
// DCHECK(array->IsArrayInstance());
diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc
index 4128689..aee48cc 100644
--- a/runtime/mirror/array.cc
+++ b/runtime/mirror/array.cc
@@ -43,7 +43,7 @@
static Array* RecursiveCreateMultiArray(Thread* self,
Handle<Class> array_class, int current_dimension,
Handle<mirror::IntArray> dimensions)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
int32_t array_length = dimensions->Get(current_dimension);
StackHandleScope<1> hs(self);
Handle<Array> new_array(
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index ec10a43..6c82eb9 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -40,21 +40,21 @@
template <bool kIsInstrumented, bool kFillUsable = false>
ALWAYS_INLINE static Array* Alloc(Thread* self, Class* array_class, int32_t component_count,
size_t component_size_shift, gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static Array* CreateMultiArray(Thread* self, Handle<Class> element_class,
Handle<IntArray> dimensions)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
}
- void SetLength(int32_t length) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetLength(int32_t length) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_GE(length, 0);
// We use non transactional version since we can't undo this write. We also disable checking
// since it would fail during a transaction.
@@ -68,7 +68,7 @@
static MemberOffset DataOffset(size_t component_size);
void* GetRawData(size_t component_size, int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
intptr_t data = reinterpret_cast<intptr_t>(this) + DataOffset(component_size).Int32Value() +
+ (index * component_size);
return reinterpret_cast<void*>(data);
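For reference, the unchanged body above forms a raw element address as object base + data offset + index * component size. A minimal stand-alone sketch of the same arithmetic (illustrative names, not ART's):

#include <cstddef>
#include <cstdint>

// Illustrative only: mirrors the address computation in GetRawData above.
inline void* RawElementAddress(void* array_base,
                               size_t data_offset,
                               size_t component_size,
                               int32_t index) {
  intptr_t data = reinterpret_cast<intptr_t>(array_base) +
                  static_cast<intptr_t>(data_offset) +
                  static_cast<intptr_t>(index) * static_cast<intptr_t>(component_size);
  return reinterpret_cast<void*>(data);
}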
@@ -83,18 +83,18 @@
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE bool CheckIsValidIndex(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
- Array* CopyOf(Thread* self, int32_t new_length) SHARED_REQUIRES(Locks::mutator_lock_)
+ Array* CopyOf(Thread* self, int32_t new_length) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
protected:
- void ThrowArrayStoreException(Object* object) SHARED_REQUIRES(Locks::mutator_lock_)
+ void ThrowArrayStoreException(Object* object) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
private:
void ThrowArrayIndexOutOfBoundsException(int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The number of array elements.
int32_t length_;
@@ -110,32 +110,32 @@
typedef T ElementType;
static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- const T* GetData() const ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const T* GetData() const ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
}
- T* GetData() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ T* GetData() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
}
- T Get(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ T Get(int32_t i) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ T GetWithoutChecks(int32_t i) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(CheckIsValidIndex(i)) << "i=" << i << " length=" << GetLength();
return GetData()[i];
}
- void Set(int32_t i, T value) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ void Set(int32_t i, T value) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true>
void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive,
bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -147,7 +147,7 @@
* and the arrays non-null.
*/
void Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Works like memcpy(), except we guarantee not to allow tearing of array values (ie using
@@ -155,7 +155,7 @@
* and the arrays non-null.
*/
void Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos, int32_t count)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void SetArrayClass(Class* array_class) {
CHECK(array_class_.IsNull());
@@ -164,7 +164,7 @@
}
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!array_class_.IsNull());
return array_class_.Read<kReadBarrierOption>();
}
@@ -174,7 +174,7 @@
array_class_ = GcRoot<Class>(nullptr);
}
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
static GcRoot<Class> array_class_;
@@ -189,14 +189,14 @@
VerifyObjectFlags kVerifyFlags = kVerifyNone,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
T GetElementPtrSize(uint32_t idx, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false>
void SetElementPtrSize(uint32_t idx, uint64_t element, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive = false, bool kUnchecked = false, typename T>
void SetElementPtrSize(uint32_t idx, T* element, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fixup the pointers in the dest arrays by passing our pointers through the visitor. Only copies
// to dest if visitor(source_ptr) != source_ptr.
@@ -204,7 +204,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void Fixup(mirror::PointerArray* dest, PointerSize pointer_size, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
};
} // namespace mirror
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 0f2aac2..d1d8caa 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -42,11 +42,19 @@
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline uint32_t Class::GetObjectSize() {
// Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
- DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << " class=" << PrettyTypeOf(this);
+ DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf(this);
return GetField32(ObjectSizeOffset());
}
template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
+inline uint32_t Class::GetObjectSizeAllocFastPath() {
+ // Note: Extra parentheses to avoid the comma being interpreted as macro parameter separator.
+ DCHECK((!IsVariableSize<kVerifyFlags, kReadBarrierOption>())) << "class=" << PrettyTypeOf(this);
+ return GetField32(ObjectSizeAllocFastPathOffset());
+}
+
+
+template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline Class* Class::GetSuperClass() {
// Can only get super class for loaded classes (hack for when runtime is
// initializing)
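The "extra parentheses" notes above exist because the preprocessor splits macro arguments at top-level commas, and the comma inside a template argument list such as IsVariableSize<kVerifyFlags, kReadBarrierOption>() is not protected by the angle brackets. A small sketch with an assumed single-argument CHECK-style macro (names are illustrative):

#include <cstdlib>

// Single-argument assertion macro, standing in for DCHECK in this sketch.
#define MY_DCHECK(cond) do { if (!(cond)) std::abort(); } while (0)

template <int kA, int kB>
bool LessThan() { return kA < kB; }

int main() {
  // MY_DCHECK(LessThan<1, 2>());   // error: macro gets two arguments, split at the comma
  MY_DCHECK((LessThan<1, 2>()));    // extra parentheses keep the comma inside one argument
  return 0;
}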
@@ -861,6 +869,8 @@
klass->SetPrimitiveType(Primitive::kPrimNot); // Default to not being primitive.
klass->SetDexClassDefIndex(DexFile::kDexNoIndex16); // Default to no valid class def index.
klass->SetDexTypeIndex(DexFile::kDexNoIndex16); // Default to no valid type index.
+ // Default to force slow path until initialized.
+ klass->SetObjectSizeAllocFastPath(std::numeric_limits<uint32_t>::max());
}
inline void Class::SetAccessFlags(uint32_t new_access_flags) {
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index f948be7..c979c28 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -100,9 +100,20 @@
}
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
- h_this->SetField32Volatile<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
+ h_this->SetField32Volatile<true>(StatusOffset(), new_status);
} else {
- h_this->SetField32Volatile<false>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
+ h_this->SetField32Volatile<false>(StatusOffset(), new_status);
+ }
+
+ // Setting the object size alloc fast path needs to be after the status write so that if the
+ // alloc path sees a valid object size, we would know that it's initialized as long as it has a
+ // load-acquire/fake dependency.
+ if (new_status == kStatusInitialized && !h_this->IsVariableSize()) {
+ DCHECK_EQ(h_this->GetObjectSizeAllocFastPath(), std::numeric_limits<uint32_t>::max());
+ // Finalizable objects must always go slow path.
+ if (!h_this->IsFinalizable()) {
+ h_this->SetObjectSizeAllocFastPath(RoundUp(h_this->GetObjectSize(), kObjectAlignment));
+ }
}
if (!class_linker_initialized) {
@@ -137,7 +148,7 @@
if (kIsDebugBuild && new_class_size < GetClassSize()) {
DumpClass(LOG(INTERNAL_FATAL), kDumpClassFullDetail);
LOG(INTERNAL_FATAL) << new_class_size << " vs " << GetClassSize();
- LOG(FATAL) << " class=" << PrettyTypeOf(this);
+ LOG(FATAL) << "class=" << PrettyTypeOf(this);
}
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size);
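The new comment above spells out a publication pattern: the status write (and every other initialization write) must become visible before any thread can observe a valid fast-path object size, so the size is stored last and readers pick it up with acquire semantics or an address dependency. A hedged sketch of the writer side with std::atomic, using the same all-ones sentinel and a RoundUp helper (names are illustrative, not ART's):

#include <atomic>
#include <cstdint>
#include <limits>

constexpr uint32_t kForceSlowPath = std::numeric_limits<uint32_t>::max();

constexpr uint32_t RoundUp(uint32_t x, uint32_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);  // alignment must be a power of two
}

struct FakeClass {
  uint32_t object_size = 0;                              // plain field, written during init
  std::atomic<uint32_t> status{0};                       // 1 == initialized
  std::atomic<uint32_t> fast_path_size{kForceSlowPath};  // sentinel forces the slow path
};

void PublishInitialized(FakeClass* klass, uint32_t object_size) {
  klass->object_size = object_size;
  klass->status.store(1, std::memory_order_release);
  // Stored after status: a reader that acquire-loads a valid size is therefore
  // guaranteed to also see the class as fully initialized.
  klass->fast_path_size.store(RoundUp(object_size, 8), std::memory_order_release);
}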
@@ -641,7 +652,7 @@
static ArtField* FindFieldByNameAndType(LengthPrefixedArray<ArtField>* fields,
const StringPiece& name,
const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (fields == nullptr) {
return nullptr;
}
@@ -952,14 +963,14 @@
bool is_static ATTRIBUTE_UNUSED) const {}
void VisitRootIfNonNull(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!root->IsNull()) {
VisitRoot(root);
}
}
void VisitRoot(mirror::CompressedReference<mirror::Object>* root) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Object* old_ref = root->AsMirrorPtr();
mirror::Object* new_ref = ReadBarrier::BarrierForRoot(root);
if (old_ref != new_ref) {
@@ -987,7 +998,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self_);
Handle<mirror::Class> h_new_class_obj(hs.NewHandle(obj->AsClass()));
mirror::Object::CopyObject(self_, h_new_class_obj.Get(), orig_->Get(), copy_bytes_);
@@ -1209,5 +1220,13 @@
return flags;
}
+void Class::SetObjectSizeAllocFastPath(uint32_t new_object_size) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetField32Volatile<true>(ObjectSizeAllocFastPathOffset(), new_object_size);
+ } else {
+ SetField32Volatile<false>(ObjectSizeAllocFastPathOffset(), new_object_size);
+ }
+}
+
} // namespace mirror
} // namespace art
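On the allocation side, the point of the value stored by SetObjectSizeAllocFastPath is that a single load can decide between fast and slow paths: classes that are not yet initialized, are variable-size, or are finalizable all keep the all-ones default and so fall through to the slow path. A sketch of that consumer, with malloc standing in for the real allocator (illustrative names only):

#include <atomic>
#include <cstdint>
#include <cstdlib>
#include <limits>

constexpr uint32_t kForceSlowPath = std::numeric_limits<uint32_t>::max();

struct FakeClass {
  std::atomic<uint32_t> fast_path_size{kForceSlowPath};
};

void* AllocObjectSlowPath(FakeClass* klass);  // checks status, may initialize the class

void* AllocObject(FakeClass* klass) {
  uint32_t size = klass->fast_path_size.load(std::memory_order_acquire);
  if (size != kForceSlowPath) {
    // Fast path: a valid size implies an initialized, fixed-size, non-finalizable
    // class, so allocating `size` bytes directly is safe in this sketch.
    return std::malloc(size);
  }
  return AllocObjectSlowPath(klass);          // sentinel observed: take the slow path
}

void* AllocObjectSlowPath(FakeClass* klass) {
  (void)klass;
  return nullptr;  // placeholder for the slow path in this sketch
}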
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 1fed190..99b7769 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -133,7 +133,7 @@
};
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Status GetStatus() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Status GetStatus() REQUIRES_SHARED(Locks::mutator_lock_) {
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
return static_cast<Status>(
GetField32Volatile<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
@@ -141,7 +141,7 @@
// This is static because 'this' may be moved by GC.
static void SetStatus(Handle<Class> h_this, Status new_status, Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static MemberOffset StatusOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, status_);
@@ -149,157 +149,157 @@
// Returns true if the class has been retired.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsRetired() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsRetired() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusRetired;
}
// Returns true if the class has failed to link.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsErroneous() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsErroneous() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusError;
}
// Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIdxLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsIdxLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusIdx;
}
// Returns true if the class has been loaded.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusLoaded;
}
// Returns true if the class has been linked.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsResolved() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsResolved() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusResolved;
}
// Returns true if the class was compile-time verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompileTimeVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsCompileTimeVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusRetryVerificationAtRuntime;
}
// Returns true if the class has been verified.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsVerified() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVerified() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusVerified;
}
// Returns true if the class is initializing.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitializing() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInitializing() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() >= kStatusInitializing;
}
// Returns true if the class is initialized.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetStatus<kVerifyFlags>() == kStatusInitialized;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset AccessFlagsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, access_flags_);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE uint32_t GetClassFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t GetClassFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_flags_));
}
- void SetClassFlags(uint32_t new_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClassFlags(uint32_t new_flags) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetAccessFlags(uint32_t new_access_flags) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetAccessFlags(uint32_t new_access_flags) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the class is an interface.
- ALWAYS_INLINE bool IsInterface() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsInterface() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccInterface) != 0;
}
// Returns true if the class is declared public.
- ALWAYS_INLINE bool IsPublic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsPublic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccPublic) != 0;
}
// Returns true if the class is declared final.
- ALWAYS_INLINE bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- ALWAYS_INLINE bool IsFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccClassIsFinalizable) != 0;
}
- ALWAYS_INLINE void SetRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccRecursivelyInitialized);
}
- ALWAYS_INLINE void SetHasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetHasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId());
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccHasDefaultMethod);
}
- ALWAYS_INLINE void SetFinalizable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetFinalizable() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
}
- ALWAYS_INLINE bool IsStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetClassFlags() & kClassFlagString) != 0;
}
- ALWAYS_INLINE void SetStringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetStringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(kClassFlagString | kClassFlagNoReferenceFields);
}
- ALWAYS_INLINE bool IsClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags() == kClassFlagClassLoader;
}
- ALWAYS_INLINE void SetClassLoaderClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetClassLoaderClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(kClassFlagClassLoader);
}
- ALWAYS_INLINE bool IsDexCacheClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetClassFlags() & kClassFlagDexCache) != 0;
}
- ALWAYS_INLINE void SetDexCacheClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE void SetDexCacheClass() REQUIRES_SHARED(Locks::mutator_lock_) {
SetClassFlags(GetClassFlags() | kClassFlagDexCache);
}
// Returns true if the class is abstract.
- ALWAYS_INLINE bool IsAbstract() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAbstract() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAbstract) != 0;
}
// Returns true if the class is an annotation.
- ALWAYS_INLINE bool IsAnnotation() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsAnnotation() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccAnnotation) != 0;
}
// Returns true if the class is synthetic.
- ALWAYS_INLINE bool IsSynthetic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE bool IsSynthetic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSynthetic) != 0;
}
// Return whether the class had run the verifier at least once.
// This does not necessarily mean that access checks are avoidable,
// since the class methods might still need to be run with access checks.
- bool WasVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool WasVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccSkipAccessChecks) != 0;
}
// Mark the class as having gone through a verification attempt.
// Mutually exclusive from whether or not each method is allowed to skip access checks.
- void SetVerificationAttempted() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetVerificationAttempted() REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
if ((flags & kAccVerificationAttempted) == 0) {
SetAccessFlags(flags | kAccVerificationAttempted);
@@ -307,27 +307,27 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsTypeOfReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsTypeOfReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetClassFlags<kVerifyFlags>() & kClassFlagReference) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsWeakReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagWeakReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsSoftReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagSoftReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsFinalizerReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagFinalizerReference;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPhantomReferenceClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassFlags<kVerifyFlags>() == kClassFlagPhantomReference;
}
@@ -336,7 +336,7 @@
// For array classes, where all the classes are final due to there being no sub-classes, an
// Object[] may be assigned to by a String[] but a String[] may not be assigned to by other
// types as the component is final.
- bool CannotBeAssignedFromOtherTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CannotBeAssignedFromOtherTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
if (!IsArrayClass()) {
return IsFinal();
} else {
@@ -351,19 +351,19 @@
// Returns true if this class is the placeholder and should retire and
// be replaced with a class with the right size for embedded imt/vtable.
- bool IsTemp() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsTemp() REQUIRES_SHARED(Locks::mutator_lock_) {
Status s = GetStatus();
return s < Status::kStatusResolving && ShouldHaveEmbeddedVTable();
}
- String* GetName() SHARED_REQUIRES(Locks::mutator_lock_); // Returns the cached name.
- void SetName(String* name) SHARED_REQUIRES(Locks::mutator_lock_); // Sets the cached name.
+ String* GetName() REQUIRES_SHARED(Locks::mutator_lock_); // Returns the cached name.
+ void SetName(String* name) REQUIRES_SHARED(Locks::mutator_lock_); // Sets the cached name.
// Computes the name, then sets the cached value.
- static String* ComputeName(Handle<Class> h_this) SHARED_REQUIRES(Locks::mutator_lock_)
+ static String* ComputeName(Handle<Class> h_this) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsProxyClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsProxyClass() REQUIRES_SHARED(Locks::mutator_lock_) {
// Read access flags without using getter as whether something is a proxy can be checked in
// any loaded state
// TODO: switch to a check if the super class is java.lang.reflect.Proxy?
@@ -376,9 +376,9 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ Primitive::Type GetPrimitiveType() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- void SetPrimitiveType(Primitive::Type new_type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetPrimitiveType(Primitive::Type new_type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
uint32_t v32 = static_cast<uint32_t>(new_type);
DCHECK_EQ(v32 & kPrimitiveTypeMask, v32) << "upper 16 bits aren't zero";
@@ -388,81 +388,81 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetPrimitiveTypeSizeShift() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the class is a primitive type.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitive() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitive() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() != Primitive::kPrimNot;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveBoolean() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveBoolean() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimBoolean;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveByte() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveByte() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimByte;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveChar() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveChar() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimChar;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveShort() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveShort() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimShort;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveInt() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveInt() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType() == Primitive::kPrimInt;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveLong() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveLong() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimLong;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveFloat() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveFloat() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimFloat;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveDouble() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveDouble() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimDouble;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveVoid() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveVoid() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPrimitiveType<kVerifyFlags>() == Primitive::kPrimVoid;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPrimitiveArray() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsPrimitiveArray() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsArrayClass<kVerifyFlags>() &&
GetComponentType<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>()->
IsPrimitive();
}
// Depth of class from java.lang.Object
- uint32_t Depth() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t Depth() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClassClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClassClass() REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsThrowableClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsThrowableClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsReferenceClass() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsReferenceClass() const REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset ComponentTypeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, component_type_);
@@ -470,9 +470,9 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* GetComponentType() SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* GetComponentType() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetComponentType(Class* new_component_type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetComponentType(Class* new_component_type) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(GetComponentType() == nullptr);
DCHECK(new_component_type != nullptr);
// Component type is invariant: use non-transactional mode without check.
@@ -480,46 +480,46 @@
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetComponentSize() REQUIRES_SHARED(Locks::mutator_lock_) {
return 1U << GetComponentSizeShift<kReadBarrierOption>();
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetComponentSizeShift() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetComponentSizeShift() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetComponentType<kDefaultVerifyFlags, kReadBarrierOption>()->GetPrimitiveTypeSizeShift();
}
- bool IsObjectClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return !IsPrimitive() && GetSuperClass() == nullptr;
}
- bool IsInstantiableNonArray() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInstantiableNonArray() REQUIRES_SHARED(Locks::mutator_lock_) {
return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsInstantiable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsInstantiable() REQUIRES_SHARED(Locks::mutator_lock_) {
return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
(IsAbstract() && IsArrayClass<kVerifyFlags, kReadBarrierOption>());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsObjectArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsObjectArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* const component_type = GetComponentType<kVerifyFlags, kReadBarrierOption>();
return component_type != nullptr && !component_type->IsPrimitive();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsIntArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsIntArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveInt<kNewFlags>();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsLongArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsLongArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
constexpr auto kNewFlags = static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis);
auto* component_type = GetComponentType<kVerifyFlags>();
return component_type != nullptr && component_type->template IsPrimitiveLong<kNewFlags>();
@@ -528,16 +528,16 @@
// Creates a raw object instance but does not invoke the default constructor.
template<bool kIsInstrumented, bool kCheckAddFinalizer = true>
ALWAYS_INLINE Object* Alloc(Thread* self, gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
Object* AllocObject(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
Object* AllocNonMovableObject(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsVariableSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVariableSize() REQUIRES_SHARED(Locks::mutator_lock_) {
// Classes, arrays, and strings vary in size, and so the object_size_ field cannot
// be used to Get their instance size
return IsClassClass<kVerifyFlags, kReadBarrierOption>() ||
@@ -546,17 +546,17 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetClassSize() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetClassSize() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
void SetClassSize(uint32_t new_class_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Compute how many bytes would be used a class with the given elements.
static uint32_t ComputeClassSize(bool has_embedded_vtable,
@@ -582,31 +582,40 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- uint32_t GetObjectSize() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetObjectSize() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset ObjectSizeOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, object_size_);
}
+ static MemberOffset ObjectSizeAllocFastPathOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(Class, object_size_alloc_fast_path_);
+ }
- void SetObjectSize(uint32_t new_object_size) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetObjectSize(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!IsVariableSize());
// Not called within a transaction.
return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
+ void SetObjectSizeAllocFastPath(uint32_t new_object_size) REQUIRES_SHARED(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ uint32_t GetObjectSizeAllocFastPath() REQUIRES_SHARED(Locks::mutator_lock_);
+
void SetObjectSizeWithoutChecks(uint32_t new_object_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
return SetField32<false, false, kVerifyNone>(
OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
// Returns true if this class is in the same packages as that class.
- bool IsInSamePackage(Class* that) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsInSamePackage(Class* that) REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2);
// Returns true if this class can access that class.
- bool CanAccess(Class* that) SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool CanAccess(Class* that) REQUIRES_SHARED(Locks::mutator_lock_) {
return that->IsPublic() || this->IsInSamePackage(that);
}
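The new ObjectSizeAllocFastPathOffset() accessor follows the existing OFFSET_OF_OBJECT_MEMBER pattern of exposing a field's byte offset so that compiled code and assembly stubs can load the field directly. A hedged sketch of an offsetof-based helper in the same spirit (MemberOffset and the macro below are illustrative stand-ins, not ART's definitions):

#include <cstddef>
#include <cstdint>

// Illustrative stand-in for ART's MemberOffset value type.
class MemberOffset {
 public:
  constexpr explicit MemberOffset(size_t value) : value_(value) {}
  constexpr int32_t Int32Value() const { return static_cast<int32_t>(value_); }
 private:
  size_t value_;
};

#define OFFSET_OF_MEMBER(type, field) MemberOffset(offsetof(type, field))

struct FakeClassLayout {
  uint32_t object_size_;
  uint32_t object_size_alloc_fast_path_;
};

// Analogue of ObjectSizeAllocFastPathOffset(): the returned offset lets an
// allocation stub load the fast-path size with a single displacement load.
inline MemberOffset FastPathSizeOffset() {
  return OFFSET_OF_MEMBER(FakeClassLayout, object_size_alloc_fast_path_);
}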
@@ -614,7 +623,7 @@
// Note that access to the class isn't checked in case the declaring class is protected and the
// method has been exposed by a public sub-class
bool CanAccessMember(Class* access_to, uint32_t member_flags)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Classes can access all of their own members
if (this == access_to) {
return true;
@@ -642,40 +651,40 @@
// referenced by the FieldId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedField(Class* access_to, ArtField* field,
DexCache* dex_cache, uint32_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CheckResolvedFieldAccess(Class* access_to, ArtField* field,
uint32_t field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this class access a resolved method?
// Note that access to the method's class is checked and this may require looking up the class
// referenced by the MethodId in the DexFile in case the declaring class is inaccessible.
bool CanAccessResolvedMethod(Class* access_to, ArtMethod* resolved_method,
DexCache* dex_cache, uint32_t method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <InvokeType throw_invoke_type>
bool CheckResolvedMethodAccess(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool IsSubClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsSubClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Can src be assigned to this class? For example, String can be assigned to Object (by an
// upcast), however, an Object cannot be assigned to a String as a potentially exception throwing
// downcast would be necessary. Similarly for interfaces, a class that implements (or an interface
// that extends) another can be assigned to its parent, but not vice-versa. All Classes may assign
// to themselves. Classes for primitive types may not assign to each other.
- ALWAYS_INLINE bool IsAssignableFrom(Class* src) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE bool IsAssignableFrom(Class* src) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE Class* GetSuperClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetSuperClass() REQUIRES_SHARED(Locks::mutator_lock_);
// Get first common super class. It will never return null.
// `This` and `klass` must be classes.
- Class* GetCommonSuperClass(Handle<Class> klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* GetCommonSuperClass(Handle<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetSuperClass(Class* new_super_class) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetSuperClass(Class* new_super_class) REQUIRES_SHARED(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
@@ -683,7 +692,7 @@
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
}
- bool HasSuperClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasSuperClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetSuperClass() != nullptr;
}
@@ -691,9 +700,9 @@
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
- ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ ClassLoader* GetClassLoader() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
- void SetClassLoader(ClassLoader* new_cl) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClassLoader(ClassLoader* new_cl) REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, dex_cache_));
@@ -705,125 +714,125 @@
kDumpClassInitialized = (1 << 2),
};
- void DumpClass(std::ostream& os, int flags) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpClass(std::ostream& os, int flags) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
// Also updates the dex_cache_strings_ variable from new_dex_cache.
- void SetDexCache(DexCache* new_dex_cache) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetDexCache(DexCache* new_dex_cache) REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDirectMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE LengthPrefixedArray<ArtMethod>* GetMethodsPtr()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset MethodsOffset() {
return MemberOffset(OFFSETOF_MEMBER(Class, methods_));
}
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetMethodsPtr(LengthPrefixedArray<ArtMethod>* new_methods,
uint32_t num_direct,
uint32_t num_virtual)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used by image writer.
void SetMethodsPtrUnchecked(LengthPrefixedArray<ArtMethod>* new_methods,
uint32_t num_direct,
uint32_t num_virtual)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetDirectMethod(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Use only when we are allocating and populating the method arrays.
ALWAYS_INLINE ArtMethod* GetDirectMethodUnchecked(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetVirtualMethodUnchecked(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of static, private, and constructor methods.
- ALWAYS_INLINE uint32_t NumDirectMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectMethods() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredMethods(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive>
static Method* GetDeclaredMethodInternal(Thread* self,
mirror::Class* klass,
mirror::String* name,
mirror::ObjectArray<mirror::Class>* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* GetDeclaredConstructorInternal(Thread* self,
mirror::Class* klass,
mirror::ObjectArray<mirror::Class>* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetDeclaredVirtualMethods(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetCopiedMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSlice(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtMethod>> GetVirtualMethods(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of non-inherited virtual methods (sum of declared and copied methods).
- ALWAYS_INLINE uint32_t NumVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of copied virtual methods.
- ALWAYS_INLINE uint32_t NumCopiedVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumCopiedVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of declared virtual methods.
- ALWAYS_INLINE uint32_t NumDeclaredVirtualMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDeclaredVirtualMethods() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t NumMethods() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumMethods() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ArtMethod* GetVirtualMethod(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetVirtualMethodDuringLinking(size_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE PointerArray* GetVTable() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTable() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE PointerArray* GetVTableDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE PointerArray* GetVTableDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetVTable(PointerArray* new_vtable) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetVTable(PointerArray* new_vtable) REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset VTableOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, vtable_);
@@ -841,212 +850,212 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool ShouldHaveImt() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ShouldHaveImt() REQUIRES_SHARED(Locks::mutator_lock_) {
return ShouldHaveEmbeddedVTable<kVerifyFlags, kReadBarrierOption>();
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool ShouldHaveEmbeddedVTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool ShouldHaveEmbeddedVTable() REQUIRES_SHARED(Locks::mutator_lock_) {
return IsInstantiable<kVerifyFlags, kReadBarrierOption>();
}
- bool HasVTable() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HasVTable() REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset EmbeddedVTableEntryOffset(uint32_t i, PointerSize pointer_size);
- int32_t GetVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetVTableEntry(uint32_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetEmbeddedVTableLength() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetEmbeddedVTableLength() REQUIRES_SHARED(Locks::mutator_lock_);
- void SetEmbeddedVTableLength(int32_t len) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetEmbeddedVTableLength(int32_t len) REQUIRES_SHARED(Locks::mutator_lock_);
- ImTable* GetImt(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ImTable* GetImt(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetImt(ImTable* imt, PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImt(ImTable* imt, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetEmbeddedVTableEntry(uint32_t i, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetEmbeddedVTableEntry(uint32_t i, ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
inline void SetEmbeddedVTableEntryUnchecked(uint32_t i,
ArtMethod* method,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PopulateEmbeddedVTable(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method implemented by this class but potentially from a super class, return the
// specific implementation method for this class.
ArtMethod* FindVirtualMethodForVirtual(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method implemented by this class' super class, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForSuper(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method from some implementor of this interface, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForInterfaceSuper(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Given a method implemented by this class, but potentially from a
// super class or interface, return the specific implementation
// method for this class.
ArtMethod* FindVirtualMethodForInterface(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) ALWAYS_INLINE;
+ REQUIRES_SHARED(Locks::mutator_lock_) ALWAYS_INLINE;
ArtMethod* FindVirtualMethodForVirtualOrInterface(ArtMethod* method, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInterfaceMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDirectMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredVirtualMethodByName(const StringPiece& name, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindDeclaredDirectMethodByName(const StringPiece& name, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const StringPiece& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const StringPiece& name, const Signature& signature,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindVirtualMethod(const DexCache* dex_cache, uint32_t dex_method_idx,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* FindClassInitializer(PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* FindClassInitializer(PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
- bool HasDefaultMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasDefaultMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccHasDefaultMethod) != 0;
}
- bool HasBeenRecursivelyInitialized() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool HasBeenRecursivelyInitialized() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccRecursivelyInitialized) != 0;
}
- ALWAYS_INLINE int32_t GetIfTableCount() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE int32_t GetIfTableCount() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE IfTable* GetIfTable() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE IfTable* GetIfTable() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE void SetIfTable(IfTable* new_iftable) REQUIRES_SHARED(Locks::mutator_lock_);
// Get instance fields of the class (See also GetSFields).
- LengthPrefixedArray<ArtField>* GetIFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetIFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetIFields()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetIFieldsPtr(LengthPrefixedArray<ArtField>* new_ifields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
void SetIFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t NumInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_);
- ArtField* GetInstanceField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t NumInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtField* GetInstanceField(uint32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of instance fields containing reference types. Does not count fields in any
// super classes.
- uint32_t NumReferenceInstanceFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFields() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceInstanceFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetNumReferenceInstanceFields(uint32_t new_num) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the offset of the first reference instance field. Other reference instance fields follow.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
MemberOffset GetFirstReferenceInstanceFieldOffset()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the number of static fields containing reference types.
- uint32_t NumReferenceStaticFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFields() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t NumReferenceStaticFieldsDuringLinking() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous() || IsRetired());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- void SetNumReferenceStaticFields(uint32_t new_num) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetNumReferenceStaticFields(uint32_t new_num) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
}
@@ -1055,53 +1064,53 @@
template <VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
MemberOffset GetFirstReferenceStaticFieldOffset(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the offset of the first reference static field. Other reference static fields follow.
MemberOffset GetFirstReferenceStaticFieldOffsetDuringLinking(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Gets the static fields of the class.
- LengthPrefixedArray<ArtField>* GetSFieldsPtr() SHARED_REQUIRES(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetSFieldsPtr() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE IterationRange<StrideIterator<ArtField>> GetSFields()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetSFieldsPtr(LengthPrefixedArray<ArtField>* new_sfields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Unchecked edition has no verification flags.
void SetSFieldsPtrUnchecked(LengthPrefixedArray<ArtField>* new_sfields)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- uint32_t NumStaticFields() SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t NumStaticFields() REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: uint16_t
- ArtField* GetStaticField(uint32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetStaticField(uint32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Find a static or instance field using the JLS resolution order
static ArtField* FindField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass.
ArtField* FindInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given instance field in this class or a superclass; only searches classes that
// have the same dex cache.
ArtField* FindInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const StringPiece& name, const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredInstanceField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or a superclass.
static ArtField* FindStaticField(Thread* self, Handle<Class> klass, const StringPiece& name,
const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Finds the given static field in this class or superclass; only searches classes that
// have the same dex cache.
@@ -1109,122 +1118,122 @@
Class* klass,
const DexCache* dex_cache,
uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const StringPiece& name, const StringPiece& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindDeclaredStaticField(const DexCache* dex_cache, uint32_t dex_field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- pid_t GetClinitThreadId() SHARED_REQUIRES(Locks::mutator_lock_) {
+ pid_t GetClinitThreadId() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsIdxLoaded() || IsErroneous()) << PrettyClass(this);
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
}
- void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClinitThreadId(pid_t new_clinit_thread_id) REQUIRES_SHARED(Locks::mutator_lock_);
- Object* GetVerifyError() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetVerifyError() REQUIRES_SHARED(Locks::mutator_lock_) {
// DCHECK(IsErroneous());
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_));
}
- uint16_t GetDexClassDefIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint16_t GetDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
}
- void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexClassDefIndex(uint16_t class_def_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
}
- uint16_t GetDexTypeIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint16_t GetDexTypeIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_));
}
- void SetDexTypeIndex(uint16_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexTypeIndex(uint16_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx);
}
uint32_t FindTypeIndexInOtherDexFile(const DexFile& dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- static Class* GetJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(HasJavaLangClass());
return java_lang_Class_.Read();
}
- static bool HasJavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static bool HasJavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return !java_lang_Class_.IsNull();
}
// Can't call this SetClass or else gets called instead of Object::SetClass in places.
- static void SetClassClass(Class* java_lang_Class) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClassClass(Class* java_lang_Class) REQUIRES_SHARED(Locks::mutator_lock_);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// VisitNativeRoots visits roots that are keyed off native pointers, such as ArtFields and
// ArtMethods.
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor>
void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// When class is verified, set the kAccSkipAccessChecks flag on each method.
void SetSkipAccessChecksFlagOnAllMethods(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the descriptor of the class. In a few cases a std::string is required; rather than
// always creating one, the storage argument is populated and its internal c_str() is returned.
// We do this to avoid memory allocation in the common case.
- const char* GetDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetDescriptor(std::string* storage) REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetArrayDescriptor(std::string* storage) SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetArrayDescriptor(std::string* storage) REQUIRES_SHARED(Locks::mutator_lock_);
- bool DescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool DescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::ClassDef* GetClassDef() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::ClassDef* GetClassDef() REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t NumDirectInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t NumDirectInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_);
static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass,
uint32_t idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- const char* GetSourceFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const char* GetSourceFile() REQUIRES_SHARED(Locks::mutator_lock_);
- std::string GetLocation() SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string GetLocation() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile& GetDexFile() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile& GetDexFile() REQUIRES_SHARED(Locks::mutator_lock_);
- const DexFile::TypeList* GetInterfaceTypeList() SHARED_REQUIRES(Locks::mutator_lock_);
+ const DexFile::TypeList* GetInterfaceTypeList() REQUIRES_SHARED(Locks::mutator_lock_);
// Asserts we are initialized or initializing in the given thread.
void AssertInitializedOrInitializingInThread(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Class* CopyOf(Thread* self, int32_t new_length, ImTable* imt,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// For proxy class only.
- ObjectArray<Class>* GetInterfaces() SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectArray<Class>* GetInterfaces() REQUIRES_SHARED(Locks::mutator_lock_);
// For proxy class only.
- ObjectArray<ObjectArray<Class>>* GetThrows() SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectArray<ObjectArray<Class>>* GetThrows() REQUIRES_SHARED(Locks::mutator_lock_);
// For reference class only.
- MemberOffset GetDisableIntrinsicFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
- MemberOffset GetSlowPathFlagOffset() SHARED_REQUIRES(Locks::mutator_lock_);
- bool GetSlowPathEnabled() SHARED_REQUIRES(Locks::mutator_lock_);
- void SetSlowPath(bool enabled) SHARED_REQUIRES(Locks::mutator_lock_);
+ MemberOffset GetDisableIntrinsicFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_);
+ MemberOffset GetSlowPathFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_);
+ bool GetSlowPathEnabled() REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetSlowPath(bool enabled) REQUIRES_SHARED(Locks::mutator_lock_);
- StringDexCacheType* GetDexCacheStrings() SHARED_REQUIRES(Locks::mutator_lock_);
+ StringDexCacheType* GetDexCacheStrings() REQUIRES_SHARED(Locks::mutator_lock_);
void SetDexCacheStrings(StringDexCacheType* new_dex_cache_strings)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MemberOffset DexCacheStringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_strings_);
}
@@ -1232,10 +1241,10 @@
// May cause thread suspension due to EqualParameters.
ArtMethod* GetDeclaredConstructor(
Thread* self, Handle<mirror::ObjectArray<mirror::Class>> args, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static int32_t GetInnerClassFlags(Handle<Class> h_this, int32_t default_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used to initialize a class in the allocation code path to ensure it is guarded by a StoreStore
// fence.
@@ -1245,7 +1254,7 @@
}
void operator()(mirror::Object* obj, size_t usable_size) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
const uint32_t class_size_;
@@ -1254,7 +1263,7 @@
};
// Returns true if the class loader is null, i.e. the class loader is the bootstrap class loader.
- bool IsBootStrapClassLoaded() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsBootStrapClassLoaded() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetClassLoader() == nullptr;
}
@@ -1267,20 +1276,20 @@
}
ALWAYS_INLINE ArraySlice<ArtMethod> GetDirectMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetVirtualMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetDeclaredVirtualMethodsSliceUnchecked(
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArraySlice<ArtMethod> GetCopiedMethodsSliceUnchecked(PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Fix up all of the native pointers in the class by running them through the visitor. Only sets
// the corresponding entry in dest if visitor(obj) != obj to prevent dirty memory. Dest should be
@@ -1290,47 +1299,47 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void FixupNativePointers(mirror::Class* dest, PointerSize pointer_size, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
ALWAYS_INLINE void SetMethodsPtrInternal(LengthPrefixedArray<ArtMethod>* new_methods)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void SetVerifyError(Object* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetVerifyError(Object* klass) REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache>
bool ResolvedFieldAccessTest(Class* access_to, ArtField* field,
uint32_t field_idx, DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <bool throw_on_failure, bool use_referrers_cache, InvokeType throw_invoke_type>
bool ResolvedMethodAccessTest(Class* access_to, ArtMethod* resolved_method,
uint32_t method_idx, DexCache* dex_cache)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool Implements(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsArrayAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsAssignableFromArray(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Implements(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsArrayAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsAssignableFromArray(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- void CheckObjectAlloc() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckObjectAlloc() REQUIRES_SHARED(Locks::mutator_lock_);
// Unchecked editions are for root visiting.
- LengthPrefixedArray<ArtField>* GetSFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetSFieldsPtrUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
IterationRange<StrideIterator<ArtField>> GetSFieldsUnchecked()
- SHARED_REQUIRES(Locks::mutator_lock_);
- LengthPrefixedArray<ArtField>* GetIFieldsPtrUnchecked() SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ LengthPrefixedArray<ArtField>* GetIFieldsPtrUnchecked() REQUIRES_SHARED(Locks::mutator_lock_);
IterationRange<StrideIterator<ArtField>> GetIFieldsUnchecked()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The index in the methods_ array where the first declared virtual method is.
- ALWAYS_INLINE uint32_t GetVirtualMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetVirtualMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
// The index in the methods_ array where the first direct method is.
- ALWAYS_INLINE uint32_t GetDirectMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetDirectMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
// The index in the methods_ array where the first copied method is.
- ALWAYS_INLINE uint32_t GetCopiedMethodsStartOffset() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetCopiedMethodsStartOffset() REQUIRES_SHARED(Locks::mutator_lock_);
- bool ProxyDescriptorEquals(const char* match) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ProxyDescriptorEquals(const char* match) REQUIRES_SHARED(Locks::mutator_lock_);
// Check that the pointer size matches the one in the class linker.
ALWAYS_INLINE static void CheckPointerSize(PointerSize pointer_size);
@@ -1341,7 +1350,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// 'Class' Object Fields
// Order governed by java field ordering. See art::ClassLinker::LinkFields.
@@ -1457,6 +1466,10 @@
// See also class_size_.
uint32_t object_size_;
+ // Aligned object size for allocation fast path. The value is max uint32_t if the object is
+ // uninitialized or finalizable. Not currently used for variable sized objects.
+ uint32_t object_size_alloc_fast_path_;
+
// The lower 16 bits contains a Primitive::Type value. The upper 16
// bits contains the size shift of the primitive type.
uint32_t primitive_type_;
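
The only substantive change in these mirror headers is the annotation spelling: SHARED_REQUIRES(Locks::mutator_lock_) becomes REQUIRES_SHARED(Locks::mutator_lock_), matching the verb-first order Clang's thread-safety analysis uses for its capability attributes. The sketch below shows, under that assumption, roughly what such a macro wraps; ART_TSA, ToyMutatorLock and GetAccessFlagsLike are illustrative names only, and the real macro definitions live elsewhere in the tree, outside this change.

  #if defined(__clang__)
  #define ART_TSA(...) __attribute__((__VA_ARGS__))
  #else
  #define ART_TSA(...)  // no-op for compilers without the analysis
  #endif

  #define REQUIRES_SHARED(...) ART_TSA(requires_shared_capability(__VA_ARGS__))

  // A capability the analysis can track; "mutator lock" here is only a toy stand-in.
  class ART_TSA(capability("mutex")) ToyMutatorLock {
   public:
    void SharedLock() ART_TSA(acquire_shared_capability()) {}
    void SharedUnlock() ART_TSA(release_shared_capability()) {}
  };

  ToyMutatorLock toy_mutator_lock;

  // With -Wthread-safety, Clang warns if a caller does not hold toy_mutator_lock
  // (shared or exclusive) when calling this function.
  int GetAccessFlagsLike() REQUIRES_SHARED(toy_mutator_lock) { return 0; }
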
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 1957e13..407678a 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -36,26 +36,26 @@
return sizeof(ClassLoader);
}
- ClassLoader* GetParent() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ClassLoader* GetParent() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, parent_));
}
- ClassTable* GetClassTable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ClassTable* GetClassTable() REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ClassTable*>(
GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_)));
}
- void SetClassTable(ClassTable* class_table) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetClassTable(ClassTable* class_table) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, class_table_),
reinterpret_cast<uint64_t>(class_table));
}
- LinearAlloc* GetAllocator() SHARED_REQUIRES(Locks::mutator_lock_) {
+ LinearAlloc* GetAllocator() REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<LinearAlloc*>(
GetField64(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_)));
}
- void SetAllocator(LinearAlloc* allocator) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAllocator(LinearAlloc* allocator) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField64<false>(OFFSET_OF_OBJECT_MEMBER(ClassLoader, allocator_),
reinterpret_cast<uint64_t>(allocator));
}
@@ -68,7 +68,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::classlinker_classes_lock_);
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
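
VisitReferences() in class_loader.h above carries two requirements at once: the mutator lock must be held at least shared, and Locks::classlinker_classes_lock_ must not be held, presumably because it is taken inside. A minimal sketch of how that combination reads to the analysis, with toy lock objects rather than art::Locks; checking the negative ("must not hold") part additionally needs Clang's -Wthread-safety-negative, as far as I recall.

  #define TSA(...) __attribute__((__VA_ARGS__))

  class TSA(capability("mutex")) ToyLock {
   public:
    void Lock() TSA(acquire_capability()) {}
    void Unlock() TSA(release_capability()) {}
    void SharedLock() TSA(acquire_shared_capability()) {}
    void SharedUnlock() TSA(release_shared_capability()) {}
  };

  ToyLock mutator_lock;
  ToyLock classes_lock;

  void VisitReferencesLike()
      TSA(requires_shared_capability(mutator_lock))  // caller holds mutator_lock, at least shared
      TSA(requires_capability(!classes_lock)) {      // and must NOT already hold classes_lock,
    classes_lock.Lock();                             // because it is acquired in here.
    classes_lock.Unlock();
  }
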
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 4ddfc7b..caf00c2 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -105,20 +105,20 @@
uint32_t num_resolved_methods,
ArtField** resolved_fields,
uint32_t num_resolved_fields,
- PointerSize pointer_size) SHARED_REQUIRES(Locks::mutator_lock_);
+ PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_);
void Fixup(ArtMethod* trampoline, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
void FixupStrings(StringDexCacheType* dest, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier, typename Visitor>
void FixupResolvedTypes(GcRoot<mirror::Class>* dest, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- String* GetLocation() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetLocation() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
@@ -159,94 +159,94 @@
}
mirror::String* GetResolvedString(uint32_t string_idx) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetResolvedString(uint32_t string_idx, mirror::String* resolved) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- Class* GetResolvedType(uint32_t type_idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* GetResolvedType(uint32_t type_idx) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetResolvedType(uint32_t type_idx, Class* resolved) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetResolvedType(uint32_t type_idx, Class* resolved) REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE ArtMethod* GetResolvedMethod(uint32_t method_idx, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE void SetResolvedMethod(uint32_t method_idx,
ArtMethod* resolved,
PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE ArtField* GetResolvedField(uint32_t idx, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Pointer sized variant, used for patching.
ALWAYS_INLINE void SetResolvedField(uint32_t idx, ArtField* field, PointerSize ptr_size)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- StringDexCacheType* GetStrings() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ StringDexCacheType* GetStrings() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr64<StringDexCacheType*>(StringsOffset());
}
- void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetStrings(StringDexCacheType* strings) ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(StringsOffset(), strings);
}
- GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ GcRoot<Class>* GetResolvedTypes() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<GcRoot<Class>*>(ResolvedTypesOffset());
}
void SetResolvedTypes(GcRoot<Class>* resolved_types)
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedTypesOffset(), resolved_types);
}
- ArtMethod** GetResolvedMethods() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod** GetResolvedMethods() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<ArtMethod**>(ResolvedMethodsOffset());
}
void SetResolvedMethods(ArtMethod** resolved_methods)
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedMethodsOffset(), resolved_methods);
}
- ArtField** GetResolvedFields() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtField** GetResolvedFields() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<ArtField**>(ResolvedFieldsOffset());
}
void SetResolvedFields(ArtField** resolved_fields)
ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(ResolvedFieldsOffset(), resolved_fields);
}
- size_t NumStrings() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumStrings() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumStringsOffset());
}
- size_t NumResolvedTypes() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumResolvedTypes() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumResolvedTypesOffset());
}
- size_t NumResolvedMethods() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumResolvedMethods() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumResolvedMethodsOffset());
}
- size_t NumResolvedFields() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumResolvedFields() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(NumResolvedFieldsOffset());
}
- const DexFile* GetDexFile() ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DexFile* GetDexFile() ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
}
- void SetDexFile(const DexFile* dex_file) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexFile(const DexFile* dex_file) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
- void SetLocation(mirror::String* location) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetLocation(mirror::String* location) REQUIRES_SHARED(Locks::mutator_lock_);
// NOTE: Get/SetElementPtrSize() are intended for working with ArtMethod** and ArtField**
// provided by GetResolvedMethods/Fields() and ArtMethod::GetDexCacheResolvedMethods(),
@@ -265,7 +265,7 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitReferences(mirror::Class* klass, const Visitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
HeapReference<Object> dex_;
HeapReference<String> location_;
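
DexCache::VisitReferences() above mixes a shared requirement on the mutator lock with an exclusive one on Locks::heap_bitmap_lock_. The sketch below shows a caller satisfying such a dual requirement; the lock objects and function names are toy stand-ins, not ART's.

  #define TSA(...) __attribute__((__VA_ARGS__))

  class TSA(capability("mutex")) ToyLock {
   public:
    void Lock() TSA(acquire_capability()) {}
    void Unlock() TSA(release_capability()) {}
    void SharedLock() TSA(acquire_shared_capability()) {}
    void SharedUnlock() TSA(release_shared_capability()) {}
  };

  ToyLock mutator_lock;
  ToyLock heap_bitmap_lock;

  void VisitReferencesLike()
      TSA(requires_shared_capability(mutator_lock))   // read-side access is enough here
      TSA(requires_capability(heap_bitmap_lock)) {}   // but the bitmap lock must be exclusive

  void Caller() {
    mutator_lock.SharedLock();
    heap_bitmap_lock.Lock();
    VisitReferencesLike();   // both requirements satisfied, so no -Wthread-safety warning
    heap_bitmap_lock.Unlock();
    mutator_lock.SharedUnlock();
  }
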
diff --git a/runtime/mirror/field.h b/runtime/mirror/field.h
index 93fd7f1..7eb9da4 100644
--- a/runtime/mirror/field.h
+++ b/runtime/mirror/field.h
@@ -37,66 +37,66 @@
// C++ mirror of java.lang.reflect.Field.
class MANAGED Field : public AccessibleObject {
public:
- static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return static_class_.Read();
}
- static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return array_class_.Read();
}
- ALWAYS_INLINE uint32_t GetDexFieldIndex() SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE uint32_t GetDexFieldIndex() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_));
}
- mirror::Class* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_));
}
- uint32_t GetAccessFlags() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint32_t GetAccessFlags() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_));
}
- bool IsStatic() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsStatic() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccStatic) != 0;
}
- bool IsFinal() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsFinal() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccFinal) != 0;
}
- bool IsVolatile() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsVolatile() REQUIRES_SHARED(Locks::mutator_lock_) {
return (GetAccessFlags() & kAccVolatile) != 0;
}
ALWAYS_INLINE Primitive::Type GetTypeAsPrimitiveType()
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetType()->GetPrimitiveType();
}
- mirror::Class* GetType() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetType() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<mirror::Class>(OFFSET_OF_OBJECT_MEMBER(Field, type_));
}
- int32_t GetOffset() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetOffset() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(Field, offset_));
}
- static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
// Slow, try to use only for PrettyField and such.
- ArtField* GetArtField() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetArtField() REQUIRES_SHARED(Locks::mutator_lock_);
template <PointerSize kPointerSize, bool kTransactionActive = false>
static mirror::Field* CreateFromArtField(Thread* self, ArtField* field,
bool force_resolve)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
private:
HeapReference<mirror::Class> declaring_class_;
@@ -106,27 +106,27 @@
int32_t offset_;
template<bool kTransactionActive>
- void SetDeclaringClass(mirror::Class* c) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDeclaringClass(mirror::Class* c) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, declaring_class_), c);
}
template<bool kTransactionActive>
- void SetType(mirror::Class* type) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetType(mirror::Class* type) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, type_), type);
}
template<bool kTransactionActive>
- void SetAccessFlags(uint32_t flags) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetAccessFlags(uint32_t flags) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, access_flags_), flags);
}
template<bool kTransactionActive>
- void SetDexFieldIndex(uint32_t idx) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetDexFieldIndex(uint32_t idx) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, dex_field_index_), idx);
}
template<bool kTransactionActive>
- void SetOffset(uint32_t offset) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetOffset(uint32_t offset) REQUIRES_SHARED(Locks::mutator_lock_) {
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(Field, offset_), offset);
}
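
The Field accessors above never touch members directly; they go through byte offsets (OFFSET_OF_OBJECT_MEMBER plus GetField32/SetField32), which funnels every heap access through a single helper. A rough, self-contained illustration of that offset-based pattern; MemberOffset and the macro here are simplified stand-ins, not ART's definitions.

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  struct MemberOffset { size_t value; };
  #define OFFSET_OF_MEMBER(type, field) MemberOffset{offsetof(type, field)}

  struct ToyField {
    uint32_t access_flags_;
    uint32_t dex_field_index_;

    uint32_t GetField32(MemberOffset off) const {
      uint32_t v;
      // Read the 32-bit field at a raw byte offset from the object base.
      std::memcpy(&v, reinterpret_cast<const char*>(this) + off.value, sizeof(v));
      return v;
    }
    uint32_t GetAccessFlags() const {
      return GetField32(OFFSET_OF_MEMBER(ToyField, access_flags_));
    }
  };
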
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index d6571f2..a1a2f98 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -25,18 +25,18 @@
class MANAGED IfTable FINAL : public ObjectArray<Object> {
public:
- ALWAYS_INLINE Class* GetInterface(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE Class* GetInterface(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
Class* interface = GetWithoutChecks((i * kMax) + kInterface)->AsClass();
DCHECK(interface != nullptr);
return interface;
}
ALWAYS_INLINE void SetInterface(int32_t i, Class* interface)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- PointerArray* GetMethodArray(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ PointerArray* GetMethodArray(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(Get<kVerifyFlags, kReadBarrierOption>(
(i * kMax) + kMethodArray));
DCHECK(method_array != nullptr);
@@ -45,20 +45,20 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t GetMethodArrayCount(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetMethodArrayCount(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_) {
auto* method_array = down_cast<PointerArray*>(
Get<kVerifyFlags, kReadBarrierOption>((i * kMax) + kMethodArray));
return method_array == nullptr ? 0u : method_array->GetLength();
}
- void SetMethodArray(int32_t i, PointerArray* arr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetMethodArray(int32_t i, PointerArray* arr) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(arr != nullptr);
auto idx = i * kMax + kMethodArray;
DCHECK(Get(idx) == nullptr);
Set<false>(idx, arr);
}
- size_t Count() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t Count() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetLength() / kMax;
}
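
IfTable above flattens (interface, method array) pairs into a single object array with a fixed stride: entry i's interface sits at i * kMax + kInterface, its method array at i * kMax + kMethodArray, and Count() is GetLength() / kMax. A plain std::vector sketch of the same layout; the element types are toys, and the slot values 0/1/2 are assumed from the accessors, not quoted from the enum.

  #include <cstddef>
  #include <vector>

  class FlatPairTable {
   public:
    static constexpr size_t kInterface = 0;
    static constexpr size_t kMethodArray = 1;
    static constexpr size_t kMax = 2;  // slots per entry

    size_t Count() const { return slots_.size() / kMax; }
    void* GetInterface(size_t i) const { return slots_[i * kMax + kInterface]; }
    void* GetMethodArray(size_t i) const { return slots_[i * kMax + kMethodArray]; }
    void Append(void* iface, void* methods) {
      slots_.push_back(iface);
      slots_.push_back(methods);
    }

   private:
    std::vector<void*> slots_;  // [iface0, methods0, iface1, methods1, ...]
  };
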
diff --git a/runtime/mirror/method.h b/runtime/mirror/method.h
index be51784..6881991 100644
--- a/runtime/mirror/method.h
+++ b/runtime/mirror/method.h
@@ -30,25 +30,25 @@
public:
template <PointerSize kPointerSize, bool kTransactionActive>
static Method* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Method.class.
@@ -62,25 +62,25 @@
public:
template <PointerSize kPointerSize, bool kTransactionActive>
static Constructor* CreateFromArtMethod(Thread* self, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
- static mirror::Class* StaticClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* StaticClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return static_class_.Read();
}
- static void SetClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static mirror::Class* ArrayClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static mirror::Class* ArrayClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return array_class_.Read();
}
- static void SetArrayClass(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetArrayClass(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
- static void ResetArrayClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void ResetArrayClass() REQUIRES_SHARED(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
static GcRoot<Class> static_class_; // java.lang.reflect.Constructor.class.
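
Method and Constructor above keep their java.lang.reflect.* classes in GcRoot<Class> statics, read through Read() and reported to the GC via VisitRoots(), so a moving collector can update them. A toy sketch of that shape; ToyGcRoot is a stand-in and ignores the read barriers the real GcRoot goes through.

  template <typename T>
  class ToyGcRoot {
   public:
    T* Read() const { return ref_; }
    void Assign(T* ref) { ref_ = ref; }
    // Hand the GC the address of the reference so it can relocate what it points to.
    template <typename Visitor>
    void VisitRoot(Visitor&& visitor) { visitor(&ref_); }
   private:
    T* ref_ = nullptr;
  };

  struct ToyClass {};
  static ToyGcRoot<ToyClass> static_class_;  // e.g. the java.lang.reflect.Method class

  ToyClass* StaticClassLike() { return static_class_.Read(); }
  void SetClassLike(ToyClass* klass) { static_class_.Assign(klass); }
  void ResetClassLike() { static_class_.Assign(nullptr); }
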
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 27f8bd7..0f5cbb2 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -542,7 +542,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
@@ -560,7 +560,7 @@
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
bool kIsVolatile>
inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
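
The object-inl.h setters above take the transaction mode as template parameters, so the DCHECK against Runtime::Current()->IsActiveTransaction() vanishes when kCheckTransaction is false and the rollback bookkeeping compiles away when kTransactionActive is false. A standalone sketch of that pattern; the global flag and assert stand in for the runtime query and DCHECK_EQ.

  #include <cassert>
  #include <cstdint>

  bool g_active_transaction = false;  // stand-in for Runtime::Current()->IsActiveTransaction()

  template <bool kTransactionActive, bool kCheckTransaction = true>
  void SetField32Like(uint32_t* field, uint32_t new_value) {
    if (kCheckTransaction) {
      // Verify the caller's compile-time assumption against the runtime state.
      assert(kTransactionActive == g_active_transaction);
    }
    if (kTransactionActive) {
      // A real runtime would record the old value here so the transaction can roll back.
    }
    *field = new_value;
  }

  // Hot paths pin both flags at compile time, e.g. SetField32Like<false, false>(&word, 1);
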
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 13c536e..c37deb5 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -47,7 +47,7 @@
: dest_obj_(dest_obj) {}
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
- ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// GetFieldObject() contains a RB.
Object* ref = obj->GetFieldObject<Object>(offset);
// No WB here as a large object space does not have a card table
@@ -56,7 +56,7 @@
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
- ALWAYS_INLINE SHARED_REQUIRES(Locks::mutator_lock_) {
+ ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_) {
// Copy java.lang.ref.Reference.referent which isn't visited in
// Object::VisitReferences().
DCHECK(klass->IsTypeOfReferenceClass());
@@ -112,7 +112,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Object::CopyObject(self_, obj, orig_->Get(), num_bytes_);
}
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 8649294..262cb57 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -88,49 +88,49 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE Class* GetClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE Class* GetClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetClass(Class* new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: Clean these up and change to return int32_t
- Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
+ Object* GetReadBarrierPointer() REQUIRES_SHARED(Locks::mutator_lock_);
// Get the read barrier pointer with release semantics, only supported for baker.
- Object* GetReadBarrierPointerAcquire() SHARED_REQUIRES(Locks::mutator_lock_);
+ Object* GetReadBarrierPointerAcquire() REQUIRES_SHARED(Locks::mutator_lock_);
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
- void SetReadBarrierPointer(Object* rb_ptr) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetReadBarrierPointer(Object* rb_ptr) REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kCasRelease = false>
ALWAYS_INLINE bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE uint32_t GetMarkBit() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void AssertReadBarrierPointer() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssertReadBarrierPointer() const REQUIRES_SHARED(Locks::mutator_lock_);
// The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
// invoke-interface to detect incompatible interface types.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool VerifierInstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool VerifierInstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE bool InstanceOf(Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
- Object* Clone(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ Object* Clone(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
int32_t IdentityHashCode() const
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
static MemberOffset MonitorOffset() {
@@ -140,356 +140,356 @@
// As_volatile can be false if the mutators are suspended. This is an optimization since it
// avoids the barriers.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- LockWord GetLockWord(bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
+ LockWord GetLockWord(bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetLockWord(LockWord new_val, bool as_volatile) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetLockWord(LockWord new_val, bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
// Try to enter the monitor, returns non null if we succeeded.
mirror::Object* MonitorTryEnter(Thread* self)
EXCLUSIVE_LOCK_FUNCTION()
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* MonitorEnter(Thread* self)
EXCLUSIVE_LOCK_FUNCTION()
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool MonitorExit(Thread* self)
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
UNLOCK_FUNCTION();
- void Notify(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void NotifyAll(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void Wait(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
- void Wait(Thread* self, int64_t timeout, int32_t nanos) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Notify(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void NotifyAll(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Wait(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Wait(Thread* self, int64_t timeout, int32_t nanos) REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Class* AsClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ Class* AsClass() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<class T,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ObjectArray<T>* AsObjectArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectArray<T>* AsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ClassLoader* AsClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
+ ClassLoader* AsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- DexCache* AsDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
+ DexCache* AsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsArrayInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Array* AsArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ Array* AsArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- BooleanArray* AsBooleanArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ BooleanArray* AsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ByteArray* AsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ByteArray* AsByteSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ByteArray* AsByteSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- CharArray* AsCharArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ CharArray* AsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ShortArray* AsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- ShortArray* AsShortSizedArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ ShortArray* AsShortSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- IntArray* AsIntArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ IntArray* AsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- LongArray* AsLongArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ LongArray* AsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FloatArray* AsFloatArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ FloatArray* AsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- DoubleArray* AsDoubleArray() SHARED_REQUIRES(Locks::mutator_lock_);
+ DoubleArray* AsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsString() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsString() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- String* AsString() SHARED_REQUIRES(Locks::mutator_lock_);
+ String* AsString() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Throwable* AsThrowable() SHARED_REQUIRES(Locks::mutator_lock_);
+ Throwable* AsThrowable() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- bool IsReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Reference* AsReference() SHARED_REQUIRES(Locks::mutator_lock_);
+ Reference* AsReference() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsWeakReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsWeakReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsSoftReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsSoftReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsFinalizerReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsFinalizerReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- FinalizerReference* AsFinalizerReference() SHARED_REQUIRES(Locks::mutator_lock_);
+ FinalizerReference* AsFinalizerReference() REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsPhantomReferenceInstance() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsPhantomReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
// Accessor for Java type fields.
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
Object* old_value,
Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
int32_t old_value, int32_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
int32_t new_value) ALWAYS_INLINE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
int64_t new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr(MemberOffset field_offset, T new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, new_value, kRuntimePointerSize);
}
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
void SetFieldPtr64(MemberOffset field_offset, T new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
field_offset, new_value, 8u);
}
@@ -499,7 +499,7 @@
ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset,
T new_value,
PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we don't lose any non-zero bits.
@@ -511,7 +511,7 @@
}
}
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template <bool kVisitNativeRoots = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
@@ -520,7 +520,7 @@
void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
NO_THREAD_SAFETY_ANALYSIS;
- ArtField* FindFieldByOffset(MemberOffset offset) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* FindFieldByOffset(MemberOffset offset) REQUIRES_SHARED(Locks::mutator_lock_);
// Used by object_test.
static void SetHashCodeSeed(uint32_t new_seed);
@@ -531,19 +531,19 @@
// Accessors for non-Java type fields
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, kRuntimePointerSize);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
T GetFieldPtr64(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset,
PointerSize::k64);
}
template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (pointer_size == PointerSize::k32) {
return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
} else {
@@ -563,31 +563,31 @@
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
typename Visitor>
void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get a field with acquire semantics.
template<typename kSize>
ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Verify the type correctness of stores to fields.
// TODO: This can cause thread suspension and isn't moving GC safe.
void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kCheckFieldAssignments) {
CheckFieldAssignmentImpl(field_offset, new_value);
}
@@ -598,7 +598,7 @@
// Class::CopyOf().
static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
size_t num_bytes)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Atomic<uint32_t> hash_code_seed;
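
For context, and not part of this patch: SHARED_REQUIRES and REQUIRES_SHARED are the same Clang thread-safety annotation under two spellings, and this change only moves ART to the capability-style name. A rough, hand-written sketch of what such a macro typically expands to, assuming Clang's requires_shared_capability attribute (the real definition lives in ART's macro headers and may differ):

// Sketch only: approximation of the annotation macro, not ART's definition.
#if defined(__clang__)
#define REQUIRES_SHARED(...) \
  __attribute__((requires_shared_capability(__VA_ARGS__)))
#else
#define REQUIRES_SHARED(...)
#endif

// Usage, as in the declarations above: the caller must hold the named
// capability at least in shared (reader) mode, or -Wthread-safety warns.
//   bool IsString() REQUIRES_SHARED(Locks::mutator_lock_);
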
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index a99d616..19b9d87 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -32,23 +32,23 @@
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- ALWAYS_INLINE T* Get(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE T* Get(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns true if the object can be stored into the array. If not, throws
// an ArrayStoreException and returns false.
- // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+ // TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE void Set(int32_t i, T* object) SHARED_REQUIRES(Locks::mutator_lock_);
- // TODO fix thread safety analysis: should be SHARED_REQUIRES(Locks::mutator_lock_).
+ ALWAYS_INLINE void Set(int32_t i, T* object) REQUIRES_SHARED(Locks::mutator_lock_);
+ // TODO fix thread safety analysis: should be REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
@@ -56,41 +56,41 @@
// Set element without bound and element type checks, to be used in limited
// circumstances, such as during boot image writing.
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
ALWAYS_INLINE void SetWithoutChecksAndWriteBarrier(int32_t i, T* object)
NO_THREAD_SAFETY_ANALYSIS;
- ALWAYS_INLINE T* GetWithoutChecks(int32_t i) SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE T* GetWithoutChecks(int32_t i) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array assuming no overlap and without assignability checks.
void AssignableMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
- int32_t count) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t count) REQUIRES_SHARED(Locks::mutator_lock_);
// Copy src into this array with assignability checks.
template<bool kTransactionActive>
void AssignableCheckingMemcpy(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
int32_t count, bool throw_exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ObjectArray<T>* CopyOf(Thread* self, int32_t new_length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static MemberOffset OffsetOfElement(int32_t i);
private:
// TODO fix thread safety analysis broken by the use of template. This should be
- // SHARED_REQUIRES(Locks::mutator_lock_).
+ // REQUIRES_SHARED(Locks::mutator_lock_).
template<typename Visitor>
void VisitReferences(const Visitor& visitor) NO_THREAD_SAFETY_ANALYSIS;
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 2a5c88e..583cfc3 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -33,11 +33,11 @@
template<bool kPoisonReferences, class MirrorType>
class MANAGED ObjectReference {
public:
- MirrorType* AsMirrorPtr() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ MirrorType* AsMirrorPtr() const REQUIRES_SHARED(Locks::mutator_lock_) {
return UnCompress();
}
- void Assign(MirrorType* other) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Assign(MirrorType* other) REQUIRES_SHARED(Locks::mutator_lock_) {
reference_ = Compress(other);
}
@@ -56,18 +56,18 @@
protected:
ObjectReference<kPoisonReferences, MirrorType>(MirrorType* mirror_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: reference_(Compress(mirror_ptr)) {
}
// Compress reference to its bit representation.
- static uint32_t Compress(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static uint32_t Compress(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t as_bits = reinterpret_cast<uintptr_t>(mirror_ptr);
return static_cast<uint32_t>(kPoisonReferences ? -as_bits : as_bits);
}
// Uncompress an encoded reference from its bit representation.
- MirrorType* UnCompress() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ MirrorType* UnCompress() const REQUIRES_SHARED(Locks::mutator_lock_) {
uintptr_t as_bits = kPoisonReferences ? -reference_ : reference_;
return reinterpret_cast<MirrorType*>(as_bits);
}
@@ -83,11 +83,11 @@
class MANAGED HeapReference : public ObjectReference<kPoisonHeapReferences, MirrorType> {
public:
static HeapReference<MirrorType> FromMirrorPtr(MirrorType* mirror_ptr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return HeapReference<MirrorType>(mirror_ptr);
}
private:
- HeapReference<MirrorType>(MirrorType* mirror_ptr) SHARED_REQUIRES(Locks::mutator_lock_)
+ HeapReference<MirrorType>(MirrorType* mirror_ptr) REQUIRES_SHARED(Locks::mutator_lock_)
: ObjectReference<kPoisonHeapReferences, MirrorType>(mirror_ptr) {}
};
@@ -95,16 +95,16 @@
template<class MirrorType>
class MANAGED CompressedReference : public mirror::ObjectReference<false, MirrorType> {
public:
- CompressedReference<MirrorType>() SHARED_REQUIRES(Locks::mutator_lock_)
+ CompressedReference<MirrorType>() REQUIRES_SHARED(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(nullptr) {}
static CompressedReference<MirrorType> FromMirrorPtr(MirrorType* p)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return CompressedReference<MirrorType>(p);
}
private:
- CompressedReference<MirrorType>(MirrorType* p) SHARED_REQUIRES(Locks::mutator_lock_)
+ CompressedReference<MirrorType>(MirrorType* p) REQUIRES_SHARED(Locks::mutator_lock_)
: mirror::ObjectReference<false, MirrorType>(p) {}
};
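
The ObjectReference changes above are again annotation-only, but the Compress/UnCompress pair they decorate is worth a standalone illustration. A minimal sketch of the same poisoning idea, using toy types rather than ART's (the 32-bit assumption holds for ART because the managed heap is mapped in the low 4 GiB):

#include <cassert>
#include <cstdint>

// Toy model of the poisoned 32-bit reference in ObjectReference above:
// pointer bits are negated on store so that dereferencing the raw field by
// mistake faults; only Compress/Get round-trip them.
template <bool kPoison, typename T>
class ToyReference {
 public:
  explicit ToyReference(T* ptr) : bits_(Compress(ptr)) {}

  T* Get() const {
    // Negate back in 32 bits, exactly as stored, then widen to a pointer.
    uint32_t raw = kPoison ? static_cast<uint32_t>(0u - bits_) : bits_;
    return reinterpret_cast<T*>(static_cast<uintptr_t>(raw));
  }

 private:
  static uint32_t Compress(T* ptr) {
    uintptr_t as_bits = reinterpret_cast<uintptr_t>(ptr);
    return static_cast<uint32_t>(kPoison ? -as_bits : as_bits);
  }

  uint32_t bits_;  // Compressed (and possibly poisoned) representation.
};

int main() {
  // A fake address that fits in 32 bits; arbitrary host pointers may not.
  int* fake = reinterpret_cast<int*>(uintptr_t{0x12345678});
  ToyReference</*kPoison=*/true, int> ref(fake);
  assert(ref.Get() == fake);  // Poisoning round-trips; never dereference fake.
  return 0;
}
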
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index b35a479..afd6115 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -49,7 +49,7 @@
const char* utf8_in,
const char* utf16_expected_le,
int32_t expected_hash)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::unique_ptr<uint16_t[]> utf16_expected(new uint16_t[expected_utf16_length]);
for (int32_t i = 0; i < expected_utf16_length; i++) {
uint16_t ch = (((utf16_expected_le[i*2 + 0] & 0xff) << 8) |
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index 38c6616..6a8b32b 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -64,26 +64,26 @@
return OFFSET_OF_OBJECT_MEMBER(Reference, referent_);
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Object* GetReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object, kDefaultVerifyFlags, kReadBarrierOption>(
ReferentOffset());
}
template<bool kTransactionActive>
- void SetReferent(Object* referent) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetReferent(Object* referent) REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
}
template<bool kTransactionActive>
- void ClearReferent() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ClearReferent() REQUIRES_SHARED(Locks::mutator_lock_) {
SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- Reference* GetPendingNext() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Reference* GetPendingNext() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<Reference, kDefaultVerifyFlags, kReadBarrierOption>(PendingNextOffset());
}
void SetPendingNext(Reference* pending_next)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObject<true>(PendingNextOffset(), pending_next);
} else {
@@ -103,22 +103,22 @@
// should not be processed again until and unless the reference has been
// removed from the list after having determined the reference is not ready
// to be enqueued on a java ReferenceQueue.
- bool IsUnprocessed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsUnprocessed() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetPendingNext<kWithoutReadBarrier>() == nullptr;
}
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- static Class* GetJavaLangRefReference() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangRefReference() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_ref_Reference_.IsNull());
return java_lang_ref_Reference_.Read<kReadBarrierOption>();
}
static void SetClass(Class* klass);
static void ResetClass();
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Note: This avoids a read barrier, it should only be used by the GC.
- HeapReference<Object>* GetReferentReferenceAddr() SHARED_REQUIRES(Locks::mutator_lock_) {
+ HeapReference<Object>* GetReferentReferenceAddr() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectReferenceAddr<kDefaultVerifyFlags>(ReferentOffset());
}
@@ -144,10 +144,10 @@
}
template<bool kTransactionActive>
- void SetZombie(Object* zombie) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetZombie(Object* zombie) REQUIRES_SHARED(Locks::mutator_lock_) {
return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
}
- Object* GetZombie() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetZombie() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(ZombieOffset());
}
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 1167391..4b3d9d0 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -31,32 +31,32 @@
// C++ mirror of java.lang.StackTraceElement
class MANAGED StackTraceElement FINAL : public Object {
public:
- String* GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
}
- String* GetMethodName() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetMethodName() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_));
}
- String* GetFileName() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetFileName() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_));
}
- int32_t GetLineNumber() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetLineNumber() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
static StackTraceElement* Alloc(Thread* self, Handle<String> declaring_class,
Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static void SetClass(Class* java_lang_StackTraceElement);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
- static Class* GetStackTraceElement() SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ static Class* GetStackTraceElement() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_StackTraceElement_.IsNull());
return java_lang_StackTraceElement_.Read();
}
@@ -71,7 +71,7 @@
template<bool kTransactionActive>
void Init(Handle<String> declaring_class, Handle<String> method_name, Handle<String> file_name,
int32_t line_number)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static GcRoot<Class> java_lang_StackTraceElement_;
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
index bc39ea8..86e5139 100644
--- a/runtime/mirror/string-inl.h
+++ b/runtime/mirror/string-inl.h
@@ -44,7 +44,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -64,7 +64,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -100,7 +100,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
@@ -132,7 +132,7 @@
}
void operator()(Object* obj, size_t usable_size ATTRIBUTE_UNUSED) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Avoid AsString as object is not yet in live bitmap or allocation stack.
String* string = down_cast<String*>(obj);
string->SetCount(count_);
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index 8695fe8..a18692f 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -53,100 +53,100 @@
return OFFSET_OF_OBJECT_MEMBER(String, value_);
}
- uint16_t* GetValue() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint16_t* GetValue() REQUIRES_SHARED(Locks::mutator_lock_) {
return &value_[0];
}
- uint8_t* GetValueCompressed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ uint8_t* GetValueCompressed() REQUIRES_SHARED(Locks::mutator_lock_) {
return &value_compressed_[0];
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- size_t SizeOf() SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);
// Taking out the first/uppermost bit because it is not part of the actual length value
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetLength() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetLengthFromCount(GetCount<kVerifyFlags>());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetCount() SHARED_REQUIRES(Locks::mutator_lock_) {
+ int32_t GetCount() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(String, count_));
}
- void SetCount(int32_t new_count) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetCount(int32_t new_count) REQUIRES_SHARED(Locks::mutator_lock_) {
// Count is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_LE(0, (new_count & INT32_MAX));
SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
}
- int32_t GetHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetHashCode() REQUIRES_SHARED(Locks::mutator_lock_);
// Computes, stores, and returns the hash code.
- int32_t ComputeHashCode() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t ComputeHashCode() REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t GetUtfLength() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetUtfLength() REQUIRES_SHARED(Locks::mutator_lock_);
- uint16_t CharAt(int32_t index) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t CharAt(int32_t index) REQUIRES_SHARED(Locks::mutator_lock_);
- void SetCharAt(int32_t index, uint16_t c) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetCharAt(int32_t index, uint16_t c) REQUIRES_SHARED(Locks::mutator_lock_);
- String* Intern() SHARED_REQUIRES(Locks::mutator_lock_);
+ String* Intern() REQUIRES_SHARED(Locks::mutator_lock_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromByteArray(Thread* self, int32_t byte_length,
Handle<ByteArray> array, int32_t offset,
int32_t high_byte,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromCharArray(Thread* self, int32_t count,
Handle<CharArray> array, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocFromString(Thread* self, int32_t string_length,
Handle<String> string, int32_t offset,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
template <bool kIsInstrumented>
ALWAYS_INLINE static String* AllocEmptyString(Thread* self,
gc::AllocatorType allocator_type)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromStrings(Thread* self, Handle<String> string, Handle<String> string2)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromUtf16(Thread* self, int32_t utf16_length, const uint16_t* utf16_data_in)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, const char* utf)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length,
const char* utf8_data_in, int32_t utf8_length)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
static String* AllocFromModifiedUtf8(Thread* self, int32_t utf16_length, const char* utf8_data_in)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// TODO: This is only used in the interpreter to compare against
// entries from a dex files constant pool (ArtField names). Should
// we unify this with Equals(const StringPiece&); ?
- bool Equals(const char* modified_utf8) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Equals(const char* modified_utf8) REQUIRES_SHARED(Locks::mutator_lock_);
// TODO: This is only used to compare DexCache.location with
// a dex_file's location (which is an std::string). Do we really
// need this in mirror::String just for that one usage ?
bool Equals(const StringPiece& modified_utf8)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- bool Equals(String* that) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Equals(String* that) REQUIRES_SHARED(Locks::mutator_lock_);
// Compare UTF-16 code point values not in a locale-sensitive manner
int Compare(int32_t utf16_length, const char* utf8_data_in);
@@ -154,31 +154,31 @@
// TODO: do we need this overload? give it a more intention-revealing name.
bool Equals(const uint16_t* that_chars, int32_t that_offset,
int32_t that_length)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create a modified UTF-8 encoded std::string from a java/lang/String object.
- std::string ToModifiedUtf8() SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string ToModifiedUtf8() REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t FastIndexOf(int32_t ch, int32_t start) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t FastIndexOf(int32_t ch, int32_t start) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename MemoryType>
int32_t FastIndexOf(MemoryType* chars, int32_t ch, int32_t start)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- int32_t CompareTo(String* other) SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t CompareTo(String* other) REQUIRES_SHARED(Locks::mutator_lock_);
- CharArray* ToCharArray(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_)
+ CharArray* ToCharArray(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
void GetChars(int32_t start, int32_t end, Handle<CharArray> array, int32_t index)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool IsCompressed() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool IsCompressed() REQUIRES_SHARED(Locks::mutator_lock_) {
return kUseStringCompression && GetCompressionFlagFromCount(GetCount());
}
- bool IsValueNull() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsValueNull() REQUIRES_SHARED(Locks::mutator_lock_);
template<typename MemoryType>
static bool AllASCII(const MemoryType* const chars, const int length);
@@ -195,17 +195,17 @@
return kUseStringCompression ? (count | (1u << 31)) : count;
}
- static Class* GetJavaLangString() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangString() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_String_.IsNull());
return java_lang_String_.Read();
}
- static void SetClass(Class* java_lang_String) SHARED_REQUIRES(Locks::mutator_lock_);
- static void ResetClass() SHARED_REQUIRES(Locks::mutator_lock_);
- static void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void SetClass(Class* java_lang_String) REQUIRES_SHARED(Locks::mutator_lock_);
+ static void ResetClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void SetHashCode(int32_t new_hash_code) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetHashCode(int32_t new_hash_code) REQUIRES_SHARED(Locks::mutator_lock_) {
// Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_)));
@@ -216,7 +216,7 @@
ALWAYS_INLINE static String* Alloc(Thread* self, int32_t utf16_length_with_flag,
gc::AllocatorType allocator_type,
const PreFenceVisitor& pre_fence_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
// First bit (uppermost/leftmost) is taken out for Compressed/Uncompressed flag
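
Several of the String accessors touched above revolve around one detail that the surrounding comments call out: when kUseStringCompression is set, the uppermost bit of count_ is a compressed/uncompressed flag and only the low 31 bits are the character count (see SetCount's DCHECK against INT32_MAX and the (count | (1u << 31)) expression). A small standalone sketch of that bit layout, with helper names chosen here for illustration rather than taken from string.h:

#include <cassert>
#include <cstdint>

// Illustrative encoding: top bit carries the compression flag, low 31 bits
// carry the length. Which flag value means "compressed" is left to string.h.
const uint32_t kFlagBit = 1u << 31;

int32_t EncodeCount(int32_t length, bool flag) {
  return static_cast<int32_t>((flag ? kFlagBit : 0u) |
                              (static_cast<uint32_t>(length) & INT32_MAX));
}

int32_t LengthFromCount(int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(count) & INT32_MAX);
}

bool FlagFromCount(int32_t count) {
  return (static_cast<uint32_t>(count) & kFlagBit) != 0;
}

int main() {
  int32_t count = EncodeCount(/*length=*/17, /*flag=*/true);
  assert(LengthFromCount(count) == 17);  // Flag bit does not perturb length.
  assert(FlagFromCount(count));
  return 0;
}
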
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 0bccc8b..8f3ed84 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -54,7 +54,7 @@
}
}
-void Throwable::SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_) {
+void Throwable::SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(state != nullptr);
if (Runtime::Current()->IsActiveTransaction()) {
SetFieldObjectVolatile<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_), state);
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index 6aacc8d..76824cb 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -31,38 +31,38 @@
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetDetailMessage(String* new_detail_message) REQUIRES_SHARED(Locks::mutator_lock_);
- String* GetDetailMessage() SHARED_REQUIRES(Locks::mutator_lock_) {
+ String* GetDetailMessage() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
}
- std::string Dump() SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() REQUIRES_SHARED(Locks::mutator_lock_);
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
// overridden. Also it asserts rather than throwing exceptions. Currently this is only used
// in cases like the verifier where the checks cannot fail and initCause isn't overridden.
- void SetCause(Throwable* cause) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetStackState(Object* state) SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsCheckedException() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetCause(Throwable* cause) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetStackState(Object* state) REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsCheckedException() REQUIRES_SHARED(Locks::mutator_lock_);
- static Class* GetJavaLangThrowable() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static Class* GetJavaLangThrowable() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!java_lang_Throwable_.IsNull());
return java_lang_Throwable_.Read();
}
- int32_t GetStackDepth() SHARED_REQUIRES(Locks::mutator_lock_);
+ int32_t GetStackDepth() REQUIRES_SHARED(Locks::mutator_lock_);
static void SetClass(Class* java_lang_Throwable);
static void ResetClass();
static void VisitRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
- Object* GetStackState() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetStackState() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
}
- Object* GetStackTrace() SHARED_REQUIRES(Locks::mutator_lock_) {
+ Object* GetStackTrace() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, backtrace_));
}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index e863ea9..22cc197 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -219,13 +219,13 @@
struct NthCallerWithDexPcVisitor FINAL : public StackVisitor {
explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_(nullptr),
dex_pc_(0),
current_frame_number_(0),
wanted_frame_number_(frame) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m == nullptr || m->IsRuntimeMethod()) {
// Runtime method, upcall, or resolution issue. Skip.
@@ -449,7 +449,7 @@
__attribute__((format(printf, 1, 2)));
static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
va_list args;
va_start(args, fmt);
Thread* self = Thread::Current();
@@ -1261,7 +1261,7 @@
}
}
-bool Monitor::IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) {
+bool Monitor::IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) {
MutexLock mu(Thread::Current(), monitor_lock_);
return owner_ != nullptr;
}
@@ -1364,7 +1364,7 @@
MonitorDeflateVisitor() : self_(Thread::Current()), deflate_count_(0) {}
virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (Monitor::Deflate(self_, object)) {
DCHECK_NE(object->GetLockWord(true).GetState(), LockWord::kFatLocked);
++deflate_count_;
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 1d829e1..c3da563 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -66,19 +66,19 @@
EXCLUSIVE_LOCK_FUNCTION(obj)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// NO_THREAD_SAFETY_ANALYSIS for mon->Unlock.
static bool MonitorExit(Thread* thread, mirror::Object* obj)
NO_THREAD_SAFETY_ANALYSIS
REQUIRES(!Roles::uninterruptible_)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
UNLOCK_FUNCTION(obj);
- static void Notify(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Notify(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
DoNotify(self, obj, false);
}
- static void NotifyAll(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void NotifyAll(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
DoNotify(self, obj, true);
}
@@ -86,15 +86,15 @@
// NO_THREAD_SAFETY_ANALYSIS for mon->Wait.
static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
+ REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
static void DescribeWait(std::ostream& os, const Thread* thread)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Used to implement JDWP's ThreadReference.CurrentContendedMonitor.
static mirror::Object* GetContendedMonitor(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Calls 'callback' once for each lock held in the single stack frame represented by
// the current state of 'stack_visitor'.
@@ -102,12 +102,12 @@
// is necessary when we have already aborted but want to dump the stack as much as we can.
static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
void* callback_context, bool abort_on_failure = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsValidLockWord(LockWord lock_word);
template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
- mirror::Object* GetObject() SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetObject() REQUIRES_SHARED(Locks::mutator_lock_) {
return obj_.Read<kReadBarrierOption>();
}
@@ -119,7 +119,7 @@
int32_t GetHashCode();
- bool IsLocked() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
+ bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_);
bool HasHashCode() const {
return hash_code_.LoadRelaxed() != 0;
@@ -131,13 +131,13 @@
// Inflate the lock on obj. May fail to inflate for spurious reasons, always re-check.
static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
- uint32_t hash_code) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t hash_code) REQUIRES_SHARED(Locks::mutator_lock_);
// Not exclusive because ImageWriter calls this during a Heap::VisitObjects() that
// does not allow a thread suspension in the middle. TODO: maybe make this exclusive.
// NO_THREAD_SAFETY_ANALYSIS for monitor->monitor_lock_.
static bool Deflate(Thread* self, mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
+ REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS;
#ifndef __LP64__
void* operator new(size_t size) {
@@ -155,15 +155,15 @@
private:
Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Monitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code, MonitorId id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Install the monitor into its object, may fail if another thread installs a different monitor
// first.
bool Install(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this
// routine.
@@ -178,12 +178,12 @@
// threads inflating the lock, installing hash codes and spurious failures. The caller should
// re-read the lock word following the call.
static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS; // For m->Install(self)
void LogContentionEvent(Thread* self, uint32_t wait_ms, uint32_t sample_percent,
const char* owner_filename, int32_t owner_line_number)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void FailedUnlock(mirror::Object* obj,
uint32_t expected_owner_thread_id,
@@ -191,34 +191,34 @@
Monitor* mon)
REQUIRES(!Locks::thread_list_lock_,
!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to lock without blocking, returns true if we acquired the lock.
bool TryLock(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Variant for already holding the monitor lock.
bool TryLockLocked(Thread* self)
REQUIRES(monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void Lock(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool Unlock(Thread* thread)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void DoNotify(Thread* self, mirror::Object* obj, bool notify_all)
- SHARED_REQUIRES(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify.
+ REQUIRES_SHARED(Locks::mutator_lock_) NO_THREAD_SAFETY_ANALYSIS; // For mon->Notify.
void Notify(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void NotifyAll(Thread* self)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static std::string PrettyContentionInfo(const std::string& owner_name,
pid_t owner_tid,
@@ -226,7 +226,7 @@
uint32_t owners_dex_pc,
size_t num_waiters)
REQUIRES(!Locks::thread_list_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
// (somewhat indirectly) Thread.sleep() and Thread.join().
@@ -249,13 +249,13 @@
// of the 32-bit time epoch.
void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
REQUIRES(!monitor_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Translates the provided method and pc into its declaring class' source file and line number.
static void TranslateLocation(ArtMethod* method, uint32_t pc,
const char** source_file,
int32_t* line_number)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_);
@@ -263,11 +263,11 @@
ALWAYS_INLINE static void AtraceMonitorLock(Thread* self,
mirror::Object* obj,
bool is_wait)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void AtraceMonitorLockImpl(Thread* self,
mirror::Object* obj,
bool is_wait)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE static void AtraceMonitorUnlock();
static uint32_t lock_profiling_threshold_;
@@ -322,10 +322,10 @@
MonitorList();
~MonitorList();
- void Add(Monitor* m) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);
+ void Add(Monitor* m) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_list_lock_);
void SweepMonitorList(IsMarkedVisitor* visitor)
- REQUIRES(!monitor_list_lock_) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES(!monitor_list_lock_) REQUIRES_SHARED(Locks::mutator_lock_);
void DisallowNewMonitors() REQUIRES(!monitor_list_lock_);
void AllowNewMonitors() REQUIRES(!monitor_list_lock_);
void BroadcastForNewMonitors() REQUIRES(!monitor_list_lock_);
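
The Monitor declarations above mix three kinds of annotations: REQUIRES_SHARED(Locks::mutator_lock_) for the global mutator lock, REQUIRES(!monitor_lock_) / REQUIRES(monitor_lock_) for the per-monitor mutex, and NO_THREAD_SAFETY_ANALYSIS as an escape hatch. A compressed sketch of how such a combination reads to Clang's analysis, with toy types standing in for art::Mutex and the ART macros (compile with clang -Wthread-safety to see the checks):

// Sketch only; not ART's classes or macro definitions.
#if defined(__clang__)
#define CAPABILITY(x)        __attribute__((capability(x)))
#define ACQUIRE(...)         __attribute__((acquire_capability(__VA_ARGS__)))
#define RELEASE(...)         __attribute__((release_capability(__VA_ARGS__)))
#define REQUIRES(...)        __attribute__((requires_capability(__VA_ARGS__)))
#define REQUIRES_SHARED(...) __attribute__((requires_shared_capability(__VA_ARGS__)))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define NO_THREAD_SAFETY_ANALYSIS __attribute__((no_thread_safety_analysis))
#else
#define CAPABILITY(x)
#define ACQUIRE(...)
#define RELEASE(...)
#define REQUIRES(...)
#define REQUIRES_SHARED(...)
#define GUARDED_BY(x)
#define NO_THREAD_SAFETY_ANALYSIS
#endif

class CAPABILITY("mutex") ToyMutex {
 public:
  void Lock() ACQUIRE() {}
  void Unlock() RELEASE() {}
};

ToyMutex mutator_lock_;  // Stand-in for Locks::mutator_lock_.

class ToyMonitor {
 public:
  // Caller must hold the mutator lock (shared) and must NOT already hold the
  // internal per-monitor lock, which the body acquires and releases itself.
  void Lock() REQUIRES_SHARED(mutator_lock_) REQUIRES(!monitor_lock_) {
    monitor_lock_.Lock();
    ++lock_count_;
    monitor_lock_.Unlock();
  }

  // Variant for callers that already hold the internal lock, cf. TryLockLocked.
  bool TryLockLocked() REQUIRES_SHARED(mutator_lock_) REQUIRES(monitor_lock_) {
    return lock_count_ == 0;
  }

  // Escape hatch for code the analysis cannot follow, as in monitor.h.
  void Deflate() NO_THREAD_SAFETY_ANALYSIS {}

 private:
  ToyMutex monitor_lock_;
  int lock_count_ GUARDED_BY(monitor_lock_) = 0;
};

int main() { return 0; }
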
diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc
index a47a4b2..0f4e238 100644
--- a/runtime/monitor_pool.cc
+++ b/runtime/monitor_pool.cc
@@ -107,7 +107,7 @@
Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj,
int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We are gonna allocate, so acquire the writer lock.
MutexLock mu(self, *Locks::allocated_monitor_ids_lock_);
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 99810e0..80bae7f 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -43,7 +43,7 @@
}
static Monitor* CreateMonitor(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
#ifndef __LP64__
Monitor* mon = new Monitor(self, owner, obj, hash_code);
DCHECK_ALIGNED(mon, LockWord::kMonitorIdAlignment);
@@ -123,7 +123,7 @@
void FreeInternal() NO_THREAD_SAFETY_ANALYSIS;
Monitor* CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ReleaseMonitorToPool(Thread* self, Monitor* monitor);
void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors);
diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc
index 48d256c..ac6a4f3 100644
--- a/runtime/monitor_test.cc
+++ b/runtime/monitor_test.cc
@@ -61,7 +61,7 @@
static void FillHeap(Thread* self, ClassLinker* class_linker,
std::unique_ptr<StackHandleScope<kMaxHandles>>* hsp,
std::vector<MutableHandle<mirror::Object>>* handles)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
hsp->reset(new StackHandleScope<kMaxHandles>(self));
diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
new file mode 100644
index 0000000..08bf978
--- /dev/null
+++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.cc
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "dalvik_system_InMemoryDexClassLoader_DexData.h"
+
+#include <sstream>
+
+#include "class_linker.h"
+#include "common_throws.h"
+#include "dex_file.h"
+#include "jni_internal.h"
+#include "mem_map.h"
+#include "mirror/class_loader.h"
+#include "mirror/object-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedUtfChars.h"
+
+namespace art {
+
+static std::unique_ptr<MemMap> AllocateDexMemoryMap(JNIEnv* env, jint start, jint end) {
+ if (end <= start) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("Bad range");
+ return nullptr;
+ }
+
+ std::string error_message;
+ size_t length = static_cast<size_t>(end - start);
+ std::unique_ptr<MemMap> dex_mem_map(MemMap::MapAnonymous("DEX data",
+ nullptr,
+ length,
+ PROT_READ | PROT_WRITE,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &error_message));
+ if (dex_mem_map == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("%s", error_message.c_str());
+ }
+ return dex_mem_map;
+}
+
+static jlong DexFileToCookie(const DexFile* dex_file) {
+ return reinterpret_cast<jlong>(dex_file);
+}
+
+static const DexFile* CookieToDexFile(jlong cookie) {
+ return reinterpret_cast<const DexFile*>(cookie);
+}
+
+static const DexFile* CreateDexFile(JNIEnv* env, std::unique_ptr<MemMap> dex_mem_map) {
+ std::string location = StringPrintf("InMemoryDexClassLoader_DexData@%p-%p",
+ dex_mem_map->Begin(),
+ dex_mem_map->End());
+ std::string error_message;
+ std::unique_ptr<const DexFile> dex_file(DexFile::Open(location,
+ 0,
+ std::move(dex_mem_map),
+ /* verify */ true,
+ /* verify_location */ true,
+ &error_message));
+ if (dex_file == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("%s", error_message.c_str());
+ return nullptr;
+ }
+
+ if (!dex_file->DisableWrite()) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("Failed to make dex file read-only");
+ return nullptr;
+ }
+
+ return dex_file.release();
+}
+
+static jlong InMemoryDexClassLoader_DexData_initializeWithDirectBuffer(
+ JNIEnv* env, jclass, jobject buffer, jint start, jint end) {
+ uint8_t* base_address = reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(buffer));
+ if (base_address == nullptr) {
+ ScopedObjectAccess soa(env);
+ ThrowWrappedIOException("dexFileBuffer not direct");
+ return 0;
+ }
+
+ std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
+ if (dex_mem_map == nullptr) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ return 0;
+ }
+
+ size_t length = static_cast<size_t>(end - start);
+ memcpy(dex_mem_map->Begin(), base_address, length);
+ return DexFileToCookie(CreateDexFile(env, std::move(dex_mem_map)));
+}
+
+static jlong InMemoryDexClassLoader_DexData_initializeWithArray(
+ JNIEnv* env, jclass, jbyteArray buffer, jint start, jint end) {
+ std::unique_ptr<MemMap> dex_mem_map(AllocateDexMemoryMap(env, start, end));
+ if (dex_mem_map == nullptr) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ return 0;
+ }
+
+ auto destination = reinterpret_cast<jbyte*>(dex_mem_map.get()->Begin());
+ env->GetByteArrayRegion(buffer, start, end - start, destination);
+ return DexFileToCookie(CreateDexFile(env, std::move(dex_mem_map)));
+}
+
+static void InMemoryDexClassLoader_DexData_uninitialize(JNIEnv* env, jclass, jlong cookie) {
+ const DexFile* dex_file = CookieToDexFile(cookie);
+ if (kIsDebugBuild) {
+ ScopedObjectAccess soa(env);
+ ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
+ CHECK(class_linker->FindDexCache(soa.Self(), *dex_file, true) == nullptr);
+ }
+ delete dex_file;
+}
+
+static jclass InMemoryDexClassLoader_DexData_findClass(
+ JNIEnv* env, jobject dexData, jstring name, jobject loader, jlong cookie) {
+ ScopedUtfChars scoped_class_name(env, name);
+ if (env->ExceptionCheck()) {
+ return nullptr;
+ }
+
+ const char* class_name = scoped_class_name.c_str();
+ const std::string descriptor(DotToDescriptor(class_name));
+ const char* class_descriptor = descriptor.c_str();
+ const size_t hash = ComputeModifiedUtf8Hash(class_descriptor);
+ const DexFile* dex_file = CookieToDexFile(cookie);
+ const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(class_descriptor, hash);
+ if (dex_class_def != nullptr) {
+ ScopedObjectAccess soa(env);
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ StackHandleScope<1> handle_scope(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(
+ handle_scope.NewHandle(soa.Decode<mirror::ClassLoader*>(loader)));
+ class_linker->RegisterDexFile(*dex_file, class_loader.Get());
+ mirror::Class* result = class_linker->DefineClass(
+ soa.Self(), class_descriptor, hash, class_loader, *dex_file, *dex_class_def);
+ if (result != nullptr) {
+ // Ensure the class table has a strong reference to the
+ // InMemoryClassLoader/DexData instance now that a class has
+ // been loaded.
+ class_linker->InsertDexFileInToClassLoader(
+ soa.Decode<mirror::Object*>(dexData), class_loader.Get());
+ return soa.AddLocalReference<jclass>(result);
+ }
+ }
+
+ VLOG(class_linker) << "Failed to find dex_class_def " << class_name;
+ return nullptr;
+}
+
+static JNINativeMethod gMethods[] = {
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData,
+ initializeWithDirectBuffer,
+ "(Ljava/nio/ByteBuffer;II)J"),
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData, initializeWithArray, "([BII)J"),
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData, uninitialize, "(J)V"),
+ NATIVE_METHOD(InMemoryDexClassLoader_DexData,
+ findClass,
+ "(Ljava/lang/String;Ljava/lang/ClassLoader;J)Ljava/lang/Class;"),
+};
+
+void register_dalvik_system_InMemoryDexClassLoader_DexData(JNIEnv* env) {
+ REGISTER_NATIVE_METHODS("dalvik/system/InMemoryDexClassLoader$DexData");
+}
+
+} // namespace art
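Reviewer note (aside, not part of the diff): the new native methods hand the freshly created DexFile back to managed code as an opaque jlong "cookie" (DexFileToCookie) and later recover the pointer from it (CookieToDexFile). A standalone sketch of that round trip, using a stand-in type rather than art::DexFile:

#include <cassert>
#include <cstdint>

struct DexFileStub {};  // stand-in for the real DexFile, for illustration only

static int64_t ToCookie(const DexFileStub* dex_file) {
  return reinterpret_cast<int64_t>(dex_file);
}

static const DexFileStub* FromCookie(int64_t cookie) {
  return reinterpret_cast<const DexFileStub*>(cookie);
}

int main() {
  DexFileStub stub;
  const int64_t cookie = ToCookie(&stub);
  assert(FromCookie(cookie) == &stub);  // the pointer survives the jlong round trip
  return 0;
}

The managed DexData object only ever stores the jlong, so uninitialize() above is the point where the native DexFile is deleted, after the debug-build check that no dex cache still refers to it.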
diff --git a/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.h b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.h
new file mode 100644
index 0000000..f73d07a
--- /dev/null
+++ b/runtime/native/dalvik_system_InMemoryDexClassLoader_DexData.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_INMEMORYDEXCLASSLOADER_DEXDATA_H_
+#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_INMEMORYDEXCLASSLOADER_DEXDATA_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_dalvik_system_InMemoryDexClassLoader_DexData(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_INMEMORYDEXCLASSLOADER_DEXDATA_H_
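Reviewer note (aside, not part of the diff): findClass in the .cc above turns the dotted class name into a dex descriptor via DotToDescriptor before calling FindClassDef. A self-contained sketch of that conversion (an illustrative reimplementation, not ART's helper, and ignoring array types):

#include <cassert>
#include <string>

static std::string DotToDescriptorSketch(const std::string& dotted_name) {
  std::string descriptor = "L" + dotted_name + ";";
  for (char& c : descriptor) {
    if (c == '.') {
      c = '/';  // package separators become '/' in JNI/dex descriptors
    }
  }
  return descriptor;
}

int main() {
  assert(DotToDescriptorSketch("dalvik.system.InMemoryDexClassLoader") ==
         "Ldalvik/system/InMemoryDexClassLoader;");
  return 0;
}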
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 45e49e2..d88c9d4 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -270,7 +270,7 @@
explicit PreloadDexCachesStringsVisitor(StringTable* table) : table_(table) { }
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = root->AsString();
table_->operator[](string->ToModifiedUtf8()) = string;
}
@@ -282,7 +282,7 @@
// Based on ClassLinker::ResolveString.
static void PreloadDexCachesResolveString(
Handle<mirror::DexCache> dex_cache, uint32_t string_idx, StringTable& strings)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::String* string = dex_cache->GetResolvedString(string_idx);
if (string != nullptr) {
return;
@@ -300,7 +300,7 @@
// Based on ClassLinker::ResolveType.
static void PreloadDexCachesResolveType(
Thread* self, mirror::DexCache* dex_cache, uint32_t type_idx)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* klass = dex_cache->GetResolvedType(type_idx);
if (klass != nullptr) {
return;
@@ -329,7 +329,7 @@
// Based on ClassLinker::ResolveField.
static void PreloadDexCachesResolveField(Handle<mirror::DexCache> dex_cache, uint32_t field_idx,
bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* field = dex_cache->GetResolvedField(field_idx, kRuntimePointerSize);
if (field != nullptr) {
return;
@@ -357,7 +357,7 @@
// Based on ClassLinker::ResolveMethod.
static void PreloadDexCachesResolveMethod(Handle<mirror::DexCache> dex_cache, uint32_t method_idx,
InvokeType invoke_type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = dex_cache->GetResolvedMethod(method_idx, kRuntimePointerSize);
if (method != nullptr) {
return;
@@ -431,7 +431,7 @@
}
static void PreloadDexCachesStatsFilled(DexCacheStats* filled)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (!kPreloadDexCachesCollectStats) {
return;
}
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 9e12806..9da40b9 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -29,7 +29,7 @@
namespace art {
static jobject GetThreadStack(const ScopedFastNativeObjectAccess& soa, jobject peer)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
jobject trace = nullptr;
if (soa.Decode<mirror::Object*>(peer) == soa.Self()->GetPeer()) {
trace = soa.Self()->CreateInternalStackTrace<false>(soa);
@@ -85,7 +85,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
class_loader(nullptr) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(class_loader == nullptr);
mirror::Class* c = GetMethod()->GetDeclaringClass();
// c is null for runtime methods.
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index d4e54cf..d89a334 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -44,7 +44,7 @@
ALWAYS_INLINE static inline mirror::Class* DecodeClass(
const ScopedFastNativeObjectAccess& soa, jobject java_class)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
DCHECK(c != nullptr);
DCHECK(c->IsClass());
@@ -111,7 +111,7 @@
static mirror::ObjectArray<mirror::Field>* GetDeclaredFields(
Thread* self, mirror::Class* klass, bool public_only, bool force_resolve)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
IterationRange<StrideIterator<ArtField>> ifields = klass->GetIFields();
IterationRange<StrideIterator<ArtField>> sfields = klass->GetSFields();
@@ -192,7 +192,7 @@
// fast.
ALWAYS_INLINE static inline ArtField* FindFieldByName(
Thread* self ATTRIBUTE_UNUSED, mirror::String* name, LengthPrefixedArray<ArtField>* fields)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (fields == nullptr) {
return nullptr;
}
@@ -237,7 +237,7 @@
ALWAYS_INLINE static inline mirror::Field* GetDeclaredField(
Thread* self, mirror::Class* c, mirror::String* name)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtField* art_field = FindFieldByName(self, name, c->GetIFieldsPtr());
if (art_field != nullptr) {
return mirror::Field::CreateFromArtField<kRuntimePointerSize>(self, art_field, true);
@@ -251,7 +251,7 @@
static mirror::Field* GetPublicFieldRecursive(
Thread* self, mirror::Class* clazz, mirror::String* name)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(clazz != nullptr);
DCHECK(name != nullptr);
DCHECK(self != nullptr);
@@ -352,7 +352,7 @@
}
static ALWAYS_INLINE inline bool MethodMatchesConstructor(ArtMethod* m, bool public_only)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(m != nullptr);
return (!public_only || m->IsPublic()) && !m->IsStatic() && m->IsConstructor();
}
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index 9e2d68d..1b399aa 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -36,7 +36,7 @@
*/
static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index aac800a..5a4ced2 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -32,7 +32,7 @@
template<bool kIsSet>
ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::Field* field,
mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (kIsSet && field->IsFinal()) {
ThrowIllegalAccessException(
StringPrintf("Cannot set %s field %s of class %s",
@@ -60,7 +60,7 @@
template<bool kAllowReferences>
ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, JValue* value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
@@ -105,7 +105,7 @@
ALWAYS_INLINE inline static bool CheckReceiver(const ScopedFastNativeObjectAccess& soa,
jobject j_rcvr, mirror::Field** f,
mirror::Object** class_or_rcvr)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
soa.Self()->AssertThreadSuspensionIsAllowable();
mirror::Class* declaringClass = (*f)->GetDeclaringClass();
if ((*f)->IsStatic()) {
@@ -232,7 +232,7 @@
ALWAYS_INLINE inline static void SetFieldValue(mirror::Object* o, mirror::Field* f,
Primitive::Type field_type, bool allow_references,
const JValue& new_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
MemberOffset offset(f->GetOffset());
const bool is_volatile = f->IsVolatile();
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 9ce4a02..9ed0e7e 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -23,7 +23,6 @@
#include "scoped_fast_native_object_access.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
-#include "stack.h"
#include "thread_list.h"
namespace art {
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 858849f..472340c 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -305,7 +305,7 @@
static void copyToArray(jlong srcAddr, mirror::PrimitiveArray<T>* array,
size_t array_offset,
size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const T* src = reinterpret_cast<T*>(srcAddr);
size_t sz = size / sizeof(T);
size_t of = array_offset / sizeof(T);
@@ -318,7 +318,7 @@
static void copyFromArray(jlong dstAddr, mirror::PrimitiveArray<T>* array,
size_t array_offset,
size_t size)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
T* dst = reinterpret_cast<T*>(dstAddr);
size_t sz = size / sizeof(T);
size_t of = array_offset / sizeof(T);
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index e9b0d3c..f72a853 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -33,7 +33,7 @@
count(0),
caller(nullptr) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
bool do_count = false;
if (m == nullptr || m->IsRuntimeMethod()) {
diff --git a/runtime/oat_file_manager.cc b/runtime/oat_file_manager.cc
index 2e67ffe..6d4b2f6 100644
--- a/runtime/oat_file_manager.cc
+++ b/runtime/oat_file_manager.cc
@@ -226,7 +226,7 @@
static void IterateOverJavaDexFile(mirror::Object* dex_file,
ArtField* const cookie_field,
std::function<bool(const DexFile*)> fn)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_file != nullptr) {
mirror::LongArray* long_array = cookie_field->GetObject(dex_file)->AsLongArray();
if (long_array == nullptr) {
@@ -250,7 +250,7 @@
ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ClassLoader> class_loader,
MutableHandle<mirror::ObjectArray<mirror::Object>> dex_elements,
- std::function<bool(const DexFile*)> fn) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::function<bool(const DexFile*)> fn) REQUIRES_SHARED(Locks::mutator_lock_) {
// Handle this step.
// Handle as if this is the child PathClassLoader.
// The class loader is a PathClassLoader which inherits from BaseDexClassLoader.
@@ -286,7 +286,7 @@
static bool GetDexFilesFromClassLoader(
ScopedObjectAccessAlreadyRunnable& soa,
mirror::ClassLoader* class_loader,
- std::priority_queue<DexFileAndClassPair>* queue) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
if (ClassLinker::IsBootClassLoader(soa, class_loader)) {
// The boot class loader. We don't load any of these files, as we know we compiled against
// them correctly.
@@ -308,7 +308,7 @@
// Collect all the dex files.
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file->NumClassDefs() > 0) {
queue->emplace(cp_dex_file, 0U, true);
}
@@ -329,7 +329,7 @@
static void GetDexFilesFromDexElementsArray(
ScopedObjectAccessAlreadyRunnable& soa,
Handle<mirror::ObjectArray<mirror::Object>> dex_elements,
- std::priority_queue<DexFileAndClassPair>* queue) SHARED_REQUIRES(Locks::mutator_lock_) {
+ std::priority_queue<DexFileAndClassPair>* queue) REQUIRES_SHARED(Locks::mutator_lock_) {
if (dex_elements.Get() == nullptr) {
// Nothing to do.
return;
@@ -345,7 +345,7 @@
// Collect all the dex files.
auto GetDexFilesFn = [&] (const DexFile* cp_dex_file)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (cp_dex_file != nullptr && cp_dex_file->NumClassDefs() > 0) {
queue->emplace(cp_dex_file, 0U, true);
}
diff --git a/runtime/object_lock.h b/runtime/object_lock.h
index 7f02b37..5916f90 100644
--- a/runtime/object_lock.h
+++ b/runtime/object_lock.h
@@ -28,15 +28,15 @@
template <typename T>
class ObjectLock {
public:
- ObjectLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectLock(Thread* self, Handle<T> object) REQUIRES_SHARED(Locks::mutator_lock_);
- ~ObjectLock() SHARED_REQUIRES(Locks::mutator_lock_);
+ ~ObjectLock() REQUIRES_SHARED(Locks::mutator_lock_);
- void WaitIgnoringInterrupts() SHARED_REQUIRES(Locks::mutator_lock_);
+ void WaitIgnoringInterrupts() REQUIRES_SHARED(Locks::mutator_lock_);
- void Notify() SHARED_REQUIRES(Locks::mutator_lock_);
+ void Notify() REQUIRES_SHARED(Locks::mutator_lock_);
- void NotifyAll() SHARED_REQUIRES(Locks::mutator_lock_);
+ void NotifyAll() REQUIRES_SHARED(Locks::mutator_lock_);
private:
Thread* const self_;
@@ -48,9 +48,9 @@
template <typename T>
class ObjectTryLock {
public:
- ObjectTryLock(Thread* self, Handle<T> object) SHARED_REQUIRES(Locks::mutator_lock_);
+ ObjectTryLock(Thread* self, Handle<T> object) REQUIRES_SHARED(Locks::mutator_lock_);
- ~ObjectTryLock() SHARED_REQUIRES(Locks::mutator_lock_);
+ ~ObjectTryLock() REQUIRES_SHARED(Locks::mutator_lock_);
bool Acquired() const {
return acquired_;
diff --git a/runtime/openjdkjvm/Android.bp b/runtime/openjdkjvm/Android.bp
index 3e8dc8c..5ed1615 100644
--- a/runtime/openjdkjvm/Android.bp
+++ b/runtime/openjdkjvm/Android.bp
@@ -31,8 +31,8 @@
art_cc_library {
name: "libopenjdkjvmd",
defaults: [
- "libopenjdkjvm_defaults",
"art_debug_defaults",
+ "libopenjdkjvm_defaults",
],
shared_libs: ["libartd"],
}
diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp
index 4430248..977ef44 100644
--- a/runtime/openjdkjvmti/Android.bp
+++ b/runtime/openjdkjvmti/Android.bp
@@ -31,8 +31,8 @@
art_cc_library {
name: "libopenjdkjvmtid",
defaults: [
- "libopenjdkjvmti_defaults",
"art_debug_defaults",
+ "libopenjdkjvmti_defaults",
],
shared_libs: ["libartd"],
}
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 82e57b4..e3f92c7 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -35,7 +35,7 @@
mirror::Class* GenerateProxyClass(ScopedObjectAccess& soa, jobject jclass_loader,
const char* className,
const std::vector<mirror::Class*>& interfaces)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
mirror::Class* javaLangObject = class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;");
CHECK(javaLangObject != nullptr);
diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc
index a6e3693..dc6f4eb 100644
--- a/runtime/quick/inline_method_analyser.cc
+++ b/runtime/quick/inline_method_analyser.cc
@@ -141,7 +141,7 @@
// Used for a single invoke in a constructor. In that situation, the method verifier makes
// sure we invoke a constructor either in the same class or superclass with at least "this".
ArtMethod* GetTargetConstructor(ArtMethod* method, const Instruction* invoke_direct)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_EQ(invoke_direct->Opcode(), Instruction::INVOKE_DIRECT);
DCHECK_EQ(invoke_direct->VRegC_35c(),
method->GetCodeItem()->registers_size_ - method->GetCodeItem()->ins_size_);
@@ -212,7 +212,7 @@
uint16_t this_vreg,
uint16_t zero_vreg_mask,
/*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(IsInstructionIPut(new_iput->Opcode()));
uint32_t field_index = new_iput->VRegC_22c();
PointerSize pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
@@ -253,7 +253,7 @@
bool DoAnalyseConstructor(const DexFile::CodeItem* code_item,
ArtMethod* method,
/*inout*/ ConstructorIPutData (&iputs)[kMaxConstructorIPuts])
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// On entry we should not have any IPUTs yet.
DCHECK_EQ(0, std::count_if(
iputs,
@@ -367,7 +367,7 @@
bool AnalyseConstructor(const DexFile::CodeItem* code_item,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ConstructorIPutData iputs[kMaxConstructorIPuts];
if (!DoAnalyseConstructor(code_item, method, iputs)) {
return false;
diff --git a/runtime/quick/inline_method_analyser.h b/runtime/quick/inline_method_analyser.h
index 0e12d73..356e290 100644
--- a/runtime/quick/inline_method_analyser.h
+++ b/runtime/quick/inline_method_analyser.h
@@ -217,9 +217,9 @@
* @return true if the method is a candidate for inlining, false otherwise.
*/
static bool AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool AnalyseMethodCode(ArtMethod* method, InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static constexpr bool IsInstructionIGet(Instruction::Code opcode) {
return Instruction::IGET <= opcode && opcode <= Instruction::IGET_SHORT;
@@ -246,7 +246,7 @@
bool is_static,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool AnalyseReturnMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
static bool AnalyseConstMethod(const DexFile::CodeItem* code_item, InlineMethod* result);
static bool AnalyseIGetMethod(const DexFile::CodeItem* code_item,
@@ -254,13 +254,13 @@
bool is_static,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool AnalyseIPutMethod(const DexFile::CodeItem* code_item,
const MethodReference& method_ref,
bool is_static,
ArtMethod* method,
InlineMethod* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can we fast path instance field access in a verified accessor?
// If yes, computes field's offset and volatility and whether the method is static or not.
@@ -268,7 +268,7 @@
uint32_t field_idx,
bool is_put,
InlineIGetIPutData* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
};
} // namespace art
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 46d9e7f..55aba2b 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -30,6 +30,7 @@
#include "mirror/class_loader.h"
#include "mirror/throwable.h"
#include "oat_quick_method_header.h"
+#include "stack.h"
#include "stack_map.h"
#include "verifier/method_verifier.h"
@@ -59,13 +60,13 @@
public:
CatchBlockStackVisitor(Thread* self, Context* context, Handle<mirror::Throwable>* exception,
QuickExceptionHandler* exception_handler)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
exception_(exception),
exception_handler_(exception_handler) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
@@ -97,7 +98,7 @@
private:
bool HandleTryItems(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t dex_pc = DexFile::kDexNoIndex;
if (!method->IsNative()) {
dex_pc = GetDexPc();
@@ -284,7 +285,7 @@
Context* context,
QuickExceptionHandler* exception_handler,
bool single_frame)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
exception_handler_(exception_handler),
prev_shadow_frame_(nullptr),
@@ -304,7 +305,7 @@
return single_frame_deopt_quick_method_header_;
}
- void FinishStackWalk() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void FinishStackWalk() REQUIRES_SHARED(Locks::mutator_lock_) {
// This is the upcall, or the next full frame in single-frame deopt, or the
// code isn't deoptimizeable. We remember the frame and last pc so that we
// may long jump to them.
@@ -327,7 +328,7 @@
}
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
ArtMethod* method = GetMethod();
if (method == nullptr || single_frame_done_) {
@@ -396,7 +397,7 @@
void HandleOptimizingDeoptimization(ArtMethod* m,
ShadowFrame* new_frame,
const bool* updated_vregs)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
CodeInfo code_info = method_header->GetOptimizedCodeInfo();
uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
@@ -577,14 +578,14 @@
class InstrumentationStackVisitor : public StackVisitor {
public:
InstrumentationStackVisitor(Thread* self, size_t frame_depth)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
size_t current_frame_depth = GetFrameDepth();
if (current_frame_depth < frame_depth_) {
CHECK(GetMethod() != nullptr);
@@ -647,11 +648,11 @@
class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor {
public:
explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
show_details_(show_details) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = GetMethod();
if (show_details_) {
LOG(INFO) << "|> pc = " << std::hex << GetCurrentQuickFramePc();
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 74b7d0d..5592126 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -20,7 +20,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex.h"
-#include "stack.h" // StackReference
+#include "stack_reference.h"
namespace art {
@@ -29,14 +29,16 @@
} // namespace mirror
class ArtMethod;
class Context;
+class OatQuickMethodHeader;
class Thread;
class ShadowFrame;
+class StackVisitor;
// Manages exception delivery for Quick backend.
class QuickExceptionHandler {
public:
QuickExceptionHandler(Thread* self, bool is_deoptimization)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
NO_RETURN ~QuickExceptionHandler() {
LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
@@ -44,12 +46,12 @@
}
// Find the catch handler for the given exception.
- void FindCatch(mirror::Throwable* exception) SHARED_REQUIRES(Locks::mutator_lock_);
+ void FindCatch(mirror::Throwable* exception) REQUIRES_SHARED(Locks::mutator_lock_);
// Deoptimize the stack to the upcall/some code that's not deoptimizeable. For
// every compiled frame, we create a "copy" shadow frame that will be executed
// with the interpreter.
- void DeoptimizeStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeoptimizeStack() REQUIRES_SHARED(Locks::mutator_lock_);
// Deoptimize a single frame. It's directly triggered from compiled code. It
// has the following properties:
@@ -60,22 +62,22 @@
// the result of IsDeoptimizeable().
// - It can be either full-fragment, or partial-fragment deoptimization, depending
// on whether that single frame covers full or partial fragment.
- void DeoptimizeSingleFrame() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeoptimizeSingleFrame() REQUIRES_SHARED(Locks::mutator_lock_);
void DeoptimizePartialFragmentFixup(uintptr_t return_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Update the instrumentation stack by removing all methods that will be unwound
// by the exception being thrown.
// Return the return pc of the last frame that's unwound.
- uintptr_t UpdateInstrumentationStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t UpdateInstrumentationStack() REQUIRES_SHARED(Locks::mutator_lock_);
// Set up environment before delivering an exception to optimized code.
void SetCatchEnvironmentForOptimizedHandler(StackVisitor* stack_visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Long jump either to a catch handler or to the upcall.
- NO_RETURN void DoLongJump(bool smash_caller_saves = true) SHARED_REQUIRES(Locks::mutator_lock_);
+ NO_RETURN void DoLongJump(bool smash_caller_saves = true) REQUIRES_SHARED(Locks::mutator_lock_);
void SetHandlerQuickFrame(ArtMethod** handler_quick_frame) {
handler_quick_frame_ = handler_quick_frame;
@@ -128,7 +130,7 @@
// Walk the stack frames of the given thread, printing out non-runtime methods with their types
// of frames. Helps to verify that partial-fragment deopt really works as expected.
static void DumpFramesWithType(Thread* self, bool details = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
Thread* const self_;
@@ -159,7 +161,7 @@
bool full_fragment_done_;
void PrepareForLongJumpToInvokeStubOrInterpreterBridge()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(QuickExceptionHandler);
};
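Reviewer note (aside, not part of the diff): quick_exception_handler.h now pulls in only stack_reference.h and forward-declares OatQuickMethodHeader and StackVisitor, since the header names those types only through pointers. A minimal sketch of that header-slimming pattern, with hypothetical names:

// handler_sketch.h — forward declarations are enough for pointer-only uses
class WalkContext;    // hypothetical heavyweight type, held only by pointer here
class FrameVisitor;   // hypothetical type that appears only as a parameter

class HandlerSketch {
 public:
  explicit HandlerSketch(WalkContext* context) : context_(context) {}
  void SetUpHandler(FrameVisitor* visitor);  // defined in the .cc, which includes the full types
 private:
  WalkContext* const context_;
};

Translation units that include such a header no longer recompile when the full WalkContext or FrameVisitor definitions change, which is presumably the motivation for replacing the stack.h include here.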
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 5d32c09..a861861 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -48,39 +48,39 @@
bool kAlwaysUpdateField = false>
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* BarrierForRoot(MirrorType** root,
GcRootSource* gc_root_source = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// It's up to the implementation whether the given root gets updated
// whereas the return value must be an updated reference.
template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
ALWAYS_INLINE static MirrorType* BarrierForRoot(mirror::CompressedReference<MirrorType>* root,
GcRootSource* gc_root_source = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static bool IsDuringStartup();
// Without the holder object.
static void AssertToSpaceInvariant(mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
}
// With the holder object.
static void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset,
mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// With GcRootSource.
static void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// ALWAYS_INLINE on this caused a performance regression b/26744236.
- static mirror::Object* Mark(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ static mirror::Object* Mark(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
static mirror::Object* WhitePtr() {
return reinterpret_cast<mirror::Object*>(white_ptr_);
@@ -94,7 +94,7 @@
ALWAYS_INLINE static bool HasGrayReadBarrierPointer(mirror::Object* obj,
uintptr_t* out_rb_ptr_high_bits)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Note: These couldn't be constexpr pointers as reinterpret_cast isn't compatible with them.
static constexpr uintptr_t white_ptr_ = 0x0; // Not marked.
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index 49b6a38..f04d41d 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -62,7 +62,7 @@
// If "obj" is an array, return the number of elements in the array.
// Otherwise, return zero.
-static size_t GetElementCount(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+static size_t GetElementCount(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
// We assume the special cleared value isn't an array in the if statement below.
DCHECK(!Runtime::Current()->GetClearedJniWeakGlobal()->IsArrayInstance());
if (obj == nullptr || !obj->IsArrayInstance()) {
@@ -78,7 +78,7 @@
// or equivalent to the original.
static void DumpSummaryLine(std::ostream& os, mirror::Object* obj, size_t element_count,
int identical, int equiv)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (obj == nullptr) {
os << " null reference (count=" << equiv << ")\n";
return;
diff --git a/runtime/reference_table.h b/runtime/reference_table.h
index f90ccd1..992ded0 100644
--- a/runtime/reference_table.h
+++ b/runtime/reference_table.h
@@ -41,22 +41,22 @@
ReferenceTable(const char* name, size_t initial_size, size_t max_size);
~ReferenceTable();
- void Add(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Add(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
- void Remove(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Remove(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const;
- void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
typedef std::vector<GcRoot<mirror::Object>,
TrackingAllocator<GcRoot<mirror::Object>, kAllocatorTagReferenceTable>> Table;
static void Dump(std::ostream& os, Table& entries)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
friend class IndirectReferenceTable; // For Dump.
std::string name_;
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 8a531d9..f2af3da 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -29,7 +29,7 @@
#include "mirror/object_array-inl.h"
#include "nth_caller_visitor.h"
#include "scoped_thread_state_change.h"
-#include "stack.h"
+#include "stack_reference.h"
#include "well_known_classes.h"
namespace art {
@@ -72,7 +72,7 @@
num_bytes_ += 4;
}
- void Append(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void Append(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
Append(StackReference<mirror::Object>::FromMirrorPtr(obj).AsVRegValue());
}
@@ -96,7 +96,7 @@
void BuildArgArrayFromVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* receiver, va_list ap)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -132,7 +132,7 @@
void BuildArgArrayFromJValues(const ScopedObjectAccessAlreadyRunnable& soa,
mirror::Object* receiver, jvalue* args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
Append(receiver);
@@ -171,7 +171,7 @@
}
void BuildArgArrayFromFrame(ShadowFrame* shadow_frame, uint32_t arg_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Set receiver if non-null (method is not static)
size_t cur_arg = arg_offset;
if (!shadow_frame->GetMethod()->IsStatic()) {
@@ -206,7 +206,7 @@
static void ThrowIllegalPrimitiveArgumentException(const char* expected,
const char* found_descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ThrowIllegalArgumentException(
StringPrintf("Invalid primitive conversion from %s to %s", expected,
PrettyDescriptor(found_descriptor).c_str()).c_str());
@@ -214,7 +214,7 @@
bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
mirror::ObjectArray<mirror::Object>* args, ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::TypeList* classes = m->GetParameterTypeList();
// Set receiver if non-null (method is not static)
if (receiver != nullptr) {
@@ -346,7 +346,7 @@
};
static void CheckMethodArguments(JavaVMExt* vm, ArtMethod* m, uint32_t* args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const DexFile::TypeList* params = m->GetParameterTypeList();
if (params == nullptr) {
return; // No arguments so nothing to check.
@@ -424,7 +424,7 @@
}
static ArtMethod* FindVirtualMethod(mirror::Object* receiver, ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(method, kRuntimePointerSize);
}
@@ -432,7 +432,7 @@
static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
ArtMethod* method, ArgArray* arg_array, JValue* result,
const char* shorty)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t* args = arg_array->GetArray();
if (UNLIKELY(soa.Env()->check_jni)) {
CheckMethodArguments(soa.Vm(), method->GetInterfaceMethodIfProxy(kRuntimePointerSize), args);
@@ -442,7 +442,7 @@
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We want to make sure that the stack is not within a small distance from the
// protected region in case we are calling into a leaf function whose stack
// check has been elided.
@@ -740,7 +740,7 @@
}
static std::string UnboxingFailureKind(ArtField* f)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (f != nullptr) {
return "field " + PrettyField(f, false);
}
@@ -750,7 +750,7 @@
static bool UnboxPrimitive(mirror::Object* o,
mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
bool unbox_for_result = (f == nullptr);
if (!dst_class->IsPrimitive()) {
if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
diff --git a/runtime/reflection.h b/runtime/reflection.h
index d9c38c1..579c6b1 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -33,60 +33,60 @@
class ShadowFrame;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, ArtField* f,
JValue* unboxed_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
const JValue& src, JValue* dst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeWithJValues(const ScopedObjectAccessAlreadyRunnable& soa, jobject obj, jmethodID mid,
jvalue* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithJValues(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, jvalue* args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
JValue InvokeVirtualOrInterfaceWithVarArgs(const ScopedObjectAccessAlreadyRunnable& soa,
jobject obj, jmethodID mid, va_list args)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// num_frames is number of frames we look up for access check.
jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject method, jobject receiver,
jobject args, size_t num_frames = 1)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ALWAYS_INLINE bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
uint32_t access_flags, mirror::Class** calling_class, size_t num_frames)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// This version takes a known calling class.
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
uint32_t access_flags, mirror::Class* calling_class)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the calling class by using a stack visitor, may return null for unattached native threads.
mirror::Class* GetCallingClass(Thread* self, size_t num_frames)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void InvalidReceiverError(mirror::Object* o, mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UpdateReference(Thread* self, jobject obj, mirror::Object* result)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace art
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index 016f3c7..4876ff0 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -86,7 +86,7 @@
mirror::Object** receiver,
bool is_static, const char* method_name,
const char* method_signature)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
const char* class_name = is_static ? "StaticLeafMethods" : "NonStaticLeafMethods";
jobject jclass_loader(LoadDex(class_name));
Thread* self = Thread::Current();
diff --git a/runtime/runtime-inl.h b/runtime/runtime-inl.h
index 3245ba0..2eb0bf7 100644
--- a/runtime/runtime-inl.h
+++ b/runtime/runtime-inl.h
@@ -69,13 +69,13 @@
}
inline ArtMethod* Runtime::GetCalleeSaveMethod(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(HasCalleeSaveMethod(type));
return GetCalleeSaveMethodUnchecked(type);
}
inline ArtMethod* Runtime::GetCalleeSaveMethodUnchecked(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return reinterpret_cast<ArtMethod*>(callee_save_methods_[type]);
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index f3fcd34..a365a73 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -90,6 +90,7 @@
#include "mirror/throwable.h"
#include "monitor.h"
#include "native/dalvik_system_DexFile.h"
+#include "native/dalvik_system_InMemoryDexClassLoader_DexData.h"
#include "native/dalvik_system_VMDebug.h"
#include "native/dalvik_system_VMRuntime.h"
#include "native/dalvik_system_VMStack.h"
@@ -1389,6 +1390,7 @@
void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) {
register_dalvik_system_DexFile(env);
+ register_dalvik_system_InMemoryDexClassLoader_DexData(env);
register_dalvik_system_VMDebug(env);
register_dalvik_system_VMRuntime(env);
register_dalvik_system_VMStack(env);
@@ -2008,7 +2010,7 @@
}
bool Runtime::IsDeoptimizeable(uintptr_t code) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
return !heap_->IsInBootImageOatFile(reinterpret_cast<void *>(code));
}
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 5f89d6a..44f765a 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -290,15 +290,15 @@
}
// Is the given object the special object used to mark a cleared JNI weak global?
- bool IsClearedJniWeakGlobal(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsClearedJniWeakGlobal(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Get the special object used to mark a cleared JNI weak global.
- mirror::Object* GetClearedJniWeakGlobal() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Throwable* GetPreAllocatedOutOfMemoryError() SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Throwable* GetPreAllocatedOutOfMemoryError() REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const std::vector<std::string>& GetProperties() const {
return properties_;
@@ -312,33 +312,33 @@
return "2.1.0";
}
- void DisallowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
- void AllowNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
- void BroadcastForNewSystemWeaks() SHARED_REQUIRES(Locks::mutator_lock_);
+ void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
+ void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
+ void BroadcastForNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
// clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit image roots, only used for hprof since the GC uses the image space mod union table
// instead.
- void VisitImageRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the roots we can do safely do concurrently.
void VisitConcurrentRoots(RootVisitor* visitor,
VisitRootFlags flags = kVisitRootFlagAllRoots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the non thread roots, we can do this with mutators unpaused.
void VisitNonThreadRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitTransactionRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the thread roots.
- void VisitThreadRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitThreadRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
// Flip thread roots from from-space refs to to-space refs.
size_t FlipThreadRoots(Closure* thread_flip_visitor, Closure* flip_callback,
@@ -347,17 +347,17 @@
// Visit all other roots which must be done with mutators suspended.
void VisitNonConcurrentRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sweep system weaks, the system weak is deleted if the visitor return null. Otherwise, the
// system weak is updated to be the visitor's returned value.
void SweepSystemWeaks(IsMarkedVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Constant roots are the roots which never change after the runtime is initialized, they only
// need to be visited once per GC cycle.
void VisitConstantRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution
ArtMethod* GetResolutionMethod();
@@ -366,9 +366,9 @@
return resolution_method_ != nullptr;
}
- void SetResolutionMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
- ArtMethod* CreateResolutionMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime imt conflicts.
ArtMethod* GetImtConflictMethod();
@@ -379,11 +379,11 @@
}
void FixupConflictTables();
- void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
- void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
+ void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that describes all callee saves being spilled to the stack.
enum CalleeSaveType {
@@ -399,17 +399,17 @@
}
ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
QuickMethodFrameInfo GetCalleeSaveMethodFrameInfo(CalleeSaveType type) const {
return callee_save_method_frame_infos_[type];
}
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[type]);
@@ -423,7 +423,7 @@
void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
- ArtMethod* CreateCalleeSaveMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -480,9 +480,9 @@
bool IsTransactionAborted() const;
void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ThrowTransactionAbortError(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
bool is_volatile) const;
@@ -499,7 +499,7 @@
void RecordWriteFieldReference(mirror::Object* obj, MemberOffset field_offset,
mirror::Object* value, bool is_volatile) const;
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void RecordStrongStringInsertion(mirror::String* s) const
REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringInsertion(mirror::String* s) const
@@ -592,7 +592,7 @@
}
// Called from class linker.
- void SetSentinel(mirror::Object* sentinel) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
// Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
LinearAlloc* CreateLinearAlloc();
@@ -640,7 +640,7 @@
// Returns if the code can be deoptimized. Code may be compiled with some
// optimization that makes it impossible to deoptimize.
- bool IsDeoptimizeable(uintptr_t code) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a saved copy of the environment (getenv/setenv values).
// Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
diff --git a/runtime/scoped_thread_state_change.h b/runtime/scoped_thread_state_change.h
index d1cc09a..8a1aca5 100644
--- a/runtime/scoped_thread_state_change.h
+++ b/runtime/scoped_thread_state_change.h
@@ -130,7 +130,7 @@
* it's best if we don't grab a mutex.
*/
template<typename T>
- T AddLocalReference(mirror::Object* obj) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ T AddLocalReference(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
DCHECK_NE(obj, Runtime::Current()->GetClearedJniWeakGlobal());
@@ -139,32 +139,32 @@
template<typename T>
T Decode(jobject obj) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return down_cast<T>(Self()->DecodeJObject(obj));
}
ArtField* DecodeField(jfieldID fid) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<ArtField*>(fid);
}
- jfieldID EncodeField(ArtField* field) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ jfieldID EncodeField(ArtField* field) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<jfieldID>(field);
}
- ArtMethod* DecodeMethod(jmethodID mid) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* DecodeMethod(jmethodID mid) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<ArtMethod*>(mid);
}
- jmethodID EncodeMethod(ArtMethod* method) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ jmethodID EncodeMethod(ArtMethod* method) const REQUIRES_SHARED(Locks::mutator_lock_) {
Locks::mutator_lock_->AssertSharedHeld(Self());
DCHECK(IsRunnable()); // Don't work with raw objects in non-runnable states.
return reinterpret_cast<jmethodID>(method);
diff --git a/runtime/simulator/Android.bp b/runtime/simulator/Android.bp
index 05f44e3..ec0b49e 100644
--- a/runtime/simulator/Android.bp
+++ b/runtime/simulator/Android.bp
@@ -39,10 +39,10 @@
}
cc_library_host_shared {
- name: "libart-simulatord",
+ name: "libartd-simulator",
defaults: [
- "libart_simulator_defaults",
"art_debug_defaults",
+ "libart_simulator_defaults",
],
shared_libs: [
"libartd",
diff --git a/runtime/stack.cc b/runtime/stack.cc
index ababf78..ec492ed 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -165,7 +165,7 @@
}
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Object* StackVisitor::GetThisObject() const {
DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), kRuntimePointerSize);
@@ -547,7 +547,7 @@
next_dex_pc_(0) {
}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
if (found_frame_) {
ArtMethod* method = GetMethod();
if (method != nullptr && !method->IsRuntimeMethod()) {
@@ -580,7 +580,7 @@
explicit DescribeStackVisitor(Thread* thread_in)
: StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
return true;
}
@@ -610,7 +610,7 @@
}
static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
return;
}
@@ -708,7 +708,7 @@
// Counts the number of references in the parameter list of the corresponding method.
// Note: This does _not_ include "this" for non-static methods.
static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
uint32_t shorty_len;
const char* shorty = method->GetShorty(&shorty_len);
uint32_t refs = 0;
diff --git a/runtime/stack.h b/runtime/stack.h
index 850d2a4..e9ed497 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -25,9 +25,9 @@
#include "base/mutex.h"
#include "dex_file.h"
#include "gc_root.h"
-#include "mirror/object_reference.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier.h"
+#include "stack_reference.h"
#include "verify_object.h"
namespace art {
@@ -45,6 +45,7 @@
class ShadowFrame;
class StackVisitor;
class Thread;
+union JValue;
// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
@@ -61,11 +62,6 @@
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
-// A reference from the shadow stack to a MirrorType object within the Java heap.
-template<class MirrorType>
-class MANAGED StackReference : public mirror::CompressedReference<MirrorType> {
-};
-
// Forward declaration. Just calls the destructor.
struct ShadowFrameDeleter;
using ShadowFrameAllocaUniquePtr = std::unique_ptr<ShadowFrame, ShadowFrameDeleter>;
@@ -80,21 +76,21 @@
public:
// Add the given object to the list of monitors, that is, objects that have been locked. This
// will not throw (but be skipped if there is an exception pending on entry).
- void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void AddMonitor(Thread* self, mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Try to remove the given object from the monitor list, indicating an unlock operation.
// This will throw an IllegalMonitorStateException (clearing any already pending exception), in
// case that there wasn't a lock recorded for the object.
void RemoveMonitorOrThrow(Thread* self,
- const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ const mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Check whether all acquired monitors have been released. This will potentially throw an
// IllegalMonitorStateException, clearing any already pending exception. Returns true if the
// check shows that everything is OK wrt/ lock counting, false otherwise.
- bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CheckAllMonitorsReleasedOrThrow(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
template <typename T, typename... Args>
- void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitMonitors(T visitor, Args&&... args) REQUIRES_SHARED(Locks::mutator_lock_) {
if (monitors_ != nullptr) {
// Visitors may change the Object*. Be careful with the foreach loop.
for (mirror::Object*& obj : *monitors_) {
@@ -239,7 +235,7 @@
// If this returns non-null then this does not mean the vreg is currently a reference
// on non-moving collectors. Check that the raw reg with GetVReg is equal to this if not certain.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- mirror::Object* GetVRegReference(size_t i) const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetVRegReference(size_t i) const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
mirror::Object* ref;
if (HasReferenceArray()) {
@@ -311,7 +307,7 @@
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetVRegReference(size_t i, mirror::Object* val) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetVRegReference(size_t i, mirror::Object* val) REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK_LT(i, NumberOfVRegs());
if (kVerifyFlags & kVerifyWrites) {
VerifyObject(val);
@@ -326,14 +322,14 @@
}
}
- ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(method_ != nullptr);
return method_;
}
- mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetThisObject(uint16_t num_ins) const REQUIRES_SHARED(Locks::mutator_lock_);
bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
if (HasReferenceArray()) {
@@ -479,7 +475,7 @@
: RootInfo(kRootJavaFrame, thread_id), stack_visitor_(stack_visitor), vreg_(vreg) {
}
virtual void Describe(std::ostream& os) const OVERRIDE
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
const StackVisitor* const stack_visitor_;
@@ -556,7 +552,7 @@
return OFFSETOF_MEMBER(ManagedStack, top_shadow_frame_);
}
- size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_);
bool ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const;
@@ -577,25 +573,25 @@
protected:
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
public:
virtual ~StackVisitor() {}
// Return 'true' if we should continue to visit more frames, 'false' to stop.
- virtual bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
void WalkStack(bool include_transitions = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
Thread* GetThread() const {
return thread_;
}
- ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetOuterMethod() const {
return *GetCurrentQuickFrame();
@@ -605,48 +601,48 @@
return cur_shadow_frame_ != nullptr;
}
- uint32_t GetDexPc(bool abort_on_failure = true) const SHARED_REQUIRES(Locks::mutator_lock_);
+ uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetThisObject() const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);
- size_t GetNativePcOffset() const SHARED_REQUIRES(Locks::mutator_lock_);
+ size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the height of the stack in the managed stack frames, including transitions.
- size_t GetFrameHeight() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetFrameHeight() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetNumFrames() - cur_depth_ - 1;
}
// Returns a frame ID for JDWP use, starting from 1.
- size_t GetFrameId() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetFrameId() REQUIRES_SHARED(Locks::mutator_lock_) {
return GetFrameHeight() + 1;
}
- size_t GetNumFrames() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetNumFrames() REQUIRES_SHARED(Locks::mutator_lock_) {
if (num_frames_ == 0) {
num_frames_ = ComputeNumFrames(thread_, walk_kind_);
}
return num_frames_;
}
- size_t GetFrameDepth() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t GetFrameDepth() REQUIRES_SHARED(Locks::mutator_lock_) {
return cur_depth_;
}
// Get the method and dex pc immediately after the one that's currently being visited.
bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Values will be set in debugger shadow frames. Debugger will make sure deoptimization
// is triggered to make the values effective.
bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Values will be set in debugger shadow frames. Debugger will make sure deoptimization
// is triggered to make the values effective.
@@ -655,7 +651,7 @@
uint64_t new_value,
VRegKind kind_lo,
VRegKind kind_hi)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uintptr_t* GetGPRAddress(uint32_t reg) const;
@@ -671,9 +667,9 @@
return reinterpret_cast<uint32_t*>(vreg_addr);
}
- uintptr_t GetReturnPc() const SHARED_REQUIRES(Locks::mutator_lock_);
+ uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);
- void SetReturnPc(uintptr_t new_ret_pc) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Return sp-relative offset for a Dalvik virtual register, compiler
@@ -763,23 +759,23 @@
return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
}
- std::string DescribeLocation() const SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);
static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- static void DescribeStack(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_);
+ static void DescribeStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);
const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
return cur_oat_quick_method_header_;
}
- QuickMethodFrameInfo GetCurrentQuickFrameInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
+ QuickMethodFrameInfo GetCurrentQuickFrameInfo() const REQUIRES_SHARED(Locks::mutator_lock_);
private:
// Private constructor known in the case that num_frames_ has already been computed.
StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
@@ -796,25 +792,25 @@
uintptr_t GetFPR(uint32_t reg) const;
bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
uint32_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
VRegKind kind_lo, VRegKind kind_hi,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
uint64_t* val) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void SanityCheckFrame() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);
- InlineInfo GetCurrentInlineInfo() const SHARED_REQUIRES(Locks::mutator_lock_);
+ InlineInfo GetCurrentInlineInfo() const REQUIRES_SHARED(Locks::mutator_lock_);
Thread* const thread_;
const StackWalkKind walk_kind_;
diff --git a/runtime/stack_reference.h b/runtime/stack_reference.h
new file mode 100644
index 0000000..3d37b76
--- /dev/null
+++ b/runtime/stack_reference.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_STACK_REFERENCE_H_
+#define ART_RUNTIME_STACK_REFERENCE_H_
+
+#include "base/macros.h"
+#include "mirror/object_reference.h"
+
+namespace art {
+
+// A reference from the shadow stack to a MirrorType object within the Java heap.
+template<class MirrorType>
+class PACKED(4) StackReference : public mirror::CompressedReference<MirrorType> {
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_STACK_REFERENCE_H_
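Alongside the rename, this change hoists StackReference<> out of stack.h into the new stack_reference.h, so headers that only need the reference type no longer have to pull in all of stack.h (which now includes the new header, as the hunk above shows). MANAGED, which the old definition used, is ART's alias for PACKED(4), so spelling it as PACKED(4) here preserves the 4-byte packing while dropping a dependency on the macro that defines MANAGED. The essential property is that a reference slot stays 32 bits wide even with 64-bit native pointers; a minimal stand-alone sketch of that idea, using assumed names and no ART dependencies, is:

// Illustrative sketch (assumed names, not the real mirror::CompressedReference):
// the slot is always 4 bytes, even on 64-bit targets, because ART keeps the
// managed heap mapped in the low 4 GiB.
#include <cstdint>

template <typename T>
class __attribute__((packed, aligned(4))) CompressedRef {
 public:
  static CompressedRef FromPtr(T* ptr) {
    CompressedRef ref;
    ref.value_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr));
    return ref;
  }
  T* AsPtr() const { return reinterpret_cast<T*>(static_cast<uintptr_t>(value_)); }

 private:
  uint32_t value_ = 0u;  // always a 4-byte slot
};

static_assert(sizeof(CompressedRef<int>) == 4, "reference slots stay 32-bit");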
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 79b9f02..dde3640 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1362,7 +1362,7 @@
struct StackDumpVisitor : public StackVisitor {
StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
os(os_in),
can_allocate(can_allocate_in),
@@ -1377,7 +1377,7 @@
}
}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
return true;
@@ -1425,7 +1425,7 @@
}
static void DumpLockedObject(mirror::Object* o, void* context)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
std::ostream& os = *reinterpret_cast<std::ostream*>(context);
os << " - locked ";
if (o == nullptr) {
@@ -1462,7 +1462,7 @@
};
static bool ShouldShowNativeStack(const Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ThreadState state = thread->GetState();
// In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
@@ -1975,11 +1975,11 @@
class CountStackDepthVisitor : public StackVisitor {
public:
explicit CountStackDepthVisitor(Thread* thread)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
depth_(0), skip_depth_(0), skipping_(true) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
// We want to skip frames up to and including the exception's constructor.
// Note we also skip the frame if it doesn't have a method (namely the callee
// save frame)
@@ -2025,7 +2025,7 @@
trace_(nullptr),
pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
- bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
+ bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
// Allocate method trace as an object array where the first element is a pointer array that
// contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring
// class of the ArtMethod pointers.
@@ -2061,7 +2061,7 @@
self_->EndAssertNoThreadSuspension(nullptr);
}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (trace_ == nullptr) {
return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
}
@@ -2086,7 +2086,7 @@
return true;
}
- mirror::PointerArray* GetTraceMethodsAndPCs() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::PointerArray* GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
return down_cast<mirror::PointerArray*>(trace_->Get(0));
}
@@ -2266,7 +2266,7 @@
}
static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* method = self->GetCurrentMethod(nullptr);
return method != nullptr
? method->GetDeclaringClass()->GetClassLoader()
@@ -2670,13 +2670,13 @@
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_object_(nullptr),
method_(nullptr),
dex_pc_(0),
abort_on_error_(abort_on_error) {}
- bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
if (m->IsRuntimeMethod()) {
// Continue if this is a runtime method.
@@ -2716,13 +2716,13 @@
class ReferenceMapVisitor : public StackVisitor {
public:
ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
// We are visiting the references in compiled frames, so we do not need
// to know the inlined frames.
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
visitor_(visitor) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (false) {
LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
<< StringPrintf("@ PC:%04x", GetDexPc());
@@ -2736,7 +2736,7 @@
return true;
}
- void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = shadow_frame->GetMethod();
VisitDeclaringClass(m);
DCHECK(m != nullptr);
@@ -2762,7 +2762,7 @@
// is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
// the threads do not all hold the heap bitmap lock for parallel GC.
void VisitDeclaringClass(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
NO_THREAD_SAFETY_ANALYSIS {
mirror::Class* klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
// klass can be null for runtime methods.
@@ -2798,7 +2798,7 @@
}
}
- void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
DCHECK(cur_quick_frame != nullptr);
ArtMethod* m = *cur_quick_frame;
@@ -2852,7 +2852,7 @@
RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
}
@@ -2925,7 +2925,7 @@
class VerifyRootVisitor : public SingleRootVisitor {
public:
void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
- OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
+ OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
VerifyObject(root);
}
};
diff --git a/runtime/thread.h b/runtime/thread.h
index 1c2d4ab..d248123 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -166,18 +166,18 @@
static Thread* Current();
// On a runnable thread, check for pending thread suspension request and handle if pending.
- void AllowThreadSuspension() SHARED_REQUIRES(Locks::mutator_lock_);
+ void AllowThreadSuspension() REQUIRES_SHARED(Locks::mutator_lock_);
// Process pending thread suspension request and handle if pending.
- void CheckSuspend() SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckSuspend() REQUIRES_SHARED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
mirror::Object* thread_peer)
REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
REQUIRES(Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Translates 172 to pAllocArrayFromCode and so on.
template<PointerSize size_of_pointers>
@@ -191,17 +191,17 @@
bool dump_native_stack = true,
BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpJavaStack(std::ostream& os) const
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
// case we use 'tid' to identify the thread, and we'll include as much information as we can.
static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ThreadState GetState() const {
DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
@@ -248,7 +248,7 @@
// mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
void FullSuspendCheck()
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Transition from non-runnable to runnable state acquiring share on mutator_lock_.
ALWAYS_INLINE ThreadState TransitionFromSuspendedToRunnable()
@@ -297,7 +297,7 @@
size_t NumberOfHeldMutexes() const;
- bool HoldsLock(mirror::Object*) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool HoldsLock(mirror::Object*) const REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Changes the priority of this thread to match that of the java.lang.Thread object.
@@ -326,19 +326,19 @@
// Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
// allocation, or locking.
void GetThreadName(std::string& name) const;
// Sets the thread's name.
- void SetThreadName(const char* name) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetThreadName(const char* name) REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
uint64_t GetCpuMicroTime() const;
- mirror::Object* GetPeer() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetPeer() const REQUIRES_SHARED(Locks::mutator_lock_) {
CHECK(tlsPtr_.jpeer == nullptr);
return tlsPtr_.opeer;
}
@@ -357,23 +357,23 @@
return tlsPtr_.exception != nullptr;
}
- mirror::Throwable* GetException() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Throwable* GetException() const REQUIRES_SHARED(Locks::mutator_lock_) {
return tlsPtr_.exception;
}
void AssertPendingException() const;
- void AssertPendingOOMException() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void AssertPendingOOMException() const REQUIRES_SHARED(Locks::mutator_lock_);
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
- void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetException(mirror::Throwable* new_exception) REQUIRES_SHARED(Locks::mutator_lock_);
- void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) {
+ void ClearException() REQUIRES_SHARED(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
}
// Find catch block and perform long jump to appropriate exception handler.
- NO_RETURN void QuickDeliverException() SHARED_REQUIRES(Locks::mutator_lock_);
+ NO_RETURN void QuickDeliverException() REQUIRES_SHARED(Locks::mutator_lock_);
Context* GetLongJumpContext();
void ReleaseLongJumpContext(Context* context) {
@@ -395,12 +395,12 @@
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether the given exception was thrown by the current Java method being executed
// (Note that this includes native Java methods).
bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetTopOfStack(ArtMethod** top_method) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
@@ -417,23 +417,23 @@
// If 'msg' is null, no detail message is set.
void ThrowNewException(const char* exception_class_descriptor, const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// If 'msg' is null, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
__attribute__((format(printf, 3, 4)))
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_);
// OutOfMemoryError is special, because we need to pre-allocate an instance.
// Only the GC should call this.
- void ThrowOutOfMemoryError(const char* msg) SHARED_REQUIRES(Locks::mutator_lock_)
+ void ThrowOutOfMemoryError(const char* msg) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Roles::uninterruptible_);
static void Startup();
@@ -446,15 +446,15 @@
}
// Convert a jobject into an Object*
- mirror::Object* DecodeJObject(jobject obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::Object* DecodeJObject(jobject obj) const REQUIRES_SHARED(Locks::mutator_lock_);
// Checks if the weak global ref has been cleared by the GC without decoding it.
- bool IsJWeakCleared(jweak obj) const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsJWeakCleared(jweak obj) const REQUIRES_SHARED(Locks::mutator_lock_);
- mirror::Object* GetMonitorEnterObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Object* GetMonitorEnterObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
return tlsPtr_.monitor_enter_object;
}
- void SetMonitorEnterObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) {
+ void SetMonitorEnterObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_) {
tlsPtr_.monitor_enter_object = obj;
}
@@ -510,7 +510,7 @@
// and space efficient to compute than the StackTraceElement[].
template<bool kTransactionActive>
jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
// StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
@@ -519,15 +519,15 @@
static jobjectArray InternalStackTraceToStackTraceElementArray(
const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
jobjectArray output_array = nullptr, int* stack_depth = nullptr)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool HasDebuggerShadowFrames() const {
return tlsPtr_.frame_id_to_shadow_frame != nullptr;
}
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
- ALWAYS_INLINE void VerifyStack() SHARED_REQUIRES(Locks::mutator_lock_);
+ ALWAYS_INLINE void VerifyStack() REQUIRES_SHARED(Locks::mutator_lock_);
//
// Offsets of various members of native Thread class, used by compiled code.
@@ -555,7 +555,7 @@
}
// Deoptimize the Java stack.
- void DeoptimizeWithDeoptimizationException(JValue* result) SHARED_REQUIRES(Locks::mutator_lock_);
+ void DeoptimizeWithDeoptimizationException(JValue* result) REQUIRES_SHARED(Locks::mutator_lock_);
private:
template<PointerSize pointer_size>
@@ -702,7 +702,7 @@
}
// Set the stack end to the value to be used during a stack overflow.
- void SetStackEndForStackOverflow() SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetStackEndForStackOverflow() REQUIRES_SHARED(Locks::mutator_lock_);
// Set the stack end to the value to be used during regular execution.
void ResetDefaultStackEnd() {
@@ -765,7 +765,7 @@
}
// Number of references allocated in JNI ShadowFrames on this thread.
- size_t NumJniShadowFrameReferences() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumJniShadowFrameReferences() const REQUIRES_SHARED(Locks::mutator_lock_) {
return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
}
@@ -773,7 +773,7 @@
size_t NumHandleReferences();
// Number of references allocated in handle scopes & JNI shadow frames on this thread.
- size_t NumStackReferences() SHARED_REQUIRES(Locks::mutator_lock_) {
+ size_t NumStackReferences() REQUIRES_SHARED(Locks::mutator_lock_) {
return NumHandleReferences() + NumJniShadowFrameReferences();
}
@@ -781,7 +781,7 @@
bool HandleScopeContains(jobject obj) const;
void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
HandleScope* GetTopHandleScope() {
return tlsPtr_.top_handle_scope;
@@ -905,32 +905,32 @@
bool is_reference,
bool from_code,
mirror::Throwable* exception)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PopDeoptimizationContext(JValue* result, mirror::Throwable** exception, bool* from_code)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void AssertHasDeoptimizationContext()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present = true);
// For debugger, find the shadow frame that corresponds to a frame id.
// Or return null if there is none.
ShadowFrame* FindDebuggerShadowFrame(size_t frame_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// For debugger, find the bool array that keeps track of the updated vreg set
// for a frame id.
- bool* GetUpdatedVRegFlags(size_t frame_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool* GetUpdatedVRegFlags(size_t frame_id) REQUIRES_SHARED(Locks::mutator_lock_);
// For debugger, find the shadow frame that corresponds to a frame id. If
// one doesn't exist yet, create one and track it in frame_id_to_shadow_frame.
ShadowFrame* FindOrCreateDebuggerShadowFrame(size_t frame_id,
uint32_t num_vregs,
ArtMethod* method,
uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Delete the entry that maps from frame_id to shadow_frame.
void RemoveDebuggerShadowFrameMapping(size_t frame_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
return tlsPtr_.instrumentation_stack;
@@ -1016,7 +1016,7 @@
// Push an object onto the allocation stack.
bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set the thread local allocation pointers to the given pointers.
void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
@@ -1129,7 +1129,7 @@
template<bool kTransactionActive>
void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
jobject thread_name, jint thread_priority)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
// Dbg::ManageDeoptimization.
@@ -1148,25 +1148,25 @@
return old_state;
}
- void VerifyStackImpl() SHARED_REQUIRES(Locks::mutator_lock_);
+ void VerifyStackImpl() REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpState(std::ostream& os) const SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpState(std::ostream& os) const REQUIRES_SHARED(Locks::mutator_lock_);
void DumpStack(std::ostream& os,
bool dump_native_stack = true,
BacktraceMap* backtrace_map = nullptr) const
REQUIRES(!Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Out-of-line conveniences for debugging in gdb.
static Thread* CurrentFromGdb(); // Like Thread::Current.
// Like Thread::Dump(std::cerr).
- void DumpFromGdb() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void DumpFromGdb() const REQUIRES_SHARED(Locks::mutator_lock_);
static void* CreateCallback(void* arg);
void HandleUncaughtExceptions(ScopedObjectAccess& soa)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void RemoveFromThreadGroup(ScopedObjectAccess& soa) REQUIRES_SHARED(Locks::mutator_lock_);
// Initialize a thread.
//
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 49f65e1..5880085 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -142,11 +142,11 @@
!Locks::thread_suspend_count_lock_);
void VisitRoots(RootVisitor* visitor) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRootsForSuspendedThreads(RootVisitor* visitor)
REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return a copy of the thread list.
std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 56a26de..23591c2 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -57,7 +57,7 @@
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_trace_(Trace::AllocStackTrace()) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
// Ignore runtime frames (in particular callee save).
if (!m->IsRuntimeMethod()) {
@@ -220,7 +220,7 @@
*buf++ = static_cast<uint8_t>(val >> 56);
}
-static void GetSample(Thread* thread, void* arg) SHARED_REQUIRES(Locks::mutator_lock_) {
+static void GetSample(Thread* thread, void* arg) REQUIRES_SHARED(Locks::mutator_lock_) {
BuildStackTraceVisitor build_trace_visitor(thread);
build_trace_visitor.WalkStack();
std::vector<ArtMethod*>* stack_trace = build_trace_visitor.GetStackTrace();
@@ -747,7 +747,7 @@
ArtMethod* method,
uint32_t dex_pc,
ArtField* field ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -758,7 +758,7 @@
uint32_t dex_pc,
ArtField* field ATTRIBUTE_UNUSED,
const JValue& field_value ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
@@ -793,13 +793,13 @@
void Trace::ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED,
mirror::Throwable* exception_object ATTRIBUTE_UNUSED)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
void Trace::Branch(Thread* /*thread*/, ArtMethod* method,
uint32_t /*dex_pc*/, int32_t /*dex_pc_offset*/)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
LOG(ERROR) << "Unexpected branch event in tracing" << PrettyMethod(method);
}
diff --git a/runtime/trace.h b/runtime/trace.h
index 9b29fb9..824b150 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -137,43 +137,43 @@
uint32_t GetClockOverheadNanoSeconds();
void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
// InstrumentationListener implementation.
void MethodEntered(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void MethodExited(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc,
const JValue& return_value)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void MethodUnwind(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void DexPcMoved(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t new_dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_)
OVERRIDE;
void FieldRead(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void FieldWritten(Thread* thread, mirror::Object* this_object,
ArtMethod* method, uint32_t dex_pc, ArtField* field,
const JValue& field_value)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void Branch(Thread* thread, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
void InvokeVirtualOrInterface(Thread* thread,
mirror::Object* this_object,
ArtMethod* caller,
uint32_t dex_pc,
ArtMethod* callee)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_) OVERRIDE;
// Reuse an old stack trace if it exists, otherwise allocate a new one.
static std::vector<ArtMethod*>* AllocStackTrace();
// Clear and store an old stack trace for later use.
@@ -202,26 +202,26 @@
// This causes the negative annotations to incorrectly have a false positive. TODO: Figure out
// how to annotate this.
NO_THREAD_SAFETY_ANALYSIS;
- void FinishTracing() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ void FinishTracing() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
instrumentation::Instrumentation::InstrumentationEvent event,
uint32_t thread_clock_diff, uint32_t wall_clock_diff)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_, !*streaming_lock_);
// Methods to output traced methods and threads.
void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
REQUIRES(!*unique_methods_lock_);
void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
// Methods to register seen entities in streaming mode. The methods return true if the entity
// is newly discovered.
bool RegisterMethod(ArtMethod* method)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(streaming_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(streaming_lock_);
bool RegisterThread(Thread* thread)
REQUIRES(streaming_lock_);
@@ -235,10 +235,10 @@
REQUIRES(!*unique_methods_lock_);
ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!*unique_methods_lock_);
std::string GetMethodLine(ArtMethod* method) REQUIRES(!*unique_methods_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
- SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!*unique_methods_lock_);
// Singleton instance of the Trace or null when no method tracing is active.
static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 8ff0614..bc9c640 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -47,10 +47,10 @@
void Abort(const std::string& abort_message)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void ThrowAbortError(Thread* self, const std::string* abort_message)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAborted() REQUIRES(!log_lock_);
// Record object field changes.
@@ -79,7 +79,7 @@
// Record array change.
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Record intern string table changes.
void RecordStrongStringInsertion(mirror::String* s)
@@ -97,12 +97,12 @@
// Abort transaction by undoing all recorded changes.
void Rollback()
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!log_lock_);
void VisitRoots(RootVisitor* visitor)
REQUIRES(!log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
class ObjectLog : public ValueObject {
@@ -115,8 +115,8 @@
void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
- void Undo(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Undo(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return field_values_.size();
@@ -141,7 +141,7 @@
void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
- const FieldValue& field_value) SHARED_REQUIRES(Locks::mutator_lock_);
+ const FieldValue& field_value) REQUIRES_SHARED(Locks::mutator_lock_);
// Maps field's offset to its value.
std::map<uint32_t, FieldValue> field_values_;
@@ -151,7 +151,7 @@
public:
void LogValue(size_t index, uint64_t value);
- void Undo(mirror::Array* obj) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Undo(mirror::Array* obj) REQUIRES_SHARED(Locks::mutator_lock_);
size_t Size() const {
return array_values_.size();
@@ -159,7 +159,7 @@
private:
void UndoArrayWrite(mirror::Array* array, Primitive::Type array_type, size_t index,
- uint64_t value) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint64_t value) REQUIRES_SHARED(Locks::mutator_lock_);
// Maps index to value.
// TODO use JValue instead ?
@@ -182,9 +182,9 @@
}
void Undo(InternTable* intern_table)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(Locks::intern_table_lock_);
- void VisitRoots(RootVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_);
+ void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
private:
mirror::String* str_;
@@ -198,24 +198,24 @@
void UndoObjectModifications()
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UndoArrayModifications()
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UndoInternStringTableModifications()
REQUIRES(Locks::intern_table_lock_)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitObjectLogs(RootVisitor* visitor)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitArrayLogs(RootVisitor* visitor)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitStringLogs(RootVisitor* visitor)
REQUIRES(log_lock_)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const std::string& GetAbortMessage() REQUIRES(!log_lock_);
diff --git a/runtime/utf.h b/runtime/utf.h
index 7c9c333..cbb32fa 100644
--- a/runtime/utf.h
+++ b/runtime/utf.h
@@ -18,7 +18,6 @@
#define ART_RUNTIME_UTF_H_
#include "base/macros.h"
-#include "base/mutex.h"
#include <stddef.h>
#include <stdint.h>
@@ -31,11 +30,6 @@
*/
namespace art {
-namespace mirror {
- template<class T> class PrimitiveArray;
- typedef PrimitiveArray<uint16_t> CharArray;
-} // namespace mirror
-
/*
* Returns the number of UTF-16 characters in the given modified UTF-8 string.
*/
@@ -80,9 +74,6 @@
/*
* The java.lang.String hashCode() algorithm.
*/
-int32_t ComputeUtf16Hash(mirror::CharArray* chars, int32_t offset, size_t char_count)
- SHARED_REQUIRES(Locks::mutator_lock_);
-
template<typename MemoryType>
int32_t ComputeUtf16Hash(const MemoryType* chars, size_t char_count) {
uint32_t hash = 0;
diff --git a/runtime/utils.h b/runtime/utils.h
index 8433492..2389ce7 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -134,22 +134,22 @@
// "[[I" would be "int[][]", "[Ljava/lang/String;" would be
// "java.lang.String[]", and so forth.
std::string PrettyDescriptor(mirror::String* descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyDescriptor(const char* descriptor);
std::string PrettyDescriptor(mirror::Class* klass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyDescriptor(Primitive::Type type);
// Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
// "int a.b.C.f" (depending on the value of 'with_type').
std::string PrettyField(ArtField* f, bool with_type = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyField(uint32_t field_idx, const DexFile& dex_file, bool with_type = true);
// Returns a human-readable signature for 'm'. Something like "a.b.C.m" or
// "a.b.C.m(II)V" (depending on the value of 'with_signature').
std::string PrettyMethod(ArtMethod* m, bool with_signature = true)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
std::string PrettyMethod(uint32_t method_idx, const DexFile& dex_file, bool with_signature = true);
// Returns a human-readable form of the name of the *class* of the given object.
@@ -157,7 +157,7 @@
// be "java.lang.String". Given an array of int, the output would be "int[]".
// Given String.class, the output would be "java.lang.Class<java.lang.String>".
std::string PrettyTypeOf(mirror::Object* obj)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a human-readable form of the type at an index in the specified dex file.
// Example outputs: char[], java.lang.String.
@@ -166,11 +166,11 @@
// Returns a human-readable form of the name of the given class.
// Given String.class, the output would be "java.lang.Class<java.lang.String>".
std::string PrettyClass(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a human-readable form of the name of the given class with its class loader.
std::string PrettyClassAndClassLoader(mirror::Class* c)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a human-readable version of the Java part of the access flags, e.g., "private static "
// (note the trailing whitespace).
@@ -205,10 +205,10 @@
// Returns the JNI native function name for the non-overloaded method 'm'.
std::string JniShortName(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the JNI native function name for the overloaded method 'm'.
std::string JniLongName(ArtMethod* m)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool ReadFileToString(const std::string& file_name, std::string* result);
bool PrintFileToLog(const std::string& file_name, LogSeverity level);
@@ -311,7 +311,7 @@
return pointer_size == 4 || pointer_size == 8;
}
-void DumpMethodCFG(ArtMethod* method, std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+void DumpMethodCFG(ArtMethod* method, std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
void DumpMethodCFG(const DexFile* dex_file, uint32_t dex_method_idx, std::ostream& os);
static inline const void* EntryPointToCodePointer(const void* entry_point) {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 40f12e9..589e71c 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -159,7 +159,7 @@
&dex_file,
dex_cache,
class_loader,
- class_def,
+ *class_def,
callbacks,
allow_soft_failures,
log_level,
@@ -190,7 +190,7 @@
MethodVerifier::FailureData MethodVerifier::VerifyMethods(Thread* self,
ClassLinker* linker,
const DexFile* dex_file,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
ClassDataItemIterator* it,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
@@ -214,7 +214,7 @@
continue;
}
previous_method_idx = method_idx;
- InvokeType type = it->GetMethodInvokeType(*class_def);
+ InvokeType type = it->GetMethodInvokeType(class_def);
ArtMethod* method = linker->ResolveMethod<ClassLinker::kNoICCECheckForCache>(
*dex_file, method_idx, dex_cache, class_loader, nullptr, type);
if (method == nullptr) {
@@ -247,7 +247,7 @@
} else {
// If we didn't log a hard failure before, print the header of the message.
*error_string += "Verifier rejected class ";
- *error_string += PrettyDescriptor(dex_file->GetClassDescriptor(*class_def));
+ *error_string += PrettyDescriptor(dex_file->GetClassDescriptor(class_def));
*error_string += ":";
}
*error_string += " ";
@@ -264,23 +264,22 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
CompilerCallbacks* callbacks,
bool allow_soft_failures,
LogSeverity log_level,
std::string* error) {
- DCHECK(class_def != nullptr);
ScopedTrace trace(__FUNCTION__);
// A class must not be both abstract and final.
- if ((class_def->access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) {
+ if ((class_def.access_flags_ & (kAccAbstract | kAccFinal)) == (kAccAbstract | kAccFinal)) {
*error = "Verifier rejected class ";
- *error += PrettyDescriptor(dex_file->GetClassDescriptor(*class_def));
+ *error += PrettyDescriptor(dex_file->GetClassDescriptor(class_def));
*error += ": class is abstract and final.";
return kHardFailure;
}
- const uint8_t* class_data = dex_file->GetClassData(*class_def);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
if (class_data == nullptr) {
// empty class, probably a marker interface
return kNoFailure;
@@ -327,7 +326,7 @@
// warning.
std::string tmp =
StringPrintf("Class %s failed lock verification and will run slower.",
- PrettyDescriptor(dex_file->GetClassDescriptor(*class_def)).c_str());
+ PrettyDescriptor(dex_file->GetClassDescriptor(class_def)).c_str());
if (!gPrintedDxMonitorText) {
tmp = tmp + "\nCommon causes for lock verification issues are non-optimized dex code\n"
"and incorrect proguard optimizations.";
@@ -355,7 +354,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
@@ -436,7 +435,7 @@
if (callbacks != nullptr) {
// Let the interested party know that we failed the class.
- ClassReference ref(dex_file, dex_file->GetIndexForClassDef(*class_def));
+ ClassReference ref(dex_file, dex_file->GetIndexForClassDef(class_def));
callbacks->ClassRejected(ref);
}
}
@@ -463,7 +462,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags) {
@@ -499,7 +498,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
uint32_t dex_method_idx,
ArtMethod* method,
@@ -544,7 +543,6 @@
is_constructor_(false),
link_(nullptr) {
self->PushVerifier(this);
- DCHECK(class_def != nullptr);
}
MethodVerifier::~MethodVerifier() {
@@ -561,7 +559,7 @@
m->GetDexFile(),
dex_cache,
class_loader,
- &m->GetClassDef(),
+ m->GetClassDef(),
m->GetCodeItem(),
m->GetDexMethodIndex(),
m,
@@ -616,7 +614,7 @@
m->GetDexFile(),
dex_cache,
class_loader,
- &m->GetClassDef(),
+ m->GetClassDef(),
m->GetCodeItem(),
m->GetDexMethodIndex(),
m,
@@ -656,7 +654,7 @@
m->GetDexFile(),
dex_cache,
class_loader,
- &m->GetClassDef(),
+ m->GetClassDef(),
m->GetCodeItem(),
m->GetDexMethodIndex(),
m,
@@ -761,7 +759,7 @@
return false;
}
}
- if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+ if ((class_def_.GetJavaAccessFlags() & kAccInterface) != 0) {
// Interface methods must be public and abstract (if default methods are disabled).
uint32_t kRequired = kAccPublic;
if ((method_access_flags_ & kRequired) != kRequired) {
@@ -792,7 +790,7 @@
return false;
}
- if ((class_def_->GetJavaAccessFlags() & kAccInterface) != 0) {
+ if ((class_def_.GetJavaAccessFlags() & kAccInterface) != 0) {
// Interfaces may always have static initializers for their fields. If we are running with
// default methods enabled we also allow other public, static, non-final methods to have code.
// Otherwise that is the only type of method allowed.
@@ -3986,7 +3984,7 @@
++pos_;
}
- const char* GetDescriptor() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const char* GetDescriptor() REQUIRES_SHARED(Locks::mutator_lock_) {
return res_method_->GetTypeDescriptorFromTypeIdx(params_->GetTypeItem(pos_).type_idx_);
}
@@ -4023,7 +4021,7 @@
}
if (reference_class->IsInterface()) {
// TODO Can we verify anything else.
- if (class_idx == class_def_->class_idx_) {
+ if (class_idx == class_def_.class_idx_) {
Fail(VERIFY_ERROR_CLASS_CHANGE) << "Cannot invoke-super on self as interface";
return nullptr;
}
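The method_verifier.cc hunks above all follow the same shape: the ClassDef used to arrive as a possibly-null pointer and every consumer dereferenced it after a DCHECK, whereas a reference makes the non-null contract part of the signature. A minimal before/after sketch with a stand-in ClassDef type (the struct and function names below are illustrative, not ART's; assert stands in for DCHECK):

#include <cassert>
#include <cstdint>

struct ClassDef { uint32_t access_flags_ = 0; };

// Before: the caller may pass null, so each use needs a guard and a dereference.
uint32_t FlagsOld(const ClassDef* class_def) {
  assert(class_def != nullptr);
  return class_def->access_flags_;
}

// After: a reference cannot be null, so the guard and the indirection disappear.
uint32_t FlagsNew(const ClassDef& class_def) {
  return class_def.access_flags_;
}

int main() {
  ClassDef def;
  FlagsOld(&def);  // old call sites took an address ...
  FlagsNew(def);   // ... new call sites pass the object directly.
}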
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 5fe95c2..d4e12f7 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -154,17 +154,17 @@
bool allow_soft_failures,
LogSeverity log_level,
std::string* error)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static FailureKind VerifyClass(Thread* self,
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
CompilerCallbacks* callbacks,
bool allow_soft_failures,
LogSeverity log_level,
std::string* error)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static MethodVerifier* VerifyMethodAndDump(Thread* self,
VariableIndentationOutputStream* vios,
@@ -172,10 +172,10 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item, ArtMethod* method,
uint32_t method_access_flags)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint8_t EncodePcToReferenceMapData() const;
@@ -198,26 +198,26 @@
// Dump the state of the verifier, namely each instruction, what flags are set on it, register
// information
- void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
- void Dump(VariableIndentationOutputStream* vios) SHARED_REQUIRES(Locks::mutator_lock_);
+ void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(VariableIndentationOutputStream* vios) REQUIRES_SHARED(Locks::mutator_lock_);
// Fills 'monitor_enter_dex_pcs' with the dex pcs of the monitor-enter instructions corresponding
// to the locks held at 'dex_pc' in method 'm'.
static void FindLocksAtDexPc(ArtMethod* m, uint32_t dex_pc,
std::vector<uint32_t>* monitor_enter_dex_pcs)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the accessed field corresponding to the quick instruction's field
// offset at 'dex_pc' in method 'm'.
static ArtField* FindAccessedFieldAtDexPc(ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the invoked method corresponding to the quick instruction's vtable
// index at 'dex_pc' in method 'm'.
static ArtMethod* FindInvokedMethodAtDexPc(ArtMethod* m, uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- static void Init() SHARED_REQUIRES(Locks::mutator_lock_);
+ static void Init() REQUIRES_SHARED(Locks::mutator_lock_);
static void Shutdown();
bool CanLoadClasses() const {
@@ -228,24 +228,24 @@
// Run verification on the method. Returns true if verification completes and false if the input
// has an irrecoverable corruption.
- bool Verify() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool Verify() REQUIRES_SHARED(Locks::mutator_lock_);
// Describe VRegs at the given dex pc.
std::vector<int32_t> DescribeVRegs(uint32_t dex_pc);
static void VisitStaticRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void VisitRoots(RootVisitor* visitor, const RootInfo& roots)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Accessors used by the compiler via CompilerCallback
const DexFile::CodeItem* CodeItem() const;
RegisterLine* GetRegLine(uint32_t dex_pc);
ALWAYS_INLINE const InstructionFlags& GetInstructionFlags(size_t index) const;
ALWAYS_INLINE InstructionFlags& GetInstructionFlags(size_t index);
- mirror::ClassLoader* GetClassLoader() SHARED_REQUIRES(Locks::mutator_lock_);
- mirror::DexCache* GetDexCache() SHARED_REQUIRES(Locks::mutator_lock_);
- ArtMethod* GetMethod() const SHARED_REQUIRES(Locks::mutator_lock_);
+ mirror::ClassLoader* GetClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
+ mirror::DexCache* GetDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
+ ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);
MethodReference GetMethodReference() const;
uint32_t GetAccessFlags() const;
bool HasCheckCasts() const;
@@ -256,15 +256,15 @@
}
const RegType& ResolveCheckedClass(uint32_t class_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the method of a quick invoke or null if it cannot be found.
ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
bool is_range, bool allow_failure)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the access field of a quick field access (iget/iput-quick) or null
// if it cannot be found.
ArtField* GetQuickFieldAccess(const Instruction* inst, RegisterLine* reg_line)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
uint32_t GetEncounteredFailureTypes() {
return encountered_failure_types_;
@@ -283,7 +283,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
const DexFile::CodeItem* code_item,
uint32_t method_idx,
ArtMethod* method,
@@ -293,10 +293,10 @@
bool need_precise_constants,
bool verify_to_dump,
bool allow_thread_suspension)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void UninstantiableError(const char* descriptor);
- static bool IsInstantiableOrPrimitive(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+ static bool IsInstantiableOrPrimitive(mirror::Class* klass) REQUIRES_SHARED(Locks::mutator_lock_);
// Is the method being verified a constructor? See the comment on the field.
bool IsConstructor() const {
@@ -330,7 +330,7 @@
static FailureData VerifyMethods(Thread* self,
ClassLinker* linker,
const DexFile* dex_file,
- const DexFile::ClassDef* class_def,
+ const DexFile::ClassDef& class_def,
ClassDataItemIterator* it,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
@@ -339,7 +339,7 @@
LogSeverity log_level,
bool need_precise_constants,
std::string* error_string)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform verification on a single method.
@@ -356,7 +356,7 @@
const DexFile* dex_file,
Handle<mirror::DexCache> dex_cache,
Handle<mirror::ClassLoader> class_loader,
- const DexFile::ClassDef* class_def_idx,
+ const DexFile::ClassDef& class_def_idx,
const DexFile::CodeItem* code_item,
ArtMethod* method,
uint32_t method_access_flags,
@@ -365,18 +365,18 @@
LogSeverity log_level,
bool need_precise_constants,
std::string* hard_failure_msg)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
- void FindLocksAtDexPc() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FindLocksAtDexPc() REQUIRES_SHARED(Locks::mutator_lock_);
ArtField* FindAccessedFieldAtDexPc(uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* FindInvokedMethodAtDexPc(uint32_t dex_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
SafeMap<uint32_t, std::set<uint32_t>>& FindStringInitMap()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Compute the width of the instruction at each address in the instruction stream, and store it in
@@ -404,7 +404,7 @@
* Returns "false" if something in the exception table looks fishy, but we're expecting the
* exception table to be somewhat sane.
*/
- bool ScanTryCatchBlocks() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool ScanTryCatchBlocks() REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform static verification on all instructions in a method.
@@ -510,11 +510,11 @@
bool* selfOkay);
/* Perform detailed code-flow analysis on a single method. */
- bool VerifyCodeFlow() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool VerifyCodeFlow() REQUIRES_SHARED(Locks::mutator_lock_);
// Set the register types for the first instruction in the method based on the method signature.
// This has the side-effect of validating the signature.
- bool SetTypesFromSignature() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool SetTypesFromSignature() REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform code flow on a method.
@@ -562,7 +562,7 @@
* reordering by specifying that you can't execute the new-instance instruction if a register
* contains an uninitialized instance created by that same instruction.
*/
- bool CodeFlowVerifyMethod() SHARED_REQUIRES(Locks::mutator_lock_);
+ bool CodeFlowVerifyMethod() REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Perform verification for a single instruction.
@@ -574,33 +574,33 @@
* addresses. Does not set or clear any other flags in "insn_flags_".
*/
bool CodeFlowVerifyInstruction(uint32_t* start_guess)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of a new array instruction
void VerifyNewArray(const Instruction* inst, bool is_filled, bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Helper to perform verification on puts of primitive type.
void VerifyPrimitivePut(const RegType& target_type, const RegType& insn_type,
- const uint32_t vregA) SHARED_REQUIRES(Locks::mutator_lock_);
+ const uint32_t vregA) REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of an aget instruction. The destination register's type will be set to
// be that of component type of the array unless the array type is unknown, in which case a
// bottom type inferred from the type of instruction is used. is_primitive is false for an
// aget-object.
void VerifyAGet(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool is_primitive) REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of an aput instruction.
void VerifyAPut(const Instruction* inst, const RegType& insn_type,
- bool is_primitive) SHARED_REQUIRES(Locks::mutator_lock_);
+ bool is_primitive) REQUIRES_SHARED(Locks::mutator_lock_);
// Lookup instance field and fail for resolution violations
ArtField* GetInstanceField(const RegType& obj_type, int field_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Lookup static field and fail for resolution violations
- ArtField* GetStaticField(int field_idx) SHARED_REQUIRES(Locks::mutator_lock_);
+ ArtField* GetStaticField(int field_idx) REQUIRES_SHARED(Locks::mutator_lock_);
// Perform verification of an iget/sget/iput/sput instruction.
enum class FieldAccessType { // private
@@ -610,16 +610,16 @@
template <FieldAccessType kAccType>
void VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type,
bool is_primitive, bool is_static)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <FieldAccessType kAccType>
void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Resolves a class based on an index and performs access checks to ensure the referrer can
// access the resolved class.
const RegType& ResolveClassAndCheckAccess(uint32_t class_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* For the "move-exception" instruction at "work_insn_idx_", which must be at an exception handler
@@ -627,7 +627,7 @@
* exception handler can be found or if the Join of exception types fails.
*/
const RegType& GetCaughtExceptionType()
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Resolves a method based on an index and performs access checks to ensure
@@ -635,7 +635,7 @@
* Does not throw exceptions.
*/
ArtMethod* ResolveMethodAndCheckAccess(uint32_t method_idx, MethodType method_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify the arguments to a method. We're executing in "method", making
@@ -660,22 +660,22 @@
* set appropriately).
*/
ArtMethod* VerifyInvocationArgs(const Instruction* inst, MethodType method_type, bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Similar checks to the above, but on the proto. Will be used when the method cannot be
// resolved.
void VerifyInvocationArgsUnresolvedMethod(const Instruction* inst, MethodType method_type,
bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
template <class T>
ArtMethod* VerifyInvocationArgsFromIterator(T* it, const Instruction* inst,
MethodType method_type, bool is_range,
ArtMethod* res_method)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify that the target instruction is not "move-exception". It's important that the only way
@@ -707,18 +707,18 @@
* Returns "false" if an error is encountered.
*/
bool UpdateRegisters(uint32_t next_insn, RegisterLine* merge_line, bool update_merge_line)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Return the register type for the method.
- const RegType& GetMethodReturnType() SHARED_REQUIRES(Locks::mutator_lock_);
+ const RegType& GetMethodReturnType() REQUIRES_SHARED(Locks::mutator_lock_);
// Get a type representing the declaring class of the method.
- const RegType& GetDeclaringClass() SHARED_REQUIRES(Locks::mutator_lock_);
+ const RegType& GetDeclaringClass() REQUIRES_SHARED(Locks::mutator_lock_);
InstructionFlags* CurrentInsnFlags();
const RegType& DetermineCat1Constant(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Try to create a register type from the given class. In case a precise type is requested, but
// the class is not instantiable, a soft error (of type NO_CLASS) will be enqueued and a
@@ -726,7 +726,7 @@
// Note: we reuse NO_CLASS as this will throw an exception at runtime, when the failing class is
// actually touched.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The thread we're verifying on.
Thread* const self_;
@@ -759,7 +759,7 @@
Handle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
// The class loader for the declaring class of the method.
Handle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
- const DexFile::ClassDef* const class_def_; // The class def of the declaring class of the method.
+ const DexFile::ClassDef& class_def_; // The class def of the declaring class of the method.
const DexFile::CodeItem* const code_item_; // The code item containing the code for the method.
const RegType* declaring_class_; // Lazily computed reg type of the method's declaring class.
// Instruction widths and flags, one entry per code unit.
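The other mechanical change running through this header is the SHARED_REQUIRES to REQUIRES_SHARED spelling of the thread-safety annotation. A minimal sketch of what such an annotation buys under Clang's -Wthread-safety, assuming the macro wraps the requires_shared_capability attribute (the expansion and the Lock type below are assumptions for illustration, not ART's definitions):

#if defined(__clang__)
#define TS_ATTR(x) __attribute__((x))
#else
#define TS_ATTR(x)
#endif
#define CAPABILITY(name)     TS_ATTR(capability(name))
#define REQUIRES_SHARED(...) TS_ATTR(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...)  TS_ATTR(acquire_shared_capability(__VA_ARGS__))
#define RELEASE_SHARED(...)  TS_ATTR(release_shared_capability(__VA_ARGS__))

struct CAPABILITY("mutex") Lock {
  void ReaderLock()   ACQUIRE_SHARED() {}
  void ReaderUnlock() RELEASE_SHARED() {}
};

Lock gLock;
int gValue = 0;

// With clang -Wthread-safety, calling Read() without holding gLock for
// reading produces a warning at the call site.
int Read() REQUIRES_SHARED(gLock) { return gValue; }

int main() {
  gLock.ReaderLock();
  int v = Read();
  gLock.ReaderUnlock();
  return v;
}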
diff --git a/runtime/verifier/method_verifier_test.cc b/runtime/verifier/method_verifier_test.cc
index b036313..4fd581d 100644
--- a/runtime/verifier/method_verifier_test.cc
+++ b/runtime/verifier/method_verifier_test.cc
@@ -30,7 +30,7 @@
class MethodVerifierTest : public CommonRuntimeTest {
protected:
void VerifyClass(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
ASSERT_TRUE(descriptor != nullptr);
Thread* self = Thread::Current();
mirror::Class* klass = class_linker_->FindSystemClass(self, descriptor.c_str());
@@ -47,7 +47,7 @@
}
void VerifyDexFile(const DexFile& dex)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Verify all the classes defined in this file
for (size_t i = 0; i < dex.NumClassDefs(); i++) {
const DexFile::ClassDef& class_def = dex.GetClassDef(i);
diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc
index 85daba9..5c19969 100644
--- a/runtime/verifier/reg_type.cc
+++ b/runtime/verifier/reg_type.cc
@@ -279,7 +279,7 @@
}
}
-std::string UndefinedType::Dump() const SHARED_REQUIRES(Locks::mutator_lock_) {
+std::string UndefinedType::Dump() const REQUIRES_SHARED(Locks::mutator_lock_) {
return "Undefined";
}
@@ -517,11 +517,11 @@
}
}
-bool RegType::IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_) {
+bool RegType::IsJavaLangObject() const REQUIRES_SHARED(Locks::mutator_lock_) {
return IsReference() && GetClass()->IsObjectClass();
}
-bool RegType::IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
+bool RegType::IsObjectArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsUnresolvedTypes()) {
DCHECK(!IsUnresolvedMergedReference());
@@ -542,7 +542,7 @@
}
}
-bool RegType::IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_) {
+bool RegType::IsArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_) {
if (IsUnresolvedTypes()) {
DCHECK(!IsUnresolvedMergedReference());
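The IsArrayTypes/IsObjectArrayTypes hunks above fall back to the type descriptor when the class is unresolved. A small sketch of that descriptor-only test (simplified; the real code also rules out merged and super-class placeholders first):

#include <iostream>
#include <string>

bool IsArrayDescriptor(const std::string& d) {
  return !d.empty() && d[0] == '[';
}

bool IsObjectArrayDescriptor(const std::string& d) {
  // One '[' followed by a reference ("L...;") or a further array dimension.
  return d.size() >= 2 && d[0] == '[' && (d[1] == 'L' || d[1] == '[');
}

int main() {
  std::cout << IsArrayDescriptor("[I") << IsObjectArrayDescriptor("[I") << "\n";  // 10
  std::cout << IsArrayDescriptor("[Ljava/lang/String;")
            << IsObjectArrayDescriptor("[Ljava/lang/String;") << "\n";            // 11
}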
diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h
index 4837490..c3ed77a 100644
--- a/runtime/verifier/reg_type.h
+++ b/runtime/verifier/reg_type.h
@@ -35,6 +35,7 @@
namespace art {
namespace mirror {
class Class;
+class ClassLoader;
} // namespace mirror
class ArenaBitVector;
@@ -118,7 +119,7 @@
}
// The high half that corresponds to this low half
const RegType& HighHalf(RegTypeCache* cache) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsConstantBoolean() const;
virtual bool IsConstantChar() const { return false; }
@@ -171,20 +172,20 @@
return result;
}
virtual bool HasClassVirtual() const { return false; }
- bool IsJavaLangObject() const SHARED_REQUIRES(Locks::mutator_lock_);
- virtual bool IsArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
- virtual bool IsObjectArrayTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsJavaLangObject() const REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual bool IsArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_);
+ virtual bool IsObjectArrayTypes() const REQUIRES_SHARED(Locks::mutator_lock_);
Primitive::Type GetPrimitiveType() const;
bool IsJavaLangObjectArray() const
- SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsInstantiableTypes() const SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsInstantiableTypes() const REQUIRES_SHARED(Locks::mutator_lock_);
const StringPiece& GetDescriptor() const {
DCHECK(HasClass() ||
(IsUnresolvedTypes() && !IsUnresolvedMergedReference() &&
!IsUnresolvedSuperClass()));
return descriptor_;
}
- mirror::Class* GetClass() const SHARED_REQUIRES(Locks::mutator_lock_) {
+ mirror::Class* GetClass() const REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(!IsUnresolvedReference());
DCHECK(!klass_.IsNull()) << Dump();
DCHECK(HasClass());
@@ -192,25 +193,25 @@
}
uint16_t GetId() const { return cache_id_; }
const RegType& GetSuperClass(RegTypeCache* cache) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual std::string Dump() const
- SHARED_REQUIRES(Locks::mutator_lock_) = 0;
+ REQUIRES_SHARED(Locks::mutator_lock_) = 0;
// Can this type access other?
bool CanAccess(const RegType& other) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type access a member with the given properties?
bool CanAccessMember(mirror::Class* klass, uint32_t access_flags) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type be assigned by src?
// Note: Object and interface types may always be assigned to one another, see
// comment on
// ClassJoin.
bool IsAssignableFrom(const RegType& src) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this array type potentially be assigned by src.
// This function is necessary as array types are valid even if their component types are not,
@@ -221,13 +222,13 @@
// (both are reference types).
bool CanAssignArray(const RegType& src, RegTypeCache& reg_types,
Handle<mirror::ClassLoader> class_loader, bool* soft_error) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Can this type be assigned by src? Variant of IsAssignableFrom that doesn't
// allow assignment to
// an interface from an Object.
bool IsStrictlyAssignableFrom(const RegType& src) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Are these RegTypes the same?
bool Equals(const RegType& other) const { return GetId() == other.GetId(); }
@@ -235,10 +236,10 @@
// Compute the merge of this register from one edge (path) with incoming_type
// from another.
const RegType& Merge(const RegType& incoming_type, RegTypeCache* reg_types) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Same as above, but also handles the case where incoming_type == this.
const RegType& SafeMerge(const RegType& incoming_type, RegTypeCache* reg_types) const
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (Equals(incoming_type)) {
return *this;
}
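Merge/SafeMerge above compute the join of two register types arriving from different control-flow edges, with SafeMerge short-circuiting when both edges already agree. A toy sketch of that lattice idea (the real RegTypeCache lattice, with constants, precise references, and class joins, is far richer):

#include <iostream>

enum class Ty { Zero, Integer, Float, Reference, Conflict };

Ty Merge(Ty a, Ty b) {
  if (a == b) return a;                                             // SafeMerge's Equals() fast path.
  if (a == Ty::Zero && b == Ty::Reference) return Ty::Reference;    // null merges into a reference.
  if (b == Ty::Zero && a == Ty::Reference) return Ty::Reference;
  return Ty::Conflict;                                              // otherwise the register is unusable.
}

int main() {
  std::cout << (Merge(Ty::Integer, Ty::Integer) == Ty::Integer) << "\n";   // 1
  std::cout << (Merge(Ty::Integer, Ty::Float) == Ty::Conflict) << "\n";    // 1
}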
@@ -262,12 +263,12 @@
* [1] Java bytecode verification: algorithms and formalizations, Xavier Leroy
*/
static mirror::Class* ClassJoin(mirror::Class* s, mirror::Class* t)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
virtual ~RegType() {}
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void* operator new(size_t size) noexcept {
return ::operator new(size);
@@ -279,7 +280,7 @@
protected:
RegType(mirror::Class* klass,
const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: descriptor_(descriptor),
klass_(klass),
cache_id_(cache_id) {
@@ -288,7 +289,7 @@
}
}
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
const StringPiece descriptor_;
mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes.
@@ -298,7 +299,7 @@
private:
static bool AssignableFrom(const RegType& lhs, const RegType& rhs, bool strict)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
DISALLOW_COPY_AND_ASSIGN(RegType);
};
@@ -308,7 +309,7 @@
public:
bool IsConflict() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Conflict instance.
static const ConflictType* GetInstance() PURE;
@@ -317,14 +318,14 @@
static const ConflictType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Destroy the singleton instance.
static void Destroy();
private:
ConflictType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
static const ConflictType* instance_;
@@ -337,7 +338,7 @@
public:
bool IsUndefined() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
// Get the singleton Undefined instance.
static const UndefinedType* GetInstance() PURE;
@@ -346,14 +347,14 @@
static const UndefinedType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Destroy the singleton instance.
static void Destroy();
private:
UndefinedType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
static const UndefinedType* instance_;
@@ -362,7 +363,7 @@
class PrimitiveType : public RegType {
public:
PrimitiveType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
bool HasClassVirtual() const OVERRIDE { return true; }
};
@@ -370,23 +371,23 @@
class Cat1Type : public PrimitiveType {
public:
Cat1Type(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
class IntegerType : public Cat1Type {
public:
bool IsInteger() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const IntegerType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const IntegerType* GetInstance() PURE;
static void Destroy();
private:
IntegerType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const IntegerType* instance_;
};
@@ -394,17 +395,17 @@
class BooleanType FINAL : public Cat1Type {
public:
bool IsBoolean() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const BooleanType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const BooleanType* GetInstance() PURE;
static void Destroy();
private:
BooleanType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const BooleanType* instance_;
@@ -413,17 +414,17 @@
class ByteType FINAL : public Cat1Type {
public:
bool IsByte() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const ByteType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const ByteType* GetInstance() PURE;
static void Destroy();
private:
ByteType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ByteType* instance_;
};
@@ -431,17 +432,17 @@
class ShortType FINAL : public Cat1Type {
public:
bool IsShort() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const ShortType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const ShortType* GetInstance() PURE;
static void Destroy();
private:
ShortType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const ShortType* instance_;
};
@@ -449,17 +450,17 @@
class CharType FINAL : public Cat1Type {
public:
bool IsChar() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const CharType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const CharType* GetInstance() PURE;
static void Destroy();
private:
CharType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const CharType* instance_;
};
@@ -467,17 +468,17 @@
class FloatType FINAL : public Cat1Type {
public:
bool IsFloat() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
static const FloatType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const FloatType* GetInstance() PURE;
static void Destroy();
private:
FloatType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat1Type(klass, descriptor, cache_id) {}
static const FloatType* instance_;
};
@@ -485,86 +486,86 @@
class Cat2Type : public PrimitiveType {
public:
Cat2Type(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_);
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_);
};
class LongLoType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLongLo() const OVERRIDE { return true; }
bool IsLong() const OVERRIDE { return true; }
static const LongLoType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const LongLoType* GetInstance() PURE;
static void Destroy();
private:
LongLoType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongLoType* instance_;
};
class LongHiType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
bool IsLongHi() const OVERRIDE { return true; }
static const LongHiType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const LongHiType* GetInstance() PURE;
static void Destroy();
private:
LongHiType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const LongHiType* instance_;
};
class DoubleLoType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
bool IsDoubleLo() const OVERRIDE { return true; }
bool IsDouble() const OVERRIDE { return true; }
static const DoubleLoType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const DoubleLoType* GetInstance() PURE;
static void Destroy();
private:
DoubleLoType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleLoType* instance_;
};
class DoubleHiType FINAL : public Cat2Type {
public:
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
virtual bool IsDoubleHi() const OVERRIDE { return true; }
static const DoubleHiType* CreateInstance(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static const DoubleHiType* GetInstance() PURE;
static void Destroy();
private:
DoubleHiType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: Cat2Type(klass, descriptor, cache_id) {}
static const DoubleHiType* instance_;
};
class ConstantType : public RegType {
public:
- ConstantType(uint32_t constant, uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ ConstantType(uint32_t constant, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(nullptr, "", cache_id), constant_(constant) {
}
@@ -622,58 +623,58 @@
class PreciseConstType FINAL : public ConstantType {
public:
PreciseConstType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class PreciseConstLoType FINAL : public ConstantType {
public:
PreciseConstLoType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class PreciseConstHiType FINAL : public ConstantType {
public:
PreciseConstHiType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsPreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class ImpreciseConstType FINAL : public ConstantType {
public:
ImpreciseConstType(uint32_t constat, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constat, cache_id) {
}
bool IsImpreciseConstant() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class ImpreciseConstLoType FINAL : public ConstantType {
public:
ImpreciseConstLoType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsImpreciseConstantLo() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
class ImpreciseConstHiType FINAL : public ConstantType {
public:
ImpreciseConstHiType(uint32_t constant, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: ConstantType(constant, cache_id) {}
bool IsImpreciseConstantHi() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// Common parent of all uninitialized types. Uninitialized types are created by
@@ -703,14 +704,14 @@
UninitializedReferenceType(mirror::Class* klass,
const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, allocation_pc, cache_id) {}
bool IsUninitializedReference() const OVERRIDE { return true; }
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// Similar to UnresolvedReferenceType but not yet having been passed to a
@@ -719,7 +720,7 @@
public:
UnresolvedUninitializedRefType(const StringPiece& descriptor,
uint32_t allocation_pc, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, allocation_pc, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -730,10 +731,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
// Similar to UninitializedReferenceType but special case for the this argument
@@ -743,7 +744,7 @@
UninitializedThisReferenceType(mirror::Class* klass,
const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(klass, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -754,17 +755,17 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
class UnresolvedUninitializedThisRefType FINAL : public UninitializedType {
public:
UnresolvedUninitializedThisRefType(const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UninitializedType(nullptr, descriptor, 0, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -775,10 +776,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass or a
@@ -786,7 +787,7 @@
class ReferenceType FINAL : public RegType {
public:
ReferenceType(mirror::Class* klass, const StringPiece& descriptor,
- uint16_t cache_id) SHARED_REQUIRES(Locks::mutator_lock_)
+ uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(klass, descriptor, cache_id) {}
bool IsReference() const OVERRIDE { return true; }
@@ -795,7 +796,7 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// A type of register holding a reference to an Object of type GetClass and only
@@ -805,7 +806,7 @@
public:
PreciseReferenceType(mirror::Class* klass, const StringPiece& descriptor,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool IsPreciseReference() const OVERRIDE { return true; }
@@ -813,14 +814,14 @@
bool HasClassVirtual() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
};
// Common parent of unresolved types.
class UnresolvedType : public RegType {
public:
UnresolvedType(const StringPiece& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: RegType(nullptr, descriptor, cache_id) {}
bool IsNonZeroReferenceTypes() const OVERRIDE;
@@ -832,7 +833,7 @@
class UnresolvedReferenceType FINAL : public UnresolvedType {
public:
UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UnresolvedType(descriptor, cache_id) {
if (kIsDebugBuild) {
CheckInvariants();
@@ -843,10 +844,10 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
};
// Type representing the super-class of an unresolved type.
@@ -854,7 +855,7 @@
public:
UnresolvedSuperClass(uint16_t child_id, RegTypeCache* reg_type_cache,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: UnresolvedType("", cache_id),
unresolved_child_id_(child_id),
reg_type_cache_(reg_type_cache) {
@@ -872,10 +873,10 @@
return static_cast<uint16_t>(unresolved_child_id_ & 0xFFFF);
}
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
const uint16_t unresolved_child_id_;
const RegTypeCache* const reg_type_cache_;
@@ -890,7 +891,7 @@
const BitVector& unresolved,
const RegTypeCache* reg_type_cache,
uint16_t cache_id)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// The resolved part. See description below.
const RegType& GetResolvedPart() const {
@@ -905,13 +906,13 @@
bool IsUnresolvedTypes() const OVERRIDE { return true; }
- bool IsArrayTypes() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
- bool IsObjectArrayTypes() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
+ bool IsObjectArrayTypes() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
- std::string Dump() const OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void CheckInvariants() const SHARED_REQUIRES(Locks::mutator_lock_);
+ void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_);
const RegTypeCache* const reg_type_cache_;
@@ -927,7 +928,7 @@
};
std::ostream& operator<<(std::ostream& os, const RegType& rhs)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
} // namespace verifier
} // namespace art
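The primitive, conflict, and undefined RegTypes declared above all share the same CreateInstance/GetInstance/Destroy singleton shape. A compilable sketch of that pattern with a stand-in type (only the method names mirror the header; the bodies are illustrative):

#include <cassert>
#include <string>

class BooleanLikeType {
 public:
  static const BooleanLikeType* CreateInstance(const std::string& descriptor) {
    assert(instance_ == nullptr);          // built exactly once, at verifier Init().
    instance_ = new BooleanLikeType(descriptor);
    return instance_;
  }
  static const BooleanLikeType* GetInstance() {
    assert(instance_ != nullptr);
    return instance_;
  }
  static void Destroy() { delete instance_; instance_ = nullptr; }  // at Shutdown().
  const std::string& Descriptor() const { return descriptor_; }

 private:
  explicit BooleanLikeType(std::string d) : descriptor_(std::move(d)) {}
  std::string descriptor_;
  static const BooleanLikeType* instance_;
};

const BooleanLikeType* BooleanLikeType::instance_ = nullptr;

int main() {
  BooleanLikeType::CreateInstance("Z");
  assert(BooleanLikeType::GetInstance()->Descriptor() == "Z");
  BooleanLikeType::Destroy();
}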
diff --git a/runtime/verifier/reg_type_cache.cc b/runtime/verifier/reg_type_cache.cc
index 71c2a90..4d4886e 100644
--- a/runtime/verifier/reg_type_cache.cc
+++ b/runtime/verifier/reg_type_cache.cc
@@ -36,7 +36,7 @@
kMinSmallConstant + 1];
ALWAYS_INLINE static inline bool MatchingPrecisionForClass(const RegType* entry, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_) {
if (entry->IsPreciseReference() == precise) {
// We were or weren't looking for a precise reference and we found what we need.
return true;
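MatchingPrecisionForClass above screens cache hits by precision as well as by class: a precise reference means exactly this class, an imprecise one means this class or any subclass. A simplified stand-in for that matching rule (the real helper has an additional carve-out that this sketch omits; types and names here are not ART's):

#include <cassert>

struct Entry {
  const void* klass;  // identity of the resolved class
  bool precise;       // true: exactly this class; false: this class or a subtype
};

bool Matches(const Entry& entry, const void* klass, bool precise) {
  return entry.klass == klass && entry.precise == precise;
}

int main() {
  int dummy_class;                                  // stands in for a resolved class
  Entry cached{&dummy_class, /*precise=*/false};
  assert(Matches(cached, &dummy_class, false));
  assert(!Matches(cached, &dummy_class, true));     // same class, wrong precision: no reuse
}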
diff --git a/runtime/verifier/reg_type_cache.h b/runtime/verifier/reg_type_cache.h
index 6f9a04e..14d9509 100644
--- a/runtime/verifier/reg_type_cache.h
+++ b/runtime/verifier/reg_type_cache.h
@@ -46,7 +46,7 @@
public:
explicit RegTypeCache(bool can_load_classes, ScopedArenaAllocator& arena);
~RegTypeCache();
- static void Init() SHARED_REQUIRES(Locks::mutator_lock_) {
+ static void Init() REQUIRES_SHARED(Locks::mutator_lock_) {
if (!RegTypeCache::primitive_initialized_) {
CHECK_EQ(RegTypeCache::primitive_count_, 0);
CreatePrimitiveAndSmallConstantTypes();
@@ -57,114 +57,114 @@
static void ShutDown();
const art::verifier::RegType& GetFromId(uint16_t id) const;
const RegType& From(mirror::ClassLoader* loader, const char* descriptor, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Find a RegType, returns null if not found.
const RegType* FindClass(mirror::Class* klass, bool precise) const
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Insert a new class with a specified descriptor, must not already be in the cache.
const RegType* InsertClass(const StringPiece& descriptor, mirror::Class* klass, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get or insert a reg type for a description, klass, and precision.
const RegType& FromClass(const char* descriptor, mirror::Class* klass, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat1Const(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat2ConstLo(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat2ConstHi(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromDescriptor(mirror::ClassLoader* loader, const char* descriptor, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromUnresolvedMerge(const RegType& left, const RegType& right)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromUnresolvedSuperClass(const RegType& child)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const ConstantType& Zero() SHARED_REQUIRES(Locks::mutator_lock_) {
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ const ConstantType& Zero() REQUIRES_SHARED(Locks::mutator_lock_) {
return FromCat1Const(0, true);
}
- const ConstantType& One() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ConstantType& One() REQUIRES_SHARED(Locks::mutator_lock_) {
return FromCat1Const(1, true);
}
size_t GetCacheSize() {
return entries_.size();
}
- const BooleanType& Boolean() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const BooleanType& Boolean() REQUIRES_SHARED(Locks::mutator_lock_) {
return *BooleanType::GetInstance();
}
- const ByteType& Byte() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ByteType& Byte() REQUIRES_SHARED(Locks::mutator_lock_) {
return *ByteType::GetInstance();
}
- const CharType& Char() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const CharType& Char() REQUIRES_SHARED(Locks::mutator_lock_) {
return *CharType::GetInstance();
}
- const ShortType& Short() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const ShortType& Short() REQUIRES_SHARED(Locks::mutator_lock_) {
return *ShortType::GetInstance();
}
- const IntegerType& Integer() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const IntegerType& Integer() REQUIRES_SHARED(Locks::mutator_lock_) {
return *IntegerType::GetInstance();
}
- const FloatType& Float() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const FloatType& Float() REQUIRES_SHARED(Locks::mutator_lock_) {
return *FloatType::GetInstance();
}
- const LongLoType& LongLo() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const LongLoType& LongLo() REQUIRES_SHARED(Locks::mutator_lock_) {
return *LongLoType::GetInstance();
}
- const LongHiType& LongHi() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const LongHiType& LongHi() REQUIRES_SHARED(Locks::mutator_lock_) {
return *LongHiType::GetInstance();
}
- const DoubleLoType& DoubleLo() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DoubleLoType& DoubleLo() REQUIRES_SHARED(Locks::mutator_lock_) {
return *DoubleLoType::GetInstance();
}
- const DoubleHiType& DoubleHi() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const DoubleHiType& DoubleHi() REQUIRES_SHARED(Locks::mutator_lock_) {
return *DoubleHiType::GetInstance();
}
- const UndefinedType& Undefined() SHARED_REQUIRES(Locks::mutator_lock_) {
+ const UndefinedType& Undefined() REQUIRES_SHARED(Locks::mutator_lock_) {
return *UndefinedType::GetInstance();
}
const ConflictType& Conflict() {
return *ConflictType::GetInstance();
}
- const PreciseReferenceType& JavaLangClass() SHARED_REQUIRES(Locks::mutator_lock_);
- const PreciseReferenceType& JavaLangString() SHARED_REQUIRES(Locks::mutator_lock_);
- const RegType& JavaLangThrowable(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
- const RegType& JavaLangObject(bool precise) SHARED_REQUIRES(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangClass() REQUIRES_SHARED(Locks::mutator_lock_);
+ const PreciseReferenceType& JavaLangString() REQUIRES_SHARED(Locks::mutator_lock_);
+ const RegType& JavaLangThrowable(bool precise) REQUIRES_SHARED(Locks::mutator_lock_);
+ const RegType& JavaLangObject(bool precise) REQUIRES_SHARED(Locks::mutator_lock_);
const UninitializedType& Uninitialized(const RegType& type, uint32_t allocation_pc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Create an uninitialized 'this' argument for the given type.
const UninitializedType& UninitializedThisArgument(const RegType& type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& FromUninitialized(const RegType& uninit_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& ByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& CharConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& ShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& IntConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& PosByteConstant() SHARED_REQUIRES(Locks::mutator_lock_);
- const ImpreciseConstType& PosShortConstant() SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& ByteConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& CharConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& ShortConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& IntConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosByteConstant() REQUIRES_SHARED(Locks::mutator_lock_);
+ const ImpreciseConstType& PosShortConstant() REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& GetComponentType(const RegType& array, mirror::ClassLoader* loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
- void Dump(std::ostream& os) SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ void Dump(std::ostream& os) REQUIRES_SHARED(Locks::mutator_lock_);
const RegType& RegTypeFromPrimitiveType(Primitive::Type) const;
void VisitRoots(RootVisitor* visitor, const RootInfo& root_info)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static void VisitStaticRoots(RootVisitor* visitor)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
private:
- void FillPrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
+ void FillPrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Class* ResolveClass(const char* descriptor, mirror::ClassLoader* loader)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool MatchDescriptor(size_t idx, const StringPiece& descriptor, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
const ConstantType& FromCat1NonSmallConstant(int32_t value, bool precise)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Returns the passed-in RegType.
template <class RegTypeType>
- RegTypeType& AddEntry(RegTypeType* new_entry) SHARED_REQUIRES(Locks::mutator_lock_);
+ RegTypeType& AddEntry(RegTypeType* new_entry) REQUIRES_SHARED(Locks::mutator_lock_);
// Add a string piece to the arena allocator so that it stays live for the lifetime of the
// verifier.
@@ -172,8 +172,8 @@
template <class Type>
static const Type* CreatePrimitiveTypeInstance(const std::string& descriptor)
- SHARED_REQUIRES(Locks::mutator_lock_);
- static void CreatePrimitiveAndSmallConstantTypes() SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
+ static void CreatePrimitiveAndSmallConstantTypes() REQUIRES_SHARED(Locks::mutator_lock_);
// A quick look up for popular small constants.
static constexpr int32_t kMinSmallConstant = -1;
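kMinSmallConstant above anchors a fast path: constants in a small, popular range are pre-created once and fetched by index rather than searched for, which is what Zero() and One() lean on. A sketch of that lookup (the upper bound and the types here are assumptions for illustration):

#include <cassert>
#include <cstdint>

struct ConstType { int32_t value; };

constexpr int32_t kMinSmallConstant = -1;
constexpr int32_t kMaxSmallConstant = 10;  // assumed bound, for illustration only

ConstType small_constants[kMaxSmallConstant - kMinSmallConstant + 1];

void InitSmallConstants() {
  for (int32_t v = kMinSmallConstant; v <= kMaxSmallConstant; ++v) {
    small_constants[v - kMinSmallConstant] = ConstType{v};
  }
}

const ConstType& FromCat1Const(int32_t value) {
  assert(value >= kMinSmallConstant && value <= kMaxSmallConstant);
  return small_constants[value - kMinSmallConstant];  // O(1), no hashing or allocation
}

int main() {
  InitSmallConstants();
  assert(FromCat1Const(0).value == 0);  // Zero()
  assert(FromCat1Const(1).value == 1);  // One()
}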
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index 56846c1..7603a79 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -67,25 +67,25 @@
// Implement category-1 "move" instructions. Copy a 32-bit value from "vsrc" to "vdst".
void CopyRegister1(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc, TypeCategory cat)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implement category-2 "move" instructions. Copy a 64-bit value from "vsrc" to "vdst". This
// copies both halves of the register.
void CopyRegister2(MethodVerifier* verifier, uint32_t vdst, uint32_t vsrc)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implement "move-result". Copy the category-1 value from the result register to another
// register, and reset the result register.
void CopyResultRegister1(MethodVerifier* verifier, uint32_t vdst, bool is_reference)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Implement "move-result-wide". Copy the category-2 value from the result register to another
// register, and reset the result register.
void CopyResultRegister2(MethodVerifier* verifier, uint32_t vdst)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Set the invisible result register to unknown
- void SetResultTypeToUnknown(MethodVerifier* verifier) SHARED_REQUIRES(Locks::mutator_lock_);
+ void SetResultTypeToUnknown(MethodVerifier* verifier) REQUIRES_SHARED(Locks::mutator_lock_);
// Set the type of register N, verifying that the register is valid. If "newType" is the "Lo"
// part of a 64-bit value, register N+1 will be set to "newType+1".
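CopyRegister1/CopyRegister2 above differ only in how many vreg slots they touch: category-1 values occupy one slot, category-2 (long/double) values occupy a lo/hi pair, so the wide copy must move both halves. A toy register line making that concrete (types and names are stand-ins, not ART's):

#include <cassert>
#include <cstdint>
#include <vector>

using RegTypeId = uint16_t;  // stand-in for the cache ids a register line stores

struct ToyRegisterLine {
  std::vector<RegTypeId> line_;
  explicit ToyRegisterLine(size_t num_regs) : line_(num_regs, 0) {}

  void CopyRegister1(uint32_t vdst, uint32_t vsrc) {  // e.g. move, move-object
    line_[vdst] = line_[vsrc];
  }
  void CopyRegister2(uint32_t vdst, uint32_t vsrc) {  // e.g. move-wide
    line_[vdst]     = line_[vsrc];                    // lo half
    line_[vdst + 1] = line_[vsrc + 1];                // hi half
  }
};

int main() {
  ToyRegisterLine regs(6);
  regs.line_[0] = 7;            // pretend v0/v1 hold a long (lo, hi)
  regs.line_[1] = 8;
  regs.CopyRegister2(4, 0);     // move-wide v4, v0
  assert(regs.line_[4] == 7 && regs.line_[5] == 8);
}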
@@ -102,20 +102,20 @@
ALWAYS_INLINE bool SetRegisterType(MethodVerifier* verifier,
uint32_t vdst,
const RegType& new_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool SetRegisterTypeWide(MethodVerifier* verifier,
uint32_t vdst,
const RegType& new_type1,
const RegType& new_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/* Set the type of the "result" register. */
void SetResultRegisterType(MethodVerifier* verifier, const RegType& new_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void SetResultRegisterTypeWide(const RegType& new_type1, const RegType& new_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Get the type of register vsrc.
const RegType& GetRegisterType(MethodVerifier* verifier, uint32_t vsrc) const;
@@ -123,13 +123,13 @@
ALWAYS_INLINE bool VerifyRegisterType(MethodVerifier* verifier,
uint32_t vsrc,
const RegType& check_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
bool VerifyRegisterTypeWide(MethodVerifier* verifier,
uint32_t vsrc,
const RegType& check_type1,
const RegType& check_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CopyFromLine(const RegisterLine* src) {
DCHECK_EQ(num_regs_, src->num_regs_);
@@ -139,7 +139,7 @@
this_initialized_ = src->this_initialized_;
}
- std::string Dump(MethodVerifier* verifier) const SHARED_REQUIRES(Locks::mutator_lock_);
+ std::string Dump(MethodVerifier* verifier) const REQUIRES_SHARED(Locks::mutator_lock_);
void FillWithGarbage() {
memset(&line_, 0xf1, num_regs_ * sizeof(uint16_t));
@@ -154,7 +154,7 @@
* the new ones at the same time).
*/
void MarkUninitRefsAsInvalid(MethodVerifier* verifier, const RegType& uninit_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Update all registers holding "uninit_type" to instead hold the corresponding initialized
@@ -162,7 +162,7 @@
* the reference must be marked as initialized.
*/
void MarkRefsAsInitialized(MethodVerifier* verifier, const RegType& uninit_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Update all registers to be Conflict except vsrc.
@@ -219,7 +219,7 @@
const Instruction* inst,
bool is_range,
bool allow_failure = false)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a simple two-register instruction (e.g. "neg-int").
@@ -229,7 +229,7 @@
const Instruction* inst,
const RegType& dst_type,
const RegType& src_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckUnaryOpWide(MethodVerifier* verifier,
const Instruction* inst,
@@ -237,21 +237,21 @@
const RegType& dst_type2,
const RegType& src_type1,
const RegType& src_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckUnaryOpToWide(MethodVerifier* verifier,
const Instruction* inst,
const RegType& dst_type1,
const RegType& dst_type2,
const RegType& src_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckUnaryOpFromWide(MethodVerifier* verifier,
const Instruction* inst,
const RegType& dst_type,
const RegType& src_type1,
const RegType& src_type2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a simple three-register instruction (e.g. "add-int").
@@ -264,7 +264,7 @@
const RegType& src_type1,
const RegType& src_type2,
bool check_boolean_op)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOpWide(MethodVerifier* verifier,
const Instruction* inst,
@@ -274,14 +274,14 @@
const RegType& src_type1_2,
const RegType& src_type2_1,
const RegType& src_type2_2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOpWideShift(MethodVerifier* verifier,
const Instruction* inst,
const RegType& long_lo_type,
const RegType& long_hi_type,
const RegType& int_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a binary "2addr" operation. "src_type1"/"src_type2"
@@ -293,7 +293,7 @@
const RegType& src_type1,
const RegType& src_type2,
bool check_boolean_op)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOp2addrWide(MethodVerifier* verifier,
const Instruction* inst,
@@ -303,14 +303,14 @@
const RegType& src_type1_2,
const RegType& src_type2_1,
const RegType& src_type2_2)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
void CheckBinaryOp2addrWideShift(MethodVerifier* verifier,
const Instruction* inst,
const RegType& long_lo_type,
const RegType& long_hi_type,
const RegType& int_type)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
/*
* Verify types for a two-register instruction with a literal constant (e.g. "add-int/lit8").
@@ -324,15 +324,15 @@
const RegType& src_type,
bool check_boolean_op,
bool is_lit16)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Verify/push monitor onto the monitor stack, locking the value in reg_idx at location insn_idx.
void PushMonitor(MethodVerifier* verifier, uint32_t reg_idx, int32_t insn_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Verify/pop monitor from monitor stack ensuring that we believe the monitor is locked
void PopMonitor(MethodVerifier* verifier, uint32_t reg_idx)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
// Stack of currently held monitors and where they were locked
size_t MonitorStackDepth() const {
@@ -344,7 +344,7 @@
void VerifyMonitorStackEmpty(MethodVerifier* verifier) const;
bool MergeRegisters(MethodVerifier* verifier, const RegisterLine* incoming_line)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
size_t GetMonitorEnterCount() const {
return monitors_.size();
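
The hunks above only rename the thread-safety annotation macro from SHARED_REQUIRES to REQUIRES_SHARED; the locking contract itself is unchanged. As a rough, illustrative sketch (not part of this change; the DumpLineForDebugging helper is invented here and the include paths are approximate), a method annotated REQUIRES_SHARED(Locks::mutator_lock_) is normally reached from code that already holds the mutator lock in shared mode, for example via ScopedObjectAccess:

    // Illustrative only: ScopedObjectAccess transitions the current thread to
    // kRunnable, which implies shared ownership of Locks::mutator_lock_, so the
    // REQUIRES_SHARED annotation on RegisterLine::Dump() is satisfied.
    #include <string>
    #include "scoped_thread_state_change.h"  // include paths approximate
    #include "thread.h"
    #include "verifier/method_verifier.h"
    #include "verifier/register_line.h"

    std::string DumpLineForDebugging(art::verifier::MethodVerifier* verifier,
                                     art::verifier::RegisterLine* line) {
      art::ScopedObjectAccess soa(art::Thread::Current());
      return line->Dump(verifier);  // OK: mutator lock held in shared mode here
    }
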
diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h
index b8e05b8..25c9424 100644
--- a/runtime/well_known_classes.h
+++ b/runtime/well_known_classes.h
@@ -38,7 +38,7 @@
static jmethodID StringInitToStringFactoryMethodID(jmethodID string_init);
static mirror::Class* ToClass(jclass global_jclass)
- SHARED_REQUIRES(Locks::mutator_lock_);
+ REQUIRES_SHARED(Locks::mutator_lock_);
static jclass com_android_dex_Dex;
static jclass dalvik_annotation_optimization_FastNative;
diff --git a/test/004-ReferenceMap/stack_walk_refmap_jni.cc b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
index 5304590..6c16100 100644
--- a/test/004-ReferenceMap/stack_walk_refmap_jni.cc
+++ b/test/004-ReferenceMap/stack_walk_refmap_jni.cc
@@ -34,10 +34,10 @@
} while (false);
struct ReferenceMap2Visitor : public CheckReferenceMapVisitor {
- explicit ReferenceMap2Visitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit ReferenceMap2Visitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
diff --git a/test/004-StackWalk/stack_walk_jni.cc b/test/004-StackWalk/stack_walk_jni.cc
index 420224d..795f168 100644
--- a/test/004-StackWalk/stack_walk_jni.cc
+++ b/test/004-StackWalk/stack_walk_jni.cc
@@ -30,10 +30,10 @@
class TestReferenceMapVisitor : public CheckReferenceMapVisitor {
public:
- explicit TestReferenceMapVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
+ explicit TestReferenceMapVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
: CheckReferenceMapVisitor(thread) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
if (CheckReferenceMapVisitor::VisitFrame()) {
return true;
}
diff --git a/test/115-native-bridge/run b/test/115-native-bridge/run
index aeb5721..fb0b967 100644
--- a/test/115-native-bridge/run
+++ b/test/115-native-bridge/run
@@ -18,11 +18,11 @@
# Use libnativebridgetest as a native bridge, start NativeBridgeMain (Main is JniTest main file).
LIBPATH=$(echo ${ARGS} | sed -r 's/.*Djava.library.path=([^ ]*) .*/\1/')
-ln -s ${LIBPATH}/libnativebridgetest.so .
+ln -sf ${LIBPATH}/libnativebridgetest.so .
touch libarttest.so
touch libarttestd.so
-ln -s ${LIBPATH}/libarttest.so libarttest2.so
-ln -s ${LIBPATH}/libarttestd.so libarttestd2.so
+ln -sf ${LIBPATH}/libarttest.so libarttest2.so
+ln -sf ${LIBPATH}/libarttestd.so libarttestd2.so
# pwd likely has /, so it's a pain to put that into a sed rule.
LEFT=$(echo ${ARGS} | sed -r 's/-Djava.library.path.*//')
diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java
index 33ef10b..64180d5 100644
--- a/test/442-checker-constant-folding/src/Main.java
+++ b/test/442-checker-constant-folding/src/Main.java
@@ -27,6 +27,12 @@
}
}
+ public static void assertTrue(boolean condition) {
+ if (!condition) {
+ throw new Error();
+ }
+ }
+
public static void assertIntEquals(int expected, int result) {
if (expected != result) {
throw new Error("Expected: " + expected + ", found: " + result);
@@ -1322,6 +1328,58 @@
/**
+ * Test optimizations of comparisons with null yielding a constant result.
+ */
+
+ /// CHECK-START: boolean Main.ConstStringEqualsNull() constant_folding$after_inlining (before)
+ /// CHECK-DAG: <<ConstStr:l\d+>> LoadString
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Eq:z\d+>> Equal [<<ConstStr>>,<<Null>>]
+ /// CHECK-DAG: If [<<Eq>>]
+
+ /// CHECK-START: boolean Main.ConstStringEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-DAG: <<False:i\d+>> IntConstant 0
+ /// CHECK-DAG: If [<<False>>]
+
+ /// CHECK-START: boolean Main.ConstStringEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-NOT: Equal
+
+ public static boolean ConstStringEqualsNull() {
+ // Due to Jack emitting code using the opposite condition, use != to generate Equal.
+ if ($inline$ConstString() != null) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ /// CHECK-START: boolean Main.ConstStringNotEqualsNull() constant_folding$after_inlining (before)
+ /// CHECK-DAG: <<ConstStr:l\d+>> LoadString
+ /// CHECK-DAG: <<Null:l\d+>> NullConstant
+ /// CHECK-DAG: <<Ne:z\d+>> NotEqual [<<ConstStr>>,<<Null>>]
+ /// CHECK-DAG: If [<<Ne>>]
+
+ /// CHECK-START: boolean Main.ConstStringNotEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-DAG: <<True:i\d+>> IntConstant 1
+ /// CHECK-DAG: If [<<True>>]
+
+ /// CHECK-START: boolean Main.ConstStringNotEqualsNull() constant_folding$after_inlining (after)
+ /// CHECK-NOT: NotEqual
+
+ public static boolean ConstStringNotEqualsNull() {
+ // Due to Jack emitting code using the opposite condition, use == to generate NotEqual.
+ if ($inline$ConstString() == null) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ public static String $inline$ConstString() {
+ return "";
+ }
+
+ /**
* Exercise constant folding on type conversions.
*/
@@ -1601,6 +1659,9 @@
assertFalse(CmpFloatGreaterThanNaN(arbitrary));
assertFalse(CmpDoubleLessThanNaN(arbitrary));
+ assertFalse(ConstStringEqualsNull());
+ assertTrue(ConstStringNotEqualsNull());
+
Main main = new Main();
assertIntEquals(1, main.smaliCmpLongConstants());
assertIntEquals(-1, main.smaliCmpGtFloatConstants());
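
The new checker tests above exercise folding of Equal/NotEqual against NullConstant when the other input is known to be non-null (a LoadString here). A minimal sketch of the folding rule follows, for orientation only; FoldNullComparison and its parameters are hypothetical names, not code from the constant-folding pass itself:

    // Hypothetical helper: when a reference known to be non-null is compared
    // against the null constant, the comparison folds to a boolean constant.
    // Returns true if folding applied and writes the folded value to out_constant.
    bool FoldNullComparison(bool input_can_be_null, bool is_not_equal, int* out_constant) {
      if (input_can_be_null) {
        return false;  // cannot fold: the reference might actually be null
      }
      // non-null == null is always false (0); non-null != null is always true (1)
      *out_constant = is_not_equal ? 1 : 0;
      return true;
    }
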
diff --git a/test/454-get-vreg/get_vreg_jni.cc b/test/454-get-vreg/get_vreg_jni.cc
index 30f9954..5762754 100644
--- a/test/454-get-vreg/get_vreg_jni.cc
+++ b/test/454-get-vreg/get_vreg_jni.cc
@@ -29,12 +29,12 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
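
The test visitors touched in this and the following hunks (454, 457, 461, 466, 543, 570, 595) all follow the same pattern: a StackVisitor subclass whose constructor and VisitFrame() carry the REQUIRES_SHARED annotation, walking frames until the method of interest is found. A condensed sketch, for orientation only (FindMethodVisitor and its fields are assumptions, and include paths are approximate):

    // Illustrative only: minimal shape of the stack-walking visitors used in the
    // run-tests above. VisitFrame() is invoked once per frame; returning false
    // stops the walk.
    #include <string>
    #include "arch/context.h"   // include paths approximate
    #include "art_method.h"
    #include "base/mutex.h"
    #include "stack.h"
    #include "thread.h"

    class FindMethodVisitor : public art::StackVisitor {
     public:
      FindMethodVisitor(art::Thread* thread, art::Context* context, const char* wanted)
          REQUIRES_SHARED(art::Locks::mutator_lock_)
          : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
            wanted_(wanted),
            found_(false) {}

      bool VisitFrame() REQUIRES_SHARED(art::Locks::mutator_lock_) {
        art::ArtMethod* m = GetMethod();
        if (m != nullptr && wanted_ == m->GetName()) {
          found_ = true;
          return false;  // stop walking once the frame of interest is found
        }
        return true;  // keep walking
      }

      const std::string wanted_;
      bool found_;
    };
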
diff --git a/test/457-regs/regs_jni.cc b/test/457-regs/regs_jni.cc
index 79fa8b0..08db775 100644
--- a/test/457-regs/regs_jni.cc
+++ b/test/457-regs/regs_jni.cc
@@ -29,10 +29,10 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/461-get-reference-vreg/get_reference_vreg_jni.cc b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
index 8108c97..8122c6d 100644
--- a/test/461-get-reference-vreg/get_reference_vreg_jni.cc
+++ b/test/461-get-reference-vreg/get_reference_vreg_jni.cc
@@ -28,12 +28,12 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(Thread* thread, Context* context, mirror::Object* this_value)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
this_value_(this_value),
found_method_index_(0) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc
index 4f89e91..3618b4f 100644
--- a/test/466-get-live-vreg/get_live_vreg_jni.cc
+++ b/test/466-get-live-vreg/get_live_vreg_jni.cc
@@ -28,10 +28,10 @@
class TestVisitor : public StackVisitor {
public:
- TestVisitor(Thread* thread, Context* context) SHARED_REQUIRES(Locks::mutator_lock_)
+ TestVisitor(Thread* thread, Context* context) REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/543-env-long-ref/env_long_ref.cc b/test/543-env-long-ref/env_long_ref.cc
index 4108323..557def6 100644
--- a/test/543-env-long-ref/env_long_ref.cc
+++ b/test/543-env-long-ref/env_long_ref.cc
@@ -28,13 +28,13 @@
class TestVisitor : public StackVisitor {
public:
TestVisitor(const ScopedObjectAccess& soa, Context* context, jobject expected_value)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(soa.Self(), context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
expected_value_(expected_value),
found_(false),
soa_(soa) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/563-checker-fakestring/smali/TestCase.smali b/test/563-checker-fakestring/smali/TestCase.smali
index 54312a4..9f86352 100644
--- a/test/563-checker-fakestring/smali/TestCase.smali
+++ b/test/563-checker-fakestring/smali/TestCase.smali
@@ -42,16 +42,19 @@
# Test usage of String new-instance before it is initialized.
## CHECK-START: void TestCase.compareNewInstance() register (after)
-## CHECK-DAG: <<Null:l\d+>> NullConstant
+## CHECK-DAG: <<Null:l\d+>> InvokeStaticOrDirect method_name:Main.$noinline$HiddenNull
## CHECK-DAG: <<String:l\d+>> NewInstance
-## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<String>>,<<Null>>]
+## CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Null>>,<<String>>]
## CHECK-DAG: If [<<Cond>>]
.method public static compareNewInstance()V
.registers 3
+ invoke-static {}, LMain;->$noinline$HiddenNull()Ljava/lang/Object;
+ move-result-object v1
+
new-instance v0, Ljava/lang/String;
- if-nez v0, :return
+ if-ne v0, v1, :return
# Will throw NullPointerException if this branch is taken.
const v1, 0x0
diff --git a/test/563-checker-fakestring/src/Main.java b/test/563-checker-fakestring/src/Main.java
index 1ac8a5b..78cb37a 100644
--- a/test/563-checker-fakestring/src/Main.java
+++ b/test/563-checker-fakestring/src/Main.java
@@ -79,4 +79,11 @@
assertEqual(testString, result);
}
}
+
+ public static boolean doThrow = false;
+
+ public static Object $noinline$HiddenNull() {
+ if (doThrow) { throw new Error(); }
+ return null;
+ }
}
diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc
index cf413ba..adda3cc 100644
--- a/test/570-checker-osr/osr.cc
+++ b/test/570-checker-osr/osr.cc
@@ -28,13 +28,13 @@
class OsrVisitor : public StackVisitor {
public:
explicit OsrVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name),
in_osr_method_(false),
in_interpreter_(false) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
@@ -90,11 +90,11 @@
class ProfilingInfoVisitor : public StackVisitor {
public:
explicit ProfilingInfoVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
@@ -124,11 +124,11 @@
class OsrCheckVisitor : public StackVisitor {
public:
OsrCheckVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc
index 0d26f45..a265dce 100644
--- a/test/595-profile-saving/profile-saving.cc
+++ b/test/595-profile-saving/profile-saving.cc
@@ -34,11 +34,11 @@
class CreateProfilingInfoVisitor : public StackVisitor {
public:
explicit CreateProfilingInfoVisitor(Thread* thread, const char* method_name)
- SHARED_REQUIRES(Locks::mutator_lock_)
+ REQUIRES_SHARED(Locks::mutator_lock_)
: StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
method_name_(method_name) {}
- bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
+ bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
ArtMethod* m = GetMethod();
std::string m_name(m->GetName());
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index ee2ee1a..fd1ba02 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -22,12 +22,10 @@
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "mirror/class-inl.h"
-#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedUtfChars.h"
-#include "stack.h"
#include "thread-inl.h"
namespace art {
diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt
index acab6e5..fd3c331 100644
--- a/test/valgrind-suppressions.txt
+++ b/test/valgrind-suppressions.txt
@@ -13,3 +13,12 @@
fun:_dl_start
obj:/lib/x86_64-linux-gnu/ld-2.19.so
}
+
+{
+ b/31275764
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ ...
+ fun:_ZN3art7Runtime17InitNativeMethodsEv
+}
diff --git a/test/valgrind-target-suppressions.txt b/test/valgrind-target-suppressions.txt
index 7ae6d53..fbc99b1 100644
--- a/test/valgrind-target-suppressions.txt
+++ b/test/valgrind-target-suppressions.txt
@@ -50,3 +50,12 @@
fun:malloc
fun:setenv
}
+
+{
+ b/31275764
+ Memcheck:Leak
+ match-leak-kinds: definite
+ fun:malloc
+ ...
+ fun:_ZN3art7Runtime17InitNativeMethodsEv
+}
diff --git a/tools/cpp-define-generator/constant_dexcache.def b/tools/cpp-define-generator/constant_dexcache.def
index d10ca1e..ede16d2 100644
--- a/tools/cpp-define-generator/constant_dexcache.def
+++ b/tools/cpp-define-generator/constant_dexcache.def
@@ -24,5 +24,5 @@
art::mirror::DexCache::kDexCacheStringCacheSize - 1)
DEFINE_EXPR(STRING_DEX_CACHE_HASH_BITS, int32_t,
art::LeastSignificantBit(art::mirror::DexCache::kDexCacheStringCacheSize))
-DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE, size_t,
+DEFINE_EXPR(STRING_DEX_CACHE_ELEMENT_SIZE, int32_t,
sizeof(art::mirror::StringDexCachePair))
\ No newline at end of file
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index a69b58b..7b5e9ed 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -86,7 +86,6 @@
"libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarFtpURLConnection",
"libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithLoggingSocketHandler",
"libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithHttpURLConnection",
- "libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarHttpURLConnection",
"org.apache.harmony.luni.tests.internal.net.www.protocol.http.HttpURLConnectionTest",
"org.apache.harmony.luni.tests.internal.net.www.protocol.https.HttpsURLConnectionTest",
"org.apache.harmony.luni.tests.java.net.URLConnectionTest",
@@ -234,5 +233,11 @@
modes: [device],
names: ["libcore.java.lang.ProcessBuilderTest#testRedirectInherit",
"libcore.java.lang.ProcessBuilderTest#testRedirect_nullStreams"]
+},
+{
+ description: "Sometimes times out",
+ result: EXEC_FAILED,
+ bug: 31258002,
+ names: ["libcore.net.NetworkSecurityPolicyTest#testCleartextTrafficPolicyWithJarHttpURLConnection"]
}
]
diff --git a/tools/run-jdwp-tests.sh b/tools/run-jdwp-tests.sh
index bdb2d4b..01dae43 100755
--- a/tools/run-jdwp-tests.sh
+++ b/tools/run-jdwp-tests.sh
@@ -50,6 +50,7 @@
host="no"
# Use JIT compiling by default.
use_jit=true
+variant_cmdline_parameter="--variant=X32"
while true; do
if [[ "$1" == "--mode=host" ]]; then
@@ -93,11 +94,34 @@
shift
elif [[ "$1" == "" ]]; then
break
+ elif [[ $1 == --variant=* ]]; then
+ variant_cmdline_parameter=$1
+ shift
else
shift
fi
done
+# For the host:
+#
+# If a variant is set, use it to modify the art_debugee parameter so that the forked VM has the
+# same bitness as the controller. This should be fine and should not impact testing
+# (cross-bitness), as the protocol is always 64-bit anyway (in our implementation).
+#
+# Note: this isn't necessary for the device, as the BOOTCLASSPATH environment variable is set
+# there and used as a fallback.
+if [[ $host == "yes" ]]; then
+ variant=${variant_cmdline_parameter:10}
+ if [[ $variant == "x32" || $variant == "X32" ]]; then
+ art_debugee="$art_debugee --32"
+ elif [[ $variant == "x64" || $variant == "X64" ]]; then
+ art_debugee="$art_debugee --64"
+ else
+ echo "Error, do not understand variant $variant_cmdline_parameter."
+ exit 1
+ fi
+fi
+
if [[ "$image" != "" ]]; then
vm_args="--vm-arg $image"
fi