Merge "Add a read barrier for weak roots in monitors."
diff --git a/build/Android.executable.mk b/build/Android.executable.mk
index 3c33975..a186e85 100644
--- a/build/Android.executable.mk
+++ b/build/Android.executable.mk
@@ -99,8 +99,8 @@
LOCAL_MULTILIB := $$(art_multilib)
endif
+ include external/libcxx/libcxx.mk
ifeq ($$(art_target_or_host),target)
- include external/libcxx/libcxx.mk
include $(BUILD_EXECUTABLE)
ART_TARGET_EXECUTABLES := $(ART_TARGET_EXECUTABLES) $(TARGET_OUT_EXECUTABLES)/$$(LOCAL_MODULE)
else # host
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 952f79a..765216b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -182,6 +182,7 @@
endif
LOCAL_CFLAGS := $(ART_TEST_CFLAGS)
+ include external/libcxx/libcxx.mk
ifeq ($$(art_target_or_host),target)
LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS) $(ART_TARGET_DEBUG_CFLAGS)
@@ -191,7 +192,6 @@
LOCAL_MODULE_PATH_32 := $(ART_NATIVETEST_OUT)/$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $(ART_NATIVETEST_OUT)/$(ART_TARGET_ARCH_64)
LOCAL_MULTILIB := both
- include external/libcxx/libcxx.mk
include $(BUILD_EXECUTABLE)
ART_TARGET_GTEST_EXECUTABLES$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_NATIVETEST_OUT)/$(TARGET_ARCH)/$$(LOCAL_MODULE)
@@ -216,7 +216,7 @@
LOCAL_STATIC_LIBRARIES += libcutils libvixl
ifneq ($(WITHOUT_HOST_CLANG),true)
# GCC-compiled host tests fail when this is linked, presumably due to destructors that run.
- LOCAL_STATIC_LIBRARIES += libgtest_host
+ LOCAL_STATIC_LIBRARIES += libgtest_libc++_host
endif
LOCAL_LDLIBS += -lpthread -ldl
LOCAL_IS_HOST_MODULE := true
diff --git a/build/Android.libarttest.mk b/build/Android.libarttest.mk
index 6965326..9e5f3d6 100644
--- a/build/Android.libarttest.mk
+++ b/build/Android.libarttest.mk
@@ -46,6 +46,7 @@
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/build/Android.common.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/build/Android.libarttest.mk
+ include external/libcxx/libcxx.mk
ifeq ($$(art_target_or_host),target)
LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_CFLAGS := $(ART_TARGET_CFLAGS) $(ART_TARGET_DEBUG_CFLAGS)
@@ -56,13 +57,12 @@
LOCAL_MODULE_PATH_32 := $(ART_TEST_OUT)/$(ART_TARGET_ARCH_32)
LOCAL_MODULE_PATH_64 := $(ART_TEST_OUT)/$(ART_TARGET_ARCH_64)
LOCAL_MODULE_TARGET_ARCH := $(ART_SUPPORTED_ARCH)
- include external/libcxx/libcxx.mk
include $(BUILD_SHARED_LIBRARY)
else # host
LOCAL_CLANG := $(ART_HOST_CLANG)
LOCAL_CFLAGS := $(ART_HOST_CFLAGS) $(ART_HOST_DEBUG_CFLAGS)
LOCAL_STATIC_LIBRARIES := libcutils
- LOCAL_LDLIBS := -ldl -lpthread
+ LOCAL_LDLIBS += -ldl -lpthread
ifeq ($(HOST_OS),linux)
LOCAL_LDLIBS += -lrt
endif
diff --git a/compiler/Android.mk b/compiler/Android.mk
index cb900ea..cb9e41a 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -59,8 +59,8 @@
dex/mir_field_info.cc \
dex/mir_method_info.cc \
dex/mir_optimization.cc \
- dex/pass_driver.cc \
dex/bb_optimizations.cc \
+ dex/pass_driver_me.cc \
dex/bit_vector_block_iterator.cc \
dex/frontend.cc \
dex/mir_graph.cc \
@@ -194,8 +194,8 @@
LOCAL_GENERATED_SOURCES += $$(ENUM_OPERATOR_OUT_GEN)
LOCAL_CFLAGS := $$(LIBART_COMPILER_CFLAGS)
+ include external/libcxx/libcxx.mk
ifeq ($$(art_target_or_host),target)
- include external/libcxx/libcxx.mk
LOCAL_CLANG := $(ART_TARGET_CLANG)
LOCAL_CFLAGS += $(ART_TARGET_CFLAGS)
else # host
@@ -247,7 +247,7 @@
LOCAL_C_INCLUDES += $(ART_C_INCLUDES) art/runtime
ifeq ($$(art_target_or_host),host)
- LOCAL_LDLIBS := -ldl -lpthread
+ LOCAL_LDLIBS += -ldl -lpthread
endif
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
index abfa7a7..1852f80 100644
--- a/compiler/dex/bb_optimizations.cc
+++ b/compiler/dex/bb_optimizations.cc
@@ -23,7 +23,13 @@
/*
* Code Layout pass implementation start.
*/
-bool CodeLayout::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+bool CodeLayout::Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
cUnit->mir_graph->LayoutBlocks(bb);
// No need to repeat, so just return false.
return false;
@@ -32,13 +38,22 @@
/*
* SSATransformation pass implementation start.
*/
-bool SSATransformation::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+bool SSATransformation::Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
cUnit->mir_graph->InsertPhiNodeOperands(bb);
// No need to repeat, so just return false.
return false;
}
-void SSATransformation::End(CompilationUnit* cUnit) const {
+void SSATransformation::End(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
// Verify the dataflow information after the pass.
if (cUnit->enable_debug & (1 << kDebugVerifyDataflow)) {
cUnit->mir_graph->VerifyDataflow();
@@ -48,7 +63,13 @@
/*
* ConstantPropagation pass implementation start
*/
-bool ConstantPropagation::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+bool ConstantPropagation::Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
cUnit->mir_graph->DoConstantPropagation(bb);
// No need to repeat, so just return false.
return false;
@@ -57,7 +78,10 @@
/*
* MethodUseCount pass implementation start.
*/
-bool MethodUseCount::Gate(const CompilationUnit* cUnit) const {
+bool MethodUseCount::Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
// First initialize the data.
cUnit->mir_graph->InitializeMethodUses();
@@ -67,7 +91,13 @@
return res;
}
-bool MethodUseCount::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+bool MethodUseCount::Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
cUnit->mir_graph->CountUses(bb);
// No need to repeat, so just return false.
return false;
@@ -76,7 +106,13 @@
/*
* BasicBlock Combine pass implementation start.
*/
-bool BBCombine::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+bool BBCombine::Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
cUnit->mir_graph->CombineBlocks(bb);
// No need to repeat, so just return false.
@@ -86,7 +122,10 @@
/*
* BasicBlock Optimization pass implementation start.
*/
-void BBOptimizations::Start(CompilationUnit* cUnit) const {
+void BBOptimizations::Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
/*
* This pass has a different ordering depending on the suppress exception,
* so do the pass here for now:
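
Every Worker() above repeats the same unpacking idiom. A minimal sketch of the new contract, assuming the pass_me.h declarations introduced later in this patch (ExamplePass and its body are hypothetical, not part of the change):

    #include "base/casts.h"          // down_cast
    #include "compiler_internals.h"  // CompilationUnit, BasicBlock
    #include "pass_me.h"             // PassME, PassMEDataHolder

    namespace art {

    class ExamplePass : public PassME {
     public:
      ExamplePass() : PassME("ExamplePass") {
      }

      bool Worker(const PassDataHolder* data) const {
        DCHECK(data != nullptr);
        // Narrow the generic holder to the middle-end flavor to recover the
        // CompilationUnit and the BasicBlock currently being visited.
        const PassMEDataHolder* holder = down_cast<const PassMEDataHolder*>(data);
        CompilationUnit* c_unit = holder->c_unit;
        BasicBlock* bb = holder->bb;
        DCHECK(c_unit != nullptr);
        DCHECK(bb != nullptr);
        // ... transform bb through c_unit->mir_graph ...
        return false;  // Nothing changed, so repeating iterators may stop.
      }
    };

    }  // namespace art
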
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 6d500a5..43dcdf4 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -18,7 +18,7 @@
#define ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
#include "compiler_internals.h"
-#include "pass.h"
+#include "pass_me.h"
namespace art {
@@ -26,16 +26,22 @@
* @class CacheFieldLoweringInfo
* @brief Cache the lowering info for fields used by IGET/IPUT/SGET/SPUT insns.
*/
-class CacheFieldLoweringInfo : public Pass {
+class CacheFieldLoweringInfo : public PassME {
public:
- CacheFieldLoweringInfo() : Pass("CacheFieldLoweringInfo", kNoNodes) {
+ CacheFieldLoweringInfo() : PassME("CacheFieldLoweringInfo", kNoNodes) {
}
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->DoCacheFieldLoweringInfo();
}
- bool Gate(const CompilationUnit *cUnit) const {
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
return cUnit->mir_graph->HasFieldAccess();
}
};
@@ -44,16 +50,22 @@
* @class CacheMethodLoweringInfo
* @brief Cache the lowering info for methods called by INVOKEs.
*/
-class CacheMethodLoweringInfo : public Pass {
+class CacheMethodLoweringInfo : public PassME {
public:
- CacheMethodLoweringInfo() : Pass("CacheMethodLoweringInfo", kNoNodes) {
+ CacheMethodLoweringInfo() : PassME("CacheMethodLoweringInfo", kNoNodes) {
}
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->DoCacheMethodLoweringInfo();
}
- bool Gate(const CompilationUnit *cUnit) const {
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
return cUnit->mir_graph->HasInvokes();
}
};
@@ -62,26 +74,41 @@
* @class CallInlining
* @brief Perform method inlining pass.
*/
-class CallInlining : public Pass {
+class CallInlining : public PassME {
public:
- CallInlining() : Pass("CallInlining") {
+ CallInlining() : PassME("CallInlining") {
}
- bool Gate(const CompilationUnit* cUnit) const {
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
return cUnit->mir_graph->InlineCallsGate();
}
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->InlineCallsStart();
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+ bool Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
cUnit->mir_graph->InlineCalls(bb);
// No need to repeat, so just return false.
return false;
}
- void End(CompilationUnit* cUnit) const {
+ void End(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->InlineCallsEnd();
}
};
@@ -90,48 +117,57 @@
* @class CodeLayout
* @brief Perform the code layout pass.
*/
-class CodeLayout : public Pass {
+class CodeLayout : public PassME {
public:
- CodeLayout() : Pass("CodeLayout", "2_post_layout_cfg") {
+ CodeLayout() : PassME("CodeLayout", "2_post_layout_cfg") {
}
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->VerifyDataflow();
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
+ bool Worker(const PassDataHolder* data) const;
};
/**
* @class SSATransformation
* @brief Perform an SSA representation pass on the CompilationUnit.
*/
-class SSATransformation : public Pass {
+class SSATransformation : public PassME {
public:
- SSATransformation() : Pass("SSATransformation", kPreOrderDFSTraversal, "3_post_ssa_cfg") {
+ SSATransformation() : PassME("SSATransformation", kPreOrderDFSTraversal, "3_post_ssa_cfg") {
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
+ bool Worker(const PassDataHolder* data) const;
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->InitializeSSATransformation();
}
- void End(CompilationUnit* cUnit) const;
+ void End(const PassDataHolder* data) const;
};
/**
* @class ConstantPropagation
* @brief Perform a constant propagation pass.
*/
-class ConstantPropagation : public Pass {
+class ConstantPropagation : public PassME {
public:
- ConstantPropagation() : Pass("ConstantPropagation") {
+ ConstantPropagation() : PassME("ConstantPropagation") {
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
+ bool Worker(const PassDataHolder* data) const;
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->InitializeConstantPropagation();
}
};
@@ -140,12 +176,15 @@
* @class InitRegLocations
* @brief Initialize Register Locations.
*/
-class InitRegLocations : public Pass {
+class InitRegLocations : public PassME {
public:
- InitRegLocations() : Pass("InitRegLocation", kNoNodes) {
+ InitRegLocations() : PassME("InitRegLocation", kNoNodes) {
}
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->InitRegLocations();
}
};
@@ -154,53 +193,77 @@
* @class MethodUseCount
* @brief Count the register uses of the method
*/
-class MethodUseCount : public Pass {
+class MethodUseCount : public PassME {
public:
- MethodUseCount() : Pass("UseCount") {
+ MethodUseCount() : PassME("UseCount") {
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
+ bool Worker(const PassDataHolder* data) const;
- bool Gate(const CompilationUnit* cUnit) const;
+ bool Gate(const PassDataHolder* data) const;
};
/**
* @class NullCheckEliminationAndTypeInference
* @brief Null check elimination and type inference.
*/
-class NullCheckEliminationAndTypeInference : public Pass {
+class NullCheckEliminationAndTypeInference : public PassME {
public:
NullCheckEliminationAndTypeInference()
- : Pass("NCE_TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_nce_cfg") {
+ : PassME("NCE_TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_nce_cfg") {
}
- void Start(CompilationUnit* cUnit) const {
+ void Start(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->EliminateNullChecksAndInferTypesStart();
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+ bool Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
return cUnit->mir_graph->EliminateNullChecksAndInferTypes(bb);
}
- void End(CompilationUnit* cUnit) const {
+ void End(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->EliminateNullChecksAndInferTypesEnd();
}
};
-class ClassInitCheckElimination : public Pass {
+class ClassInitCheckElimination : public PassME {
public:
- ClassInitCheckElimination() : Pass("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
+ ClassInitCheckElimination() : PassME("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
}
- bool Gate(const CompilationUnit* cUnit) const {
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
return cUnit->mir_graph->EliminateClassInitChecksGate();
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+ bool Worker(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+ CompilationUnit* cUnit = pass_me_data_holder->c_unit;
+ DCHECK(cUnit != nullptr);
+ BasicBlock* bb = pass_me_data_holder->bb;
+ DCHECK(bb != nullptr);
return cUnit->mir_graph->EliminateClassInitChecks(bb);
}
- void End(CompilationUnit* cUnit) const {
+ void End(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
cUnit->mir_graph->EliminateClassInitChecksEnd();
}
};
@@ -209,32 +272,38 @@
* @class BBCombine
* @brief Combine BasicBlocks where possible.
*/
-class BBCombine : public Pass {
+class BBCombine : public PassME {
public:
- BBCombine() : Pass("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") {
+ BBCombine() : PassME("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") {
}
- bool Gate(const CompilationUnit* cUnit) const {
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
return ((cUnit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
}
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
+ bool Worker(const PassDataHolder* data) const;
};
/**
* @class BBOptimizations
* @brief Any simple BasicBlock optimization can be put here.
*/
-class BBOptimizations : public Pass {
+class BBOptimizations : public PassME {
public:
- BBOptimizations() : Pass("BBOptimizations", kNoNodes, "5_post_bbo_cfg") {
+ BBOptimizations() : PassME("BBOptimizations", kNoNodes, "5_post_bbo_cfg") {
}
- bool Gate(const CompilationUnit* cUnit) const {
+ bool Gate(const PassDataHolder* data) const {
+ DCHECK(data != nullptr);
+ CompilationUnit* cUnit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+ DCHECK(cUnit != nullptr);
return ((cUnit->disable_opt & (1 << kBBOpt)) == 0);
}
- void Start(CompilationUnit* cUnit) const;
+ void Start(const PassDataHolder* data) const;
};
} // namespace art
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 77b5057..ec2556b 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -21,7 +21,7 @@
#include "dataflow_iterator-inl.h"
#include "leb128.h"
#include "mirror/object.h"
-#include "pass_driver.h"
+#include "pass_driver_me.h"
#include "runtime.h"
#include "base/logging.h"
#include "base/timing_logger.h"
@@ -924,7 +924,7 @@
}
/* Create the pass driver and launch it */
- PassDriver pass_driver(&cu);
+ PassDriverME pass_driver(&cu);
pass_driver.Launch();
if (cu.enable_debug & (1 << kDebugDumpCheckStats)) {
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 2b1c420..e56e016 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -144,7 +144,6 @@
mir->ssa_rep->fp_def = nullptr; // Not used by LVN.
mir->dalvikInsn.opcode = def->opcode;
mir->offset = i; // LVN uses offset only for debug output
- mir->width = 1u; // Not used by LVN.
mir->optimization_flags = 0u;
if (i != 0u) {
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index ba4224e..4ba6677 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -196,7 +196,7 @@
}
orig_block->last_mir_insn = prev;
- prev->next = NULL;
+ prev->next = nullptr;
/*
* Update the immediate predecessor block pointer so that outgoing edges
@@ -220,6 +220,7 @@
while (p != bottom_block->last_mir_insn) {
p = p->next;
DCHECK(p != nullptr);
+ p->bb = bottom_block->id;
int opcode = p->dalvikInsn.opcode;
/*
* Some messiness here to ensure that we only enter real opcodes and only the
@@ -543,7 +544,7 @@
new_block->start_offset = insn->offset;
cur_block->fall_through = new_block->id;
new_block->predecessors->Insert(cur_block->id);
- MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+ MIR* new_insn = NewMIR();
*new_insn = *insn;
insn->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpCheck);
@@ -629,11 +630,10 @@
/* Parse all instructions and put them into containing basic blocks */
while (code_ptr < code_end) {
- MIR *insn = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
+ MIR *insn = NewMIR();
insn->offset = current_offset_;
insn->m_unit_index = current_method_;
int width = ParseInsn(code_ptr, &insn->dalvikInsn);
- insn->width = width;
Instruction::Code opcode = insn->dalvikInsn.opcode;
if (opcode_count_ != NULL) {
opcode_count_[static_cast<int>(opcode)]++;
@@ -924,7 +924,7 @@
fclose(file);
}
-/* Insert an MIR instruction to the end of a basic block */
+/* Insert an MIR instruction to the end of a basic block. */
void BasicBlock::AppendMIR(MIR* mir) {
if (first_mir_insn == nullptr) {
DCHECK(last_mir_insn == nullptr);
@@ -935,9 +935,11 @@
mir->next = nullptr;
last_mir_insn = mir;
}
+
+ mir->bb = id;
}
-/* Insert an MIR instruction to the head of a basic block */
+/* Insert an MIR instruction to the head of a basic block. */
void BasicBlock::PrependMIR(MIR* mir) {
if (first_mir_insn == nullptr) {
DCHECK(last_mir_insn == nullptr);
@@ -947,17 +949,53 @@
mir->next = first_mir_insn;
first_mir_insn = mir;
}
+
+ mir->bb = id;
}
-/* Insert a MIR instruction after the specified MIR */
+/* Insert a MIR instruction after the specified MIR. */
void BasicBlock::InsertMIRAfter(MIR* current_mir, MIR* new_mir) {
new_mir->next = current_mir->next;
current_mir->next = new_mir;
if (last_mir_insn == current_mir) {
- /* Is the last MIR in the block */
+ /* Is the last MIR in the block? */
last_mir_insn = new_mir;
}
+
+ new_mir->bb = id;
+}
+
+MIR* BasicBlock::FindPreviousMIR(MIR* mir) {
+ MIR* current = first_mir_insn;
+
+ while (current != nullptr) {
+ MIR* next = current->next;
+
+ if (next == mir) {
+ return current;
+ }
+
+ current = next;
+ }
+
+ return nullptr;
+}
+
+void BasicBlock::InsertMIRBefore(MIR* current_mir, MIR* new_mir) {
+ if (first_mir_insn == current_mir) {
+ /* current_mir is the first MIR in the block: link new_mir ahead of it. */
+ new_mir->next = current_mir;
+ first_mir_insn = new_mir;
+ new_mir->bb = id;
+ } else {
+ MIR* prev = FindPreviousMIR(current_mir);
+
+ if (prev != nullptr) {
+ prev->next = new_mir;
+ new_mir->next = current_mir;
+ new_mir->bb = id;
+ }
+ }
+}
MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) {
@@ -1240,6 +1278,12 @@
return info;
}
+// Allocate a new MIR.
+MIR* MIRGraph::NewMIR() {
+ MIR* mir = new (arena_) MIR();
+ return mir;
+}
+
// Allocate a new basic block.
BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
BasicBlock* bb = static_cast<BasicBlock*>(arena_->Alloc(sizeof(BasicBlock),
@@ -1344,4 +1388,106 @@
return nullptr;
}
+bool BasicBlock::RemoveMIR(MIR* mir) {
+ if (mir == nullptr) {
+ return false;
+ }
+
+ // Find the MIR, and the one before it if they exist.
+ MIR* current = nullptr;
+ MIR* prev = nullptr;
+
+ // Walk the instruction list looking for mir.
+ for (current = first_mir_insn; current != nullptr; prev = current, current = current->next) {
+ if (current == mir) {
+ break;
+ }
+ }
+
+ // Did we find it?
+ if (current != nullptr) {
+ MIR* next = current->next;
+
+ // Unlink current by making prev skip over it.
+ if (prev != nullptr) {
+ prev->next = next;
+ }
+
+ // Fix up the block's first/last pointers if they referenced the removed MIR.
+ if (first_mir_insn == current) {
+ first_mir_insn = next;
+ }
+
+ if (last_mir_insn == current) {
+ last_mir_insn = prev;
+ }
+
+ // Found it and removed it.
+ return true;
+ }
+
+ // We did not find it.
+ return false;
+}
+
+MIR* MIR::Copy(MIRGraph* mir_graph) {
+ MIR* res = mir_graph->NewMIR();
+ *res = *this;
+
+ // Detach the copy: clear the list link, owning block, and SSA info.
+ res->next = nullptr;
+ res->bb = NullBasicBlockId;
+ res->ssa_rep = nullptr;
+
+ return res;
+}
+
+MIR* MIR::Copy(CompilationUnit* c_unit) {
+ return Copy(c_unit->mir_graph.get());
+}
+
+uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) {
+ // Default result.
+ uint32_t res = 0;
+
+ // Store-style opcodes carry the value to store in the first use slot(s); skip over it.
+ switch (opcode) {
+ case Instruction::IPUT:
+ case Instruction::IPUT_OBJECT:
+ case Instruction::IPUT_BOOLEAN:
+ case Instruction::IPUT_BYTE:
+ case Instruction::IPUT_CHAR:
+ case Instruction::IPUT_SHORT:
+ case Instruction::IPUT_QUICK:
+ case Instruction::IPUT_OBJECT_QUICK:
+ case Instruction::APUT:
+ case Instruction::APUT_OBJECT:
+ case Instruction::APUT_BOOLEAN:
+ case Instruction::APUT_BYTE:
+ case Instruction::APUT_CHAR:
+ case Instruction::APUT_SHORT:
+ case Instruction::SPUT:
+ case Instruction::SPUT_OBJECT:
+ case Instruction::SPUT_BOOLEAN:
+ case Instruction::SPUT_BYTE:
+ case Instruction::SPUT_CHAR:
+ case Instruction::SPUT_SHORT:
+ // Skip the VR containing what to store.
+ res = 1;
+ break;
+ case Instruction::IPUT_WIDE:
+ case Instruction::IPUT_WIDE_QUICK:
+ case Instruction::APUT_WIDE:
+ case Instruction::SPUT_WIDE:
+ // Skip the two VRs containing what to store.
+ res = 2;
+ break;
+ default:
+ // Do nothing in the general case.
+ break;
+ }
+
+ return res;
+}
+
} // namespace art
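
A hedged sketch of how the new BasicBlock editing helpers compose, assuming the declarations from this patch (ReplaceWithDetachedCopy is purely illustrative):

    #include "mir_graph.h"

    namespace art {

    // Illustrative only: swap old_mir for a detached copy of itself.
    static void ReplaceWithDetachedCopy(MIRGraph* mir_graph, BasicBlock* bb, MIR* old_mir) {
      // Copy() resets next, bb and ssa_rep, so the clone starts unlinked.
      MIR* clone = old_mir->Copy(mir_graph);
      // InsertMIRBefore() links the clone in and stamps clone->bb = bb->id.
      bb->InsertMIRBefore(old_mir, clone);
      // RemoveMIR() unlinks old_mir, fixing the block's first/last pointers;
      // it returns whether the instruction was found in this block at all.
      bool removed = bb->RemoveMIR(old_mir);
      DCHECK(removed);
    }

    }  // namespace art
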
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 11d2fbe..0bb8265 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -242,6 +242,8 @@
bool* fp_use;
int32_t* defs;
bool* fp_def;
+
+ static uint32_t GetStartUseIndex(Instruction::Code opcode);
};
/*
@@ -261,12 +263,15 @@
uint32_t vC;
uint32_t arg[5]; /* vC/D/E/F/G in invoke or filled-new-array */
Instruction::Code opcode;
+
+ explicit DecodedInstruction() : vA(0), vB(0), vB_wide(0), vC(0), opcode(Instruction::NOP) {
+ }
} dalvikInsn;
- uint16_t width; // Note: width can include switch table or fill array data.
NarrowDexOffset offset; // Offset of the instruction in code units.
uint16_t optimization_flags;
int16_t m_unit_index; // From which method was this MIR included
+ BasicBlockId bb;
MIR* next;
SSARepresentation* ssa_rep;
union {
@@ -285,6 +290,23 @@
// INVOKE data index, points to MIRGraph::method_lowering_infos_.
uint32_t method_lowering_info;
} meta;
+
+ explicit MIR() : offset(0), optimization_flags(0), m_unit_index(0), bb(NullBasicBlockId),
+ next(nullptr), ssa_rep(nullptr) {
+ memset(&meta, 0, sizeof(meta));
+ }
+
+ uint32_t GetStartUseIndex() const {
+ return SSARepresentation::GetStartUseIndex(dalvikInsn.opcode);
+ }
+
+ MIR* Copy(CompilationUnit *c_unit);
+ MIR* Copy(MIRGraph* mir_graph);
+
+ static void* operator new(size_t size, ArenaAllocator* arena) {
+ return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
+ }
+ static void operator delete(void* p) {} // Nop.
};
struct SuccessorBlockInfo;
@@ -319,6 +341,8 @@
void AppendMIR(MIR* mir);
void PrependMIR(MIR* mir);
void InsertMIRAfter(MIR* current_mir, MIR* new_mir);
+ void InsertMIRBefore(MIR* current_mir, MIR* new_mir);
+ MIR* FindPreviousMIR(MIR* mir);
/**
* @brief Used to obtain the next MIR that follows unconditionally.
@@ -329,6 +353,7 @@
* @return Returns the following MIR if one can be found.
*/
MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
+ bool RemoveMIR(MIR* mir);
};
/*
@@ -836,6 +861,7 @@
void DumpMIRGraph();
CallInfo* NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type, bool is_range);
BasicBlock* NewMemBB(BBType block_type, int block_id);
+ MIR* NewMIR();
MIR* AdvanceMIR(BasicBlock** p_bb, MIR* mir);
BasicBlock* NextDominatedBlock(BasicBlock* bb);
bool LayoutBlocks(BasicBlock* bb);
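
To make the GetStartUseIndex() contract concrete, a hedged fragment (values follow the switch in mir_graph.cc above; mir is assumed to be an in-scope MIR* with SSA info):

    uint32_t s1 = SSARepresentation::GetStartUseIndex(Instruction::IPUT);       // 1: skip the stored VR.
    uint32_t s2 = SSARepresentation::GetStartUseIndex(Instruction::IPUT_WIDE);  // 2: skip the stored VR pair.
    uint32_t s3 = SSARepresentation::GetStartUseIndex(Instruction::IGET);       // 0: nothing to skip.

    for (uint32_t i = mir->GetStartUseIndex();
         i < static_cast<uint32_t>(mir->ssa_rep->num_uses); ++i) {
      // Only the object/array/index operands remain from here on.
    }
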
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
index 891d9fb..86092b6 100644
--- a/compiler/dex/mir_optimization_test.cc
+++ b/compiler/dex/mir_optimization_test.cc
@@ -170,7 +170,6 @@
}
mir->ssa_rep = nullptr;
mir->offset = 2 * i; // All insns need to be at least 2 code units long.
- mir->width = 2u;
mir->optimization_flags = 0u;
merged_df_flags |= MIRGraph::GetDataFlowAttributes(def->opcode);
}
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index 9457d5b..ac22294 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -19,49 +19,21 @@
#include <string>
+#include "base/macros.h"
namespace art {
-// Forward declarations.
-struct BasicBlock;
-struct CompilationUnit;
-class Pass;
-
-/**
- * @brief OptimizationFlag is an enumeration to perform certain tasks for a given pass.
- * @details Each enum should be a power of 2 to be correctly used.
- */
-enum OptimizationFlag {
-};
-
-enum DataFlowAnalysisMode {
- kAllNodes = 0, /**< @brief All nodes. */
- kPreOrderDFSTraversal, /**< @brief Depth-First-Search / Pre-Order. */
- kRepeatingPreOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Pre-Order. */
- kReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Reverse Post-Order. */
- kRepeatingPostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Post-Order. */
- kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
- kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
- kNoNodes, /**< @brief Skip BasicBlock traversal. */
+// Empty pass data class that can be extended by any pass extending the base Pass class.
+class PassDataHolder {
};
/**
* @class Pass
- * @brief Pass is the Pass structure for the optimizations.
- * @details The following structure has the different optimization passes that we are going to do.
+ * @brief Base Pass class; subclasses define how the pass performs its work.
*/
class Pass {
public:
- explicit Pass(const char* name, DataFlowAnalysisMode type = kAllNodes,
- unsigned int flags = 0u, const char* dump = "")
- : pass_name_(name), traversal_type_(type), flags_(flags), dump_cfg_folder_(dump) {
- }
-
- Pass(const char* name, DataFlowAnalysisMode type, const char* dump)
- : pass_name_(name), traversal_type_(type), flags_(0), dump_cfg_folder_(dump) {
- }
-
- Pass(const char* name, const char* dump)
- : pass_name_(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_(dump) {
+ explicit Pass(const char* name)
+ : pass_name_(name) {
}
virtual ~Pass() {
@@ -71,59 +43,42 @@
return pass_name_;
}
- virtual DataFlowAnalysisMode GetTraversal() const {
- return traversal_type_;
- }
-
- virtual bool GetFlag(OptimizationFlag flag) const {
- return (flags_ & flag);
- }
-
- const char* GetDumpCFGFolder() const {
- return dump_cfg_folder_;
- }
-
/**
* @brief Gate for the pass: determines whether to execute the pass or not.
- * @param c_unit the CompilationUnit.
- * @return whether or not to execute the pass
+ * @param data the PassDataHolder.
+ * @return whether or not to execute the pass.
*/
- virtual bool Gate(const CompilationUnit* c_unit) const {
+ virtual bool Gate(const PassDataHolder* data) const {
// Unused parameter.
- UNUSED(c_unit);
+ UNUSED(data);
// Base class says yes.
return true;
}
/**
- * @brief Start of the pass: called before the WalkBasicBlocks function
- * @param c_unit the considered CompilationUnit.
+ * @brief Start of the pass: called before the Worker function.
*/
- virtual void Start(CompilationUnit* c_unit) const {
+ virtual void Start(const PassDataHolder* data) const {
// Unused parameter.
- UNUSED(c_unit);
+ UNUSED(data);
}
/**
- * @brief End of the pass: called after the WalkBasicBlocks function
- * @param c_unit the considered CompilationUnit.
+ * @brief End of the pass: called after the Worker function.
*/
- virtual void End(CompilationUnit* c_unit) const {
+ virtual void End(const PassDataHolder* data) const {
// Unused parameter.
- UNUSED(c_unit);
+ UNUSED(data);
}
/**
- * @brief Actually walk the BasicBlocks following a particular traversal type.
- * @param c_unit the CompilationUnit.
- * @param bb the BasicBlock.
+ * @brief Worker function of the pass: performs the per-BasicBlock work.
+ * @param data the object containing data necessary for the pass.
* @return whether or not there is a change when walking the BasicBlock.
*/
- virtual bool WalkBasicBlocks(CompilationUnit* c_unit, BasicBlock* bb) const {
- // Unused parameters.
- UNUSED(c_unit);
- UNUSED(bb);
+ virtual bool Worker(const PassDataHolder* data) const {
+ // Unused parameter.
+ UNUSED(data);
// BasicBlock did not change.
return false;
@@ -133,15 +88,6 @@
/** @brief The pass name: used for searching for a pass when running a particular pass or debugging. */
const char* const pass_name_;
- /** @brief Type of traversal: determines the order to execute the pass on the BasicBlocks. */
- const DataFlowAnalysisMode traversal_type_;
-
- /** @brief Flags for additional directives: used to determine if a particular clean-up is necessary post pass. */
- const unsigned int flags_;
-
- /** @brief CFG Dump Folder: what sub-folder to use for dumping the CFGs post pass. */
- const char* const dump_cfg_folder_;
-
private:
// In order to make the all passes not copy-friendly.
DISALLOW_COPY_AND_ASSIGN(Pass);
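
With the traversal machinery gone, Pass only carries a name and the Gate/Start/Worker/End hooks over an opaque PassDataHolder, so each driver flavor supplies its own payload. A hedged sketch (the Foo names are hypothetical):

    // Hypothetical holder for some other backend, mirroring PassMEDataHolder.
    class PassFooDataHolder : public PassDataHolder {
     public:
      FooCompilationUnit* unit;  // Whatever per-run state Foo passes need.
    };
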
diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h
index 2b7196e..aa0d1ae 100644
--- a/compiler/dex/pass_driver.h
+++ b/compiler/dex/pass_driver.h
@@ -22,77 +22,169 @@
#include "safe_map.h"
// Forward Declarations.
-class CompilationUnit;
class Pass;
-
+class PassDriver;
namespace art {
+/**
+ * @brief Helper function to create a single instance of a given Pass that can be shared
+ * across threads.
+ */
+template <typename PassType>
+const Pass* GetPassInstance() {
+ static const PassType pass;
+ return &pass;
+}
+
+// Empty base data holder that concrete pass drivers can extend.
+class PassDriverDataHolder {
+};
/**
* @class PassDriver
- * @brief PassDriver is the wrapper around all Pass instances in order to execute them from the Middle-End
+ * @brief PassDriver is the wrapper around all Pass instances in order to execute them.
*/
+template <typename PassDriverType>
class PassDriver {
public:
- explicit PassDriver(CompilationUnit* cu, bool create_default_passes = true);
+ explicit PassDriver() {
+ InitializePasses();
+ }
- ~PassDriver();
+ virtual ~PassDriver() {
+ }
/**
* @brief Insert a Pass: it is an error to insert a Pass whose name is already used.
- * @param new_pass the new Pass to insert in the map and list.
- * @param warn_override warn if the name of the Pass is already used.
+ * @param new_pass the new Pass to insert in the list.
*/
- void InsertPass(const Pass* new_pass);
+ void InsertPass(const Pass* new_pass) {
+ DCHECK(new_pass != nullptr);
+ DCHECK(new_pass->GetName() != nullptr && new_pass->GetName()[0] != 0);
+
+ // It is an error to override an existing pass.
+ DCHECK(GetPass(new_pass->GetName()) == nullptr)
+ << "Pass name " << new_pass->GetName() << " already used.";
+
+ // Now add to the list.
+ pass_list_.push_back(new_pass);
+ }
/**
* @brief Run a pass using the name as key.
- * @param c_unit the considered CompilationUnit.
* @param pass_name the Pass name.
* @return whether the pass was applied.
*/
- bool RunPass(CompilationUnit* c_unit, const char* pass_name);
+ virtual bool RunPass(const char* pass_name) {
+ // Paranoid: we need a valid pass name.
+ DCHECK(pass_name != nullptr && pass_name[0] != 0);
+
+ const Pass* cur_pass = GetPass(pass_name);
+
+ if (cur_pass != nullptr) {
+ return RunPass(cur_pass);
+ }
+
+ // Return false, we did not find the pass.
+ return false;
+ }
+
+ /**
+ * @brief Runs all the passes in pass_list_.
+ */
+ void Launch() {
+ for (const Pass* cur_pass : pass_list_) {
+ RunPass(cur_pass);
+ }
+ }
+
+ /**
+ * @brief Searches for a particular pass.
+ * @param name the name of the pass to be searched for.
+ */
+ const Pass* GetPass(const char* name) const {
+ for (const Pass* cur_pass : pass_list_) {
+ if (strcmp(name, cur_pass->GetName()) == 0) {
+ return cur_pass;
+ }
+ }
+ return nullptr;
+ }
+
+ static void CreateDefaultPassList(const std::string& disable_passes) {
+ // Insert each pass from g_passes into g_default_pass_list.
+ PassDriverType::g_default_pass_list.clear();
+ PassDriverType::g_default_pass_list.reserve(PassDriver<PassDriverType>::g_passes_size);
+ for (uint16_t i = 0; i < PassDriver<PassDriverType>::g_passes_size; ++i) {
+ const Pass* pass = PassDriver<PassDriverType>::g_passes[i];
+ // Check if we should disable this pass.
+ if (disable_passes.find(pass->GetName()) != std::string::npos) {
+ LOG(INFO) << "Skipping " << pass->GetName();
+ } else {
+ PassDriver<PassDriverType>::g_default_pass_list.push_back(pass);
+ }
+ }
+ }
/**
* @brief Run a pass using the Pass itself.
* @param time_split do we want a time split request (default: false)?
* @return whether the pass was applied.
*/
- bool RunPass(CompilationUnit* c_unit, const Pass* pass, bool time_split = false);
+ virtual bool RunPass(const Pass* pass, bool time_split = false) = 0;
- void Launch();
+ /**
+ * @brief Print the pass names of all the passes available.
+ */
+ static void PrintPassNames() {
+ LOG(INFO) << "Loop Passes are:";
- void HandlePassFlag(CompilationUnit* c_unit, const Pass* pass);
+ for (const Pass* cur_pass : PassDriver<PassDriverType>::g_default_pass_list) {
+ LOG(INFO) << "\t-" << cur_pass->GetName();
+ }
+ }
+
+ protected:
+ /**
+ * @brief Gets the list of passes currently scheduled to execute.
+ * @return pass_list_
+ */
+ std::vector<const Pass*>& GetPasses() {
+ return pass_list_;
+ }
+
+ virtual void InitializePasses() {
+ SetDefaultPasses();
+ }
+
+ void SetDefaultPasses() {
+ pass_list_ = PassDriver<PassDriverType>::g_default_pass_list;
+ }
/**
* @brief Apply a pass: perform the start, worker, and end calls.
*/
- void ApplyPass(CompilationUnit* c_unit, const Pass* pass);
-
- /**
- * @brief Dispatch a patch: walk the BasicBlocks depending on the traversal mode
- */
- void DispatchPass(CompilationUnit* c_unit, const Pass* pass);
-
- static void PrintPassNames();
- static void CreateDefaultPassList(const std::string& disable_passes);
-
- const Pass* GetPass(const char* name) const;
-
- const char* GetDumpCFGFolder() const {
- return dump_cfg_folder_;
+ virtual void ApplyPass(PassDataHolder* data, const Pass* pass) {
+ pass->Start(data);
+ DispatchPass(pass);
+ pass->End(data);
}
-
- protected:
- void CreatePasses();
+ /**
+ * @brief Dispatch a pass.
+ * Gives the ability to add logic when running the pass.
+ */
+ virtual void DispatchPass(const Pass* pass) {
+ UNUSED(pass);
+ }
/** @brief List of passes: provides the order to execute the passes. */
std::vector<const Pass*> pass_list_;
- /** @brief The CompilationUnit on which to execute the passes on. */
- CompilationUnit* const cu_;
+ /** @brief The number of passes within g_passes. */
+ static const uint16_t g_passes_size;
- /** @brief Dump CFG base folder: where is the base folder for dumping CFGs. */
- const char* dump_cfg_folder_;
+ /** @brief The static array of passes available to this driver. */
+ static const Pass* const g_passes[];
+
+ /** @brief The default pass list is used to initialize pass_list_. */
+ static std::vector<const Pass*> g_default_pass_list;
};
} // namespace art
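
Each concrete driver has to specialize the three static members declared above; a hedged sketch for a hypothetical MyDriver with a hypothetical SomePass (pass_driver_me.cc below does exactly this for PassDriverME):

    template<>
    const Pass* const PassDriver<MyDriver>::g_passes[] = {
      GetPassInstance<SomePass>(),  // One shared, immutable instance per pass.
    };

    template<>
    uint16_t const PassDriver<MyDriver>::g_passes_size =
        arraysize(PassDriver<MyDriver>::g_passes);

    template<>
    std::vector<const Pass*> PassDriver<MyDriver>::g_default_pass_list(
        PassDriver<MyDriver>::g_passes,
        PassDriver<MyDriver>::g_passes + PassDriver<MyDriver>::g_passes_size);
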
diff --git a/compiler/dex/pass_driver_me.cc b/compiler/dex/pass_driver_me.cc
new file mode 100644
index 0000000..d054500
--- /dev/null
+++ b/compiler/dex/pass_driver_me.cc
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "base/macros.h"
+#include "bb_optimizations.h"
+#include "compiler_internals.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
+#include "pass_driver_me.h"
+
+namespace art {
+
+namespace { // anonymous namespace
+
+void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass, DataflowIterator* iterator) {
+ // Paranoid: Check the iterator before walking the BasicBlocks.
+ DCHECK(iterator != nullptr);
+ bool change = false;
+ for (BasicBlock* bb = iterator->Next(change); bb != nullptr; bb = iterator->Next(change)) {
+ data->bb = bb;
+ change = pass->Worker(data);
+ }
+}
+
+template <typename Iterator>
+inline void DoWalkBasicBlocks(PassMEDataHolder* data, const PassME* pass) {
+ DCHECK(data != nullptr);
+ CompilationUnit* c_unit = data->c_unit;
+ DCHECK(c_unit != nullptr);
+ Iterator iterator(c_unit->mir_graph.get());
+ DoWalkBasicBlocks(data, pass, &iterator);
+}
+} // anonymous namespace
+
+/*
+ * Create the pass list. These passes are immutable and are shared across the threads.
+ *
+ * Advantage is that there will be no race conditions here.
+ * Disadvantage is that passes cannot change their internal state depending on the CompilationUnit:
+ * - This is not yet an issue: no current pass would require it.
+ */
+// The initial list of passes to be used by the PassDriverME.
+template<>
+const Pass* const PassDriver<PassDriverME>::g_passes[] = {
+ GetPassInstance<CacheFieldLoweringInfo>(),
+ GetPassInstance<CacheMethodLoweringInfo>(),
+ GetPassInstance<CallInlining>(),
+ GetPassInstance<CodeLayout>(),
+ GetPassInstance<SSATransformation>(),
+ GetPassInstance<ConstantPropagation>(),
+ GetPassInstance<InitRegLocations>(),
+ GetPassInstance<MethodUseCount>(),
+ GetPassInstance<NullCheckEliminationAndTypeInference>(),
+ GetPassInstance<ClassInitCheckElimination>(),
+ GetPassInstance<BBCombine>(),
+ GetPassInstance<BBOptimizations>(),
+};
+
+// The number of the passes in the initial list of Passes (g_passes).
+template<>
+uint16_t const PassDriver<PassDriverME>::g_passes_size =
+    arraysize(PassDriver<PassDriverME>::g_passes);
+
+// The default pass list is used by the PassDriverME instance of PassDriver to initialize pass_list_.
+template<>
+std::vector<const Pass*> PassDriver<PassDriverME>::g_default_pass_list(
+    PassDriver<PassDriverME>::g_passes,
+    PassDriver<PassDriverME>::g_passes + PassDriver<PassDriverME>::g_passes_size);
+
+PassDriverME::PassDriverME(CompilationUnit* cu)
+ : PassDriver(), pass_me_data_holder_(), dump_cfg_folder_("/sdcard/") {
+ pass_me_data_holder_.bb = nullptr;
+ pass_me_data_holder_.c_unit = cu;
+}
+
+PassDriverME::~PassDriverME() {
+}
+
+void PassDriverME::DispatchPass(const Pass* pass) {
+ VLOG(compiler) << "Dispatching " << pass->GetName();
+ const PassME* me_pass = down_cast<const PassME*>(pass);
+
+ DataFlowAnalysisMode mode = me_pass->GetTraversal();
+
+ switch (mode) {
+ case kPreOrderDFSTraversal:
+ DoWalkBasicBlocks<PreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kRepeatingPreOrderDFSTraversal:
+ DoWalkBasicBlocks<RepeatingPreOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kRepeatingPostOrderDFSTraversal:
+ DoWalkBasicBlocks<RepeatingPostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kReversePostOrderDFSTraversal:
+ DoWalkBasicBlocks<ReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kRepeatingReversePostOrderDFSTraversal:
+ DoWalkBasicBlocks<RepeatingReversePostOrderDfsIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kPostOrderDOMTraversal:
+ DoWalkBasicBlocks<PostOrderDOMIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kAllNodes:
+ DoWalkBasicBlocks<AllNodesIterator>(&pass_me_data_holder_, me_pass);
+ break;
+ case kNoNodes:
+ break;
+ default:
+ LOG(FATAL) << "Iterator mode not handled in dispatcher: " << mode;
+ break;
+ }
+}
+
+bool PassDriverME::RunPass(const Pass* pass, bool time_split) {
+ // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name.
+ DCHECK(pass != nullptr);
+ DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0);
+ CompilationUnit* c_unit = pass_me_data_holder_.c_unit;
+ DCHECK(c_unit != nullptr);
+
+ // Do we perform a time split?
+ if (time_split) {
+ c_unit->NewTimingSplit(pass->GetName());
+ }
+
+ // Check the pass gate first.
+ bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
+ if (should_apply_pass) {
+ // Apply the pass: the start, worker, and end calls.
+ ApplyPass(&pass_me_data_holder_, pass);
+
+ // Do we want to log it?
+ if ((c_unit->enable_debug & (1 << kDebugDumpCFG)) != 0) {
+ // Do we have a pass folder?
+ const PassME* me_pass = down_cast<const PassME*>(pass);
+ const char* pass_folder = me_pass->GetDumpCFGFolder();
+ DCHECK(pass_folder != nullptr);
+
+ if (pass_folder[0] != 0) {
+ // Create directory prefix.
+ std::string prefix = GetDumpCFGFolder();
+ prefix += pass_folder;
+ prefix += "/";
+
+ c_unit->mir_graph->DumpCFG(prefix.c_str(), false);
+ }
+ }
+ }
+
+ // If the pass gate passed, we can declare success.
+ return should_apply_pass;
+}
+
+const char* PassDriverME::GetDumpCFGFolder() const {
+ return dump_cfg_folder_;
+}
+
+} // namespace art
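
A short usage sketch, mirroring the frontend.cc change earlier in this patch (cu is assumed to be an already-initialized CompilationUnit):

    PassDriverME pass_driver(&cu);  // Seeds pass_list_ from g_default_pass_list.
    pass_driver.Launch();           // Gate/Start/Worker/End for every pass in order.

    // Or run a single pass by name; returns whether its gate let it run.
    bool applied = pass_driver.RunPass("BBCombine");
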
diff --git a/compiler/dex/pass_driver_me.h b/compiler/dex/pass_driver_me.h
new file mode 100644
index 0000000..0142934
--- /dev/null
+++ b/compiler/dex/pass_driver_me.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_H_
+#define ART_COMPILER_DEX_PASS_DRIVER_ME_H_
+
+#include "bb_optimizations.h"
+#include "pass_driver.h"
+#include "pass_me.h"
+
+namespace art {
+
+class PassDriverME : public PassDriver<PassDriverME> {
+ public:
+ explicit PassDriverME(CompilationUnit* cu);
+ ~PassDriverME();
+ /**
+ * @brief Dispatch a pass: walk the BasicBlocks depending on the traversal mode.
+ */
+ void DispatchPass(const Pass* pass);
+ bool RunPass(const Pass* pass, bool time_split = false);
+ const char* GetDumpCFGFolder() const;
+ protected:
+ /** @brief The data holder that contains data needed for the PassDriverME. */
+ PassMEDataHolder pass_me_data_holder_;
+
+ /** @brief Dump CFG base folder: where is the base folder for dumping CFGs. */
+ const char* dump_cfg_folder_;
+};
+
+} // namespace art
+#endif // ART_COMPILER_DEX_PASS_DRIVER_ME_H_
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
new file mode 100644
index 0000000..1132166
--- /dev/null
+++ b/compiler/dex/pass_me.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_PASS_ME_H_
+#define ART_COMPILER_DEX_PASS_ME_H_
+
+#include <string>
+#include "pass.h"
+
+namespace art {
+
+// Forward declarations.
+struct BasicBlock;
+struct CompilationUnit;
+class Pass;
+
+/**
+ * @brief OptimizationFlag is an enumeration to perform certain tasks for a given pass.
+ * @details Each enum should be a power of 2 to be correctly used.
+ */
+enum OptimizationFlag {
+};
+
+// Data holder class.
+class PassMEDataHolder : public PassDataHolder {
+ public:
+ CompilationUnit* c_unit;
+ BasicBlock* bb;
+};
+
+enum DataFlowAnalysisMode {
+ kAllNodes = 0, /**< @brief All nodes. */
+ kPreOrderDFSTraversal, /**< @brief Depth-First-Search / Pre-Order. */
+ kRepeatingPreOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Pre-Order. */
+ kReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Reverse Post-Order. */
+ kRepeatingPostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Post-Order. */
+ kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
+ kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
+ kNoNodes, /**< @brief Skip BasicBlock traversal. */
+};
+
+/**
+ * @class PassME
+ * @brief PassME is the base class for the middle-end (ME) optimization passes.
+ * @details It adds a traversal mode, optimization flags, and a CFG dump folder on top of Pass.
+ */
+class PassME : public Pass {
+ public:
+ explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
+ unsigned int flags = 0u, const char* dump = "")
+ : Pass(name), traversal_type_(type), flags_(flags), dump_cfg_folder_(dump) {
+ }
+
+ PassME(const char* name, DataFlowAnalysisMode type, const char* dump)
+ : Pass(name), traversal_type_(type), flags_(0), dump_cfg_folder_(dump) {
+ }
+
+ PassME(const char* name, const char* dump)
+ : Pass(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_(dump) {
+ }
+
+ ~PassME() {
+ }
+
+ virtual DataFlowAnalysisMode GetTraversal() const {
+ return traversal_type_;
+ }
+
+ const char* GetDumpCFGFolder() const {
+ return dump_cfg_folder_;
+ }
+
+ bool GetFlag(OptimizationFlag flag) const {
+ return (flags_ & flag);
+ }
+
+ protected:
+ /** @brief Type of traversal: determines the order to execute the pass on the BasicBlocks. */
+ const DataFlowAnalysisMode traversal_type_;
+
+ /** @brief Flags for additional directives: used to determine if a particular clean-up is necessary post pass. */
+ const unsigned int flags_;
+
+ /** @brief CFG Dump Folder: what sub-folder to use for dumping the CFGs post pass. */
+ const char* const dump_cfg_folder_;
+};
+} // namespace art
+#endif // ART_COMPILER_DEX_PASS_ME_H_
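
The three constructors differ only in how much is defaulted; hedged examples of the base-initializer forms the passes in bb_optimizations.h use:

    PassME("CacheFieldLoweringInfo", kNoNodes);                             // Traversal mode only.
    PassME("CodeLayout", "2_post_layout_cfg");                              // kAllNodes plus a dump folder.
    PassME("SSATransformation", kPreOrderDFSTraversal, "3_post_ssa_cfg");   // Traversal plus dump folder.
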
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 6ccf252..256135d 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -364,6 +364,18 @@
return NULL;
}
+/* Search the existing literals in the pool for an exact method match (index and dex file). */
+LIR* Mir2Lir::ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method) {
+ while (data_target) {
+ if (static_cast<uint32_t>(data_target->operands[0]) == method.dex_method_index &&
+ UnwrapPointer(data_target->operands[1]) == method.dex_file) {
+ return data_target;
+ }
+ data_target = data_target->next;
+ }
+ return nullptr;
+}
+
/*
* The following are building blocks to insert constants into the pool or
* instruction streams.
@@ -1143,11 +1155,13 @@
void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
- int target_method_idx = target_method.dex_method_index;
- LIR* data_target = ScanLiteralPool(code_literal_list_, target_method_idx, 0);
+ LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
if (data_target == NULL) {
- data_target = AddWordData(&code_literal_list_, target_method_idx);
+ data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
+ // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
+ // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
+ // resolve these invokes to the same method, so we don't care which one we record here.
data_target->operands[2] = type;
}
LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
@@ -1157,11 +1171,13 @@
void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
- int target_method_idx = target_method.dex_method_index;
- LIR* data_target = ScanLiteralPool(method_literal_list_, target_method_idx, 0);
+ LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
if (data_target == NULL) {
- data_target = AddWordData(&method_literal_list_, target_method_idx);
+ data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
+ // NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
+ // the same method invoked with kVirtual, kSuper and kInterface but the class linker will
+ // resolve these invokes to the same method, so we don't care which one we record here.
data_target->operands[2] = type;
}
LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg), data_target);
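
The motivation for the new helper, as a hedged illustration (dex_file_a, dex_file_b and the index 42 are hypothetical):

    MethodReference ref_a(dex_file_a, 42);
    MethodReference ref_b(dex_file_b, 42);
    // The old lookup, ScanLiteralPool(list, 42, 0), matches operands[0] only
    // and cannot tell ref_a and ref_b apart. ScanLiteralPoolMethod() also
    // compares the wrapped DexFile pointer in operands[1], so each reference
    // gets its own literal. The invoke type in operands[2] is deliberately
    // not part of the identity (see the NOTEs above).
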
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 3ec31ba..526c981 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -35,15 +35,9 @@
namespace { // anonymous namespace
MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
- ArenaAllocator* arena = mir_graph->GetArena();
- MIR* insn = static_cast<MIR*>(arena->Alloc(sizeof(MIR), kArenaAllocMIR));
+ MIR* insn = mir_graph->NewMIR();
insn->offset = invoke->offset;
- insn->width = invoke->width;
insn->optimization_flags = MIR_CALLEE;
- if (move_return != nullptr) {
- DCHECK_EQ(move_return->offset, invoke->offset + invoke->width);
- insn->width += move_return->width;
- }
return insn;
}
@@ -660,7 +654,6 @@
}
MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
- insn->width += insn->offset - invoke->offset;
insn->offset = invoke->offset;
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
@@ -737,9 +730,7 @@
if (move_result != nullptr) {
MIR* move = AllocReplacementMIR(mir_graph, invoke, move_result);
- insn->width = invoke->width;
move->offset = move_result->offset;
- move->width = move_result->width;
if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
} else if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 3e0ba75..3584c33 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -617,6 +617,7 @@
LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
+ LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
LIR* AddWordData(LIR* *constant_list_p, int value);
LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
void ProcessSwitchTables();
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 5aa093a..865311b 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -557,8 +557,7 @@
if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
continue;
}
- MIR *phi =
- static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocDFInfo));
+ MIR *phi = NewMIR();
phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
phi->dalvikInsn.vA = dalvik_reg;
phi->offset = phi_bb->start_offset;
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index eb62f1b..0f41d2b 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1152,28 +1152,22 @@
*type = sharp_type;
}
} else {
- if (compiling_boot) {
+ bool method_in_image = compiling_boot ||
+ Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
+ if (method_in_image) {
+ CHECK(!method->IsAbstract());
*type = sharp_type;
- *direct_method = -1;
- *direct_code = -1;
+ *direct_method = compiling_boot ? -1 : reinterpret_cast<uintptr_t>(method);
+ *direct_code = compiling_boot ? -1 : compiler_->GetEntryPointOf(method);
+ target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ target_method->dex_method_index = method->GetDexMethodIndex();
+ } else if (!must_use_direct_pointers) {
+ // Set the code and rely on the dex cache for the method.
+ *type = sharp_type;
+ *direct_code = compiler_->GetEntryPointOf(method);
} else {
- bool method_in_image =
- Runtime::Current()->GetHeap()->FindSpaceFromObject(method, false)->IsImageSpace();
- if (method_in_image) {
- CHECK(!method->IsAbstract());
- *type = sharp_type;
- *direct_method = reinterpret_cast<uintptr_t>(method);
- *direct_code = compiler_->GetEntryPointOf(method);
- target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
- target_method->dex_method_index = method->GetDexMethodIndex();
- } else if (!must_use_direct_pointers) {
- // Set the code and rely on the dex cache for the method.
- *type = sharp_type;
- *direct_code = compiler_->GetEntryPointOf(method);
- } else {
- // Direct pointers were required but none were available.
- VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
- }
+ // Direct pointers were required but none were available.
+ VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
}
}
}
@@ -1369,7 +1363,7 @@
self->AssertNoPendingException();
CHECK_GT(work_units, 0U);
- index_ = begin;
+ index_.StoreRelaxed(begin);
for (size_t i = 0; i < work_units; ++i) {
thread_pool_->AddTask(self, new ForAllClosure(this, end, callback));
}
@@ -1384,7 +1378,7 @@
}
size_t NextIndex() {
- return index_.FetchAndAdd(1);
+ return index_.FetchAndAddSequentiallyConsistent(1);
}
private:
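The index_ changes split initialization from work claiming: the relaxed store is safe because no worker threads exist yet, while NextIndex stays sequentially consistent so each index is handed out exactly once. An illustrative worker loop over such a counter (a free-standing sketch, not the ForAll implementation):

    void DrainWorkUnits(Atomic<size_t>* index, size_t end, void (*callback)(size_t)) {
      // Every fetch-add returns a distinct old value, so no unit is processed twice.
      for (size_t i = index->FetchAndAddSequentiallyConsistent(1); i < end;
           i = index->FetchAndAddSequentiallyConsistent(1)) {
        callback(i);
      }
    }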
diff --git a/compiler/output_stream.h b/compiler/output_stream.h
index 478a854..b5ac933 100644
--- a/compiler/output_stream.h
+++ b/compiler/output_stream.h
@@ -18,8 +18,8 @@
#define ART_COMPILER_OUTPUT_STREAM_H_
#include <stdint.h>
-
#include <string>
+#include <sys/types.h>
#include "base/macros.h"
diff --git a/dalvikvm/Android.mk b/dalvikvm/Android.mk
index 0ded2d8..03d32f0 100644
--- a/dalvikvm/Android.mk
+++ b/dalvikvm/Android.mk
@@ -51,6 +51,7 @@
LOCAL_LDFLAGS := -ldl -lpthread
LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
LOCAL_IS_HOST_MODULE := true
+include external/libcxx/libcxx.mk
include $(BUILD_HOST_EXECUTABLE)
ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE)
endif
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9914875..f0b5750 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -33,7 +33,7 @@
#include "compiler.h"
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
-#include "dex/pass_driver.h"
+#include "dex/pass_driver_me.h"
#include "dex/verification_results.h"
#include "driver/compiler_callbacks_impl.h"
#include "driver/compiler_driver.h"
@@ -918,10 +918,10 @@
} else if (option == "--no-profile-file") {
// No profile
} else if (option == "--print-pass-names") {
- PassDriver::PrintPassNames();
+ PassDriverME::PrintPassNames();
} else if (option.starts_with("--disable-passes=")) {
std::string disable_passes = option.substr(strlen("--disable-passes=")).data();
- PassDriver::CreateDefaultPassList(disable_passes);
+ PassDriverME::CreateDefaultPassList(disable_passes);
} else {
Usage("Unknown argument %s", option.data());
}
diff --git a/disassembler/Android.mk b/disassembler/Android.mk
index dd4e9d5..814323c 100644
--- a/disassembler/Android.mk
+++ b/disassembler/Android.mk
@@ -87,8 +87,8 @@
LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk
LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk
+ include external/libcxx/libcxx.mk
ifeq ($$(art_target_or_host),target)
- include external/libcxx/libcxx.mk
LOCAL_SHARED_LIBRARIES += libcutils libvixl
include $(BUILD_SHARED_LIBRARY)
else # host
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 1521caa..296cec9 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -397,12 +397,8 @@
endif
LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
LOCAL_SHARED_LIBRARIES += liblog libnativehelper
- ifeq ($$(art_target_or_host),target)
- include external/libcxx/libcxx.mk
- LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
- else
- LOCAL_SHARED_LIBRARIES += libbacktrace
- endif
+ include external/libcxx/libcxx.mk
+ LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux libutils
LOCAL_STATIC_LIBRARIES := libziparchive libz
diff --git a/runtime/atomic.h b/runtime/atomic.h
index 1f975dc..9262db6 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -17,7 +17,15 @@
#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_
+#ifdef __clang__
+#define ART_HAVE_STDATOMIC 1
+#endif
+
#include <stdint.h>
+#if ART_HAVE_STDATOMIC
+#include <atomic>
+#endif
+#include <limits>
#include <vector>
#include "base/logging.h"
@@ -27,6 +35,76 @@
class Mutex;
+#if ART_HAVE_STDATOMIC
+template<typename T>
+class Atomic : public std::atomic<T> {
+ public:
+ COMPILE_ASSERT(sizeof(T) == sizeof(std::atomic<T>),
+ std_atomic_size_differs_from_that_of_underlying_type);
+ COMPILE_ASSERT(alignof(T) == alignof(std::atomic<T>),
+ std_atomic_alignment_differs_from_that_of_underlying_type);
+
+ Atomic<T>() : std::atomic<T>() { }
+
+ explicit Atomic<T>(T value) : std::atomic<T>(value) { }
+
+ // Load from memory without ordering or synchronization constraints.
+ T LoadRelaxed() const {
+ return this->load(std::memory_order_relaxed);
+ }
+
+ // Load from memory with a total ordering.
+ T LoadSequentiallyConsistent() const {
+ return this->load(std::memory_order_seq_cst);
+ }
+
+ // Store to memory without ordering or synchronization constraints.
+ void StoreRelaxed(T desired) {
+ this->store(desired, std::memory_order_relaxed);
+ }
+
+ // Store to memory with a total ordering.
+ void StoreSequentiallyConsistent(T desired) {
+ this->store(desired, std::memory_order_seq_cst);
+ }
+
+ // Atomically replace the value with desired value if it matches the expected value. Doesn't
+ // imply ordering or synchronization constraints.
+ bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
+ return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
+ }
+
+ // Atomically replace the value with desired value if it matches the expected value. Prior writes
+ // made to other memory locations by the thread that did the release become visible in this
+ // thread.
+ bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
+ return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
+ }
+
+ // Atomically replace the value with desired value if it matches the expected value. Prior writes
+ // to other memory locations become visible to the threads that do a consume or an acquire on the
+ // same location.
+ bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
+ return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
+ }
+
+ T FetchAndAddSequentiallyConsistent(const T value) {
+ return this->fetch_add(value, std::memory_order_seq_cst); // Return old_value.
+ }
+
+ T FetchAndSubSequentiallyConsistent(const T value) {
+ return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value.
+ }
+
+ volatile T* Address() {
+ return reinterpret_cast<T*>(this);
+ }
+
+ static T MaxValue() {
+ return std::numeric_limits<T>::max();
+ }
+};
+#else
template<typename T>
class Atomic {
public:
@@ -34,24 +112,54 @@
explicit Atomic<T>(T value) : value_(value) { }
- Atomic<T>& operator=(T desired) {
- Store(desired);
- return *this;
- }
-
- T Load() const {
+ // Load from memory without ordering or synchronization constraints.
+ T LoadRelaxed() const {
return value_;
}
- operator T() const {
- return Load();
+ // Load from memory with a total ordering.
+ T LoadSequentiallyConsistent() const;
+
+ // Store to memory without ordering or synchronization constraints.
+ void StoreRelaxed(T desired) {
+ value_ = desired;
}
- T FetchAndAdd(const T value) {
+ // Store to memory with a total ordering.
+ void StoreSequentiallyConsistent(T desired);
+
+ // Atomically replace the value with desired value if it matches the expected value. Doesn't
+ // imply ordering or synchronization constraints.
+ bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
+ // TODO: make this relaxed.
+ return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
+ }
+
+ // Atomically replace the value with desired value if it matches the expected value. Prior writes
+ // made to other memory locations by the thread that did the release become visible in this
+ // thread.
+ bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
+ // TODO: make this acquire.
+ return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
+ }
+
+ // Atomically replace the value with desired value if it matches the expected value. Prior writes
+ // to other memory locations become visible to the threads that do a consume or an acquire on the
+ // same location.
+ bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
+ // TODO: make this release.
+ return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
+ }
+
+ volatile T* Address() {
+ return &value_;
+ }
+
+ T FetchAndAddSequentiallyConsistent(const T value) {
return __sync_fetch_and_add(&value_, value); // Return old_value.
}
- T FetchAndSub(const T value) {
+ T FetchAndSubSequentiallyConsistent(const T value) {
return __sync_fetch_and_sub(&value_, value); // Return old value.
}
@@ -71,22 +179,14 @@
return __sync_fetch_and_sub(&value_, 1); // Return old value.
}
- bool CompareAndSwap(T expected_value, T desired_value) {
- return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
- }
-
- volatile T* Address() {
- return &value_;
+ static T MaxValue() {
+ return std::numeric_limits<T>::max();
}
private:
- // Unsafe = operator for non atomic operations on the integer.
- void Store(T desired) {
- value_ = desired;
- }
-
- volatile T value_;
+ T value_;
};
+#endif
typedef Atomic<int32_t> AtomicInteger;
@@ -260,6 +360,23 @@
DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};
+#if !ART_HAVE_STDATOMIC
+template<typename T>
+inline T Atomic<T>::LoadSequentiallyConsistent() const {
+ T result = value_;
+ QuasiAtomic::MembarLoadLoad();
+ return result;
+}
+
+template<typename T>
+inline void Atomic<T>::StoreSequentiallyConsistent(T desired) {
+ QuasiAtomic::MembarStoreStore();
+ value_ = desired;
+ QuasiAtomic::MembarStoreLoad();
+}
+
+#endif
+
} // namespace art
#endif // ART_RUNTIME_ATOMIC_H_
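The net effect of the Atomic<T> rewrite is that every access now names its ordering; the old implicit conversions, Load(), and FetchAndAdd() are gone. A small illustration of the renamed surface (values arbitrary):

    Atomic<int32_t> counter(0);
    counter.StoreRelaxed(5);                                           // no ordering implied
    int32_t old_value = counter.FetchAndAddSequentiallyConsistent(1);  // returns 5
    // The weak compare-exchange may fail spuriously, and expected_value is taken
    // by value, so callers reload and retry in a loop.
    int32_t expected = counter.LoadRelaxed();
    while (!counter.CompareExchangeWeakRelaxed(expected, expected + 1)) {
      expected = counter.LoadRelaxed();
    }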
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 331d0c0..086ef44 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -77,20 +77,20 @@
barrier.Increment(self, num_threads);
// At this point each thread should have passed through the barrier. The first count should be
// equal to num_threads.
- EXPECT_EQ(num_threads, count1);
+ EXPECT_EQ(num_threads, count1.LoadRelaxed());
// Count 3 should still be zero since no thread should have gone past the second barrier.
- EXPECT_EQ(0, count3);
+ EXPECT_EQ(0, count3.LoadRelaxed());
// Now let's tell the threads to pass again.
barrier.Increment(self, num_threads);
// Count 2 should be equal to num_threads since each thread must have passed the second barrier
// at this point.
- EXPECT_EQ(num_threads, count2);
+ EXPECT_EQ(num_threads, count2.LoadRelaxed());
// Wait for all the threads to finish.
thread_pool.Wait(self, true, false);
// All three counts should be equal to num_threads now.
- EXPECT_EQ(count1, count2);
- EXPECT_EQ(count2, count3);
- EXPECT_EQ(num_threads, count3);
+ EXPECT_EQ(count1.LoadRelaxed(), count2.LoadRelaxed());
+ EXPECT_EQ(count2.LoadRelaxed(), count3.LoadRelaxed());
+ EXPECT_EQ(num_threads, count3.LoadRelaxed());
}
class CheckPassTask : public Task {
@@ -133,7 +133,7 @@
// Wait for all the tasks to complete using the barrier.
barrier.Increment(self, expected_total_tasks);
// The total number of completed tasks should be equal to expected_total_tasks.
- EXPECT_EQ(count, expected_total_tasks);
+ EXPECT_EQ(count.LoadRelaxed(), expected_total_tasks);
}
} // namespace art
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 8175514..47571f8 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -169,7 +169,7 @@
// bionic and glibc both have TEMP_FAILURE_RETRY, but Mac OS' libc doesn't.
#ifndef TEMP_FAILURE_RETRY
#define TEMP_FAILURE_RETRY(exp) ({ \
- typeof(exp) _rc; \
+ decltype(exp) _rc; \
do { \
_rc = (exp); \
} while (_rc == -1 && errno == EINTR); \
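decltype(exp) is the standard C++11 spelling of GNU typeof(exp), which Clang rejects when building as standard C++; call sites are unchanged. For example (fd and buf are placeholders):

    char buf[128];
    ssize_t n = TEMP_FAILURE_RETRY(read(fd, buf, sizeof(buf)));  // restarts on EINTR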
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index a7e25cb..adf4c66 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -221,7 +221,7 @@
// Reduce state by 1.
done = android_atomic_release_cas(cur_state, cur_state - 1, &state_) == 0;
if (done && (cur_state - 1) == 0) { // cas may fail due to noise?
- if (num_pending_writers_ > 0 || num_pending_readers_ > 0) {
+ if (num_pending_writers_.LoadRelaxed() > 0 || num_pending_readers_ > 0) {
// Wake any exclusive waiters as there are now no readers.
futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 2bc17bf..6f7f2c1 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -71,12 +71,12 @@
class ScopedAllMutexesLock {
public:
explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
- while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(0, mutex)) {
+ while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakAcquire(0, mutex)) {
NanoSleep(100);
}
}
~ScopedAllMutexesLock() {
- while (!gAllMutexData->all_mutexes_guard.CompareAndSwap(mutex_, 0)) {
+ while (!gAllMutexData->all_mutexes_guard.CompareExchangeWeakRelease(mutex_, 0)) {
NanoSleep(100);
}
}
@@ -174,34 +174,34 @@
uint64_t owner_tid,
uint64_t nano_time_blocked) {
if (kLogLockContentions) {
- ContentionLogData* data = contetion_log_data_;
+ ContentionLogData* data = contention_log_data_;
++(data->contention_count);
data->AddToWaitTime(nano_time_blocked);
ContentionLogEntry* log = data->contention_log;
// This code is intentionally racy as it is only used for diagnostics.
- uint32_t slot = data->cur_content_log_entry;
+ uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
if (log[slot].blocked_tid == blocked_tid &&
log[slot].owner_tid == blocked_tid) {
++log[slot].count;
} else {
uint32_t new_slot;
do {
- slot = data->cur_content_log_entry;
+ slot = data->cur_content_log_entry.LoadRelaxed();
new_slot = (slot + 1) % kContentionLogSize;
- } while (!data->cur_content_log_entry.CompareAndSwap(slot, new_slot));
+ } while (!data->cur_content_log_entry.CompareExchangeWeakRelaxed(slot, new_slot));
log[new_slot].blocked_tid = blocked_tid;
log[new_slot].owner_tid = owner_tid;
- log[new_slot].count = 1;
+ log[new_slot].count.StoreRelaxed(1);
}
}
}
void BaseMutex::DumpContention(std::ostream& os) const {
if (kLogLockContentions) {
- const ContentionLogData* data = contetion_log_data_;
+ const ContentionLogData* data = contention_log_data_;
const ContentionLogEntry* log = data->contention_log;
uint64_t wait_time = data->wait_time;
- uint32_t contention_count = data->contention_count;
+ uint32_t contention_count = data->contention_count.LoadRelaxed();
if (contention_count == 0) {
os << "never contended";
} else {
@@ -213,7 +213,7 @@
for (size_t i = 0; i < kContentionLogSize; ++i) {
uint64_t blocked_tid = log[i].blocked_tid;
uint64_t owner_tid = log[i].owner_tid;
- uint32_t count = log[i].count;
+ uint32_t count = log[i].count.LoadRelaxed();
if (count > 0) {
auto it = most_common_blocked.find(blocked_tid);
if (it != most_common_blocked.end()) {
@@ -261,7 +261,7 @@
#if ART_USE_FUTEXES
state_ = 0;
exclusive_owner_ = 0;
- num_contenders_ = 0;
+ DCHECK_EQ(0, num_contenders_.LoadRelaxed());
#elif defined(__BIONIC__) || defined(__APPLE__)
// Use recursive mutexes for bionic and Apple otherwise the
// non-recursive mutexes don't have TIDs to check lock ownership of.
@@ -283,7 +283,8 @@
LOG(shutting_down ? WARNING : FATAL) << "destroying mutex with owner: " << exclusive_owner_;
} else {
CHECK_EQ(exclusive_owner_, 0U) << "unexpectedly found an owner on unlocked mutex " << name_;
- CHECK_EQ(num_contenders_, 0) << "unexpectedly found a contender on mutex " << name_;
+ CHECK_EQ(num_contenders_.LoadRelaxed(), 0)
+ << "unexpectedly found a contender on mutex " << name_;
}
#else
// We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
@@ -406,7 +407,7 @@
done = __sync_bool_compare_and_swap(&state_, cur_state, 0 /* new state */);
if (LIKELY(done)) { // Spurious fail?
// Wake a contender
- if (UNLIKELY(num_contenders_ > 0)) {
+ if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) {
futex(&state_, FUTEX_WAKE, 1, NULL, NULL, 0);
}
}
@@ -459,7 +460,7 @@
CHECK_EQ(state_, 0);
CHECK_EQ(exclusive_owner_, 0U);
CHECK_EQ(num_pending_readers_, 0);
- CHECK_EQ(num_pending_writers_, 0);
+ CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0);
#else
// We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
// may still be using locks.
@@ -523,7 +524,7 @@
done = __sync_bool_compare_and_swap(&state_, -1 /* cur_state*/, 0 /* new state */);
if (LIKELY(done)) { // cmpxchg may fail due to noise?
// Wake any waiters.
- if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_ > 0)) {
+ if (UNLIKELY(num_pending_readers_ > 0 || num_pending_writers_.LoadRelaxed() > 0)) {
futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
}
}
@@ -646,7 +647,7 @@
ConditionVariable::ConditionVariable(const char* name, Mutex& guard)
: name_(name), guard_(guard) {
#if ART_USE_FUTEXES
- sequence_ = 0;
+ DCHECK_EQ(0, sequence_.LoadRelaxed());
num_waiters_ = 0;
#else
pthread_condattr_t cond_attrs;
@@ -691,7 +692,7 @@
sequence_++; // Indicate the broadcast occurred.
bool done = false;
do {
- int32_t cur_sequence = sequence_;
+ int32_t cur_sequence = sequence_.LoadRelaxed();
// Requeue waiters onto mutex. The waiter holds the contender count on the mutex high, ensuring
// mutex unlocks will awaken the requeued waiter thread.
done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0,
@@ -740,7 +741,7 @@
// Ensure the Mutex is contended so that requeued threads are awoken.
guard_.num_contenders_++;
guard_.recursion_count_ = 1;
- int32_t cur_sequence = sequence_;
+ int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, NULL, NULL, 0) != 0) {
// Futex failed, check it is an expected error.
@@ -754,7 +755,7 @@
CHECK_GE(num_waiters_, 0);
num_waiters_--;
// We awoke and so no longer require awakes from the guard_'s unlock.
- CHECK_GE(guard_.num_contenders_, 0);
+ CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
guard_.num_contenders_--;
#else
guard_.recursion_count_ = 0;
@@ -775,7 +776,7 @@
// Ensure the Mutex is contended so that requeued threads are awoken.
guard_.num_contenders_++;
guard_.recursion_count_ = 1;
- int32_t cur_sequence = sequence_;
+ int32_t cur_sequence = sequence_.LoadRelaxed();
guard_.ExclusiveUnlock(self);
if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, NULL, 0) != 0) {
if (errno == ETIMEDOUT) {
@@ -790,7 +791,7 @@
CHECK_GE(num_waiters_, 0);
num_waiters_--;
// We awoke and so no longer require awakes from the guard_'s unlock.
- CHECK_GE(guard_.num_contenders_, 0);
+ CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0);
guard_.num_contenders_--;
#else
#if !defined(__APPLE__)
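The ScopedAllMutexesLock change above pairs orderings the way a spinlock requires: acquire on lock (observe the previous holder's writes), release on unlock (publish this holder's writes). Distilled into a sketch, not the actual class:

    Atomic<const BaseMutex*> guard;
    void SpinLock(const BaseMutex* m) {
      while (!guard.CompareExchangeWeakAcquire(0, m)) {
        NanoSleep(100);  // spurious weak-CAS failures just spin again
      }
    }
    void SpinUnlock(const BaseMutex* m) {
      while (!guard.CompareExchangeWeakRelease(m, 0)) {
        NanoSleep(100);
      }
    }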
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index 3f35670..e13c8d5 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -160,12 +160,12 @@
void AddToWaitTime(uint64_t value);
ContentionLogData() : wait_time(0) {}
};
- ContentionLogData contetion_log_data_[kContentionLogDataSize];
+ ContentionLogData contention_log_data_[kContentionLogDataSize];
public:
bool HasEverContended() const {
if (kLogLockContentions) {
- return contetion_log_data_->contention_count > 0;
+ return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0;
}
return false;
}
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index f3ed8d3..979970c 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -46,8 +46,8 @@
void Reset() {
DCHECK(mem_map_.get() != NULL);
DCHECK(begin_ != NULL);
- front_index_ = 0;
- back_index_ = 0;
+ front_index_.StoreRelaxed(0);
+ back_index_.StoreRelaxed(0);
debug_is_sorted_ = true;
int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
if (result == -1) {
@@ -64,12 +64,12 @@
}
int32_t index;
do {
- index = back_index_;
+ index = back_index_.LoadRelaxed();
if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
// Stack overflow.
return false;
}
- } while (!back_index_.CompareAndSwap(index, index + 1));
+ } while (!back_index_.CompareExchangeWeakRelaxed(index, index + 1));
begin_[index] = value;
return true;
}
@@ -83,13 +83,13 @@
int32_t index;
int32_t new_index;
do {
- index = back_index_;
+ index = back_index_.LoadRelaxed();
new_index = index + num_slots;
if (UNLIKELY(static_cast<size_t>(new_index) >= capacity_)) {
// Stack overflow.
return false;
}
- } while (!back_index_.CompareAndSwap(index, new_index));
+ } while (!back_index_.CompareExchangeWeakRelaxed(index, new_index));
*start_address = &begin_[index];
*end_address = &begin_[new_index];
if (kIsDebugBuild) {
@@ -114,31 +114,31 @@
if (kIsDebugBuild) {
debug_is_sorted_ = false;
}
- int32_t index = back_index_;
+ int32_t index = back_index_.LoadRelaxed();
DCHECK_LT(static_cast<size_t>(index), capacity_);
- back_index_ = index + 1;
+ back_index_.StoreRelaxed(index + 1);
begin_[index] = value;
}
T PopBack() {
- DCHECK_GT(back_index_, front_index_);
+ DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed());
// Decrement the back index non-atomically.
- back_index_ = back_index_ - 1;
- return begin_[back_index_];
+ back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1);
+ return begin_[back_index_.LoadRelaxed()];
}
// Take an item from the front of the stack.
T PopFront() {
- int32_t index = front_index_;
- DCHECK_LT(index, back_index_.Load());
- front_index_ = front_index_ + 1;
+ int32_t index = front_index_.LoadRelaxed();
+ DCHECK_LT(index, back_index_.LoadRelaxed());
+ front_index_.StoreRelaxed(index + 1);
return begin_[index];
}
// Pop a number of elements.
void PopBackCount(int32_t n) {
DCHECK_GE(Size(), static_cast<size_t>(n));
- back_index_.FetchAndSub(n);
+ back_index_.FetchAndSubSequentiallyConsistent(n);
}
bool IsEmpty() const {
@@ -146,16 +146,16 @@
}
size_t Size() const {
- DCHECK_LE(front_index_, back_index_);
- return back_index_ - front_index_;
+ DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed());
+ return back_index_.LoadRelaxed() - front_index_.LoadRelaxed();
}
T* Begin() const {
- return const_cast<T*>(begin_ + front_index_);
+ return const_cast<T*>(begin_ + front_index_.LoadRelaxed());
}
T* End() const {
- return const_cast<T*>(begin_ + back_index_);
+ return const_cast<T*>(begin_ + back_index_.LoadRelaxed());
}
size_t Capacity() const {
@@ -169,11 +169,11 @@
}
void Sort() {
- int32_t start_back_index = back_index_.Load();
- int32_t start_front_index = front_index_.Load();
+ int32_t start_back_index = back_index_.LoadRelaxed();
+ int32_t start_front_index = front_index_.LoadRelaxed();
std::sort(Begin(), End());
- CHECK_EQ(start_back_index, back_index_.Load());
- CHECK_EQ(start_front_index, front_index_.Load());
+ CHECK_EQ(start_back_index, back_index_.LoadRelaxed());
+ CHECK_EQ(start_front_index, front_index_.LoadRelaxed());
if (kIsDebugBuild) {
debug_is_sorted_ = true;
}
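The lock-free push above in miniature: a relaxed weak CAS reserves a slot, retrying on contention or spurious failure, and the winner then owns slots[index] outright:

    template <typename T>
    bool TryPush(Atomic<int32_t>* back_index, size_t capacity, T* slots, const T& value) {
      int32_t index;
      do {
        index = back_index->LoadRelaxed();
        if (static_cast<size_t>(index) >= capacity) {
          return false;  // stack overflow
        }
      } while (!back_index->CompareExchangeWeakRelaxed(index, index + 1));
      slots[index] = value;  // the successful CAS made this slot exclusively ours
      return true;
    }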
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index cc258f5..43331c3 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -99,9 +99,10 @@
: GarbageCollector(heap,
name_prefix +
(is_concurrent ? "concurrent mark sweep": "mark sweep")),
+ current_space_bitmap_(nullptr), mark_bitmap_(nullptr), mark_stack_(nullptr),
gc_barrier_(new Barrier(0)),
mark_stack_lock_("mark sweep mark stack lock", kMarkSweepMarkStackLock),
- is_concurrent_(is_concurrent) {
+ is_concurrent_(is_concurrent), live_stack_freeze_size_(0) {
}
void MarkSweep::InitializePhase() {
@@ -109,19 +110,19 @@
mark_stack_ = heap_->GetMarkStack();
DCHECK(mark_stack_ != nullptr);
immune_region_.Reset();
- class_count_ = 0;
- array_count_ = 0;
- other_count_ = 0;
- large_object_test_ = 0;
- large_object_mark_ = 0;
- overhead_time_ = 0;
- work_chunks_created_ = 0;
- work_chunks_deleted_ = 0;
- reference_count_ = 0;
- mark_null_count_ = 0;
- mark_immune_count_ = 0;
- mark_fastpath_count_ = 0;
- mark_slowpath_count_ = 0;
+ class_count_.StoreRelaxed(0);
+ array_count_.StoreRelaxed(0);
+ other_count_.StoreRelaxed(0);
+ large_object_test_.StoreRelaxed(0);
+ large_object_mark_.StoreRelaxed(0);
+ overhead_time_.StoreRelaxed(0);
+ work_chunks_created_.StoreRelaxed(0);
+ work_chunks_deleted_.StoreRelaxed(0);
+ reference_count_.StoreRelaxed(0);
+ mark_null_count_.StoreRelaxed(0);
+ mark_immune_count_.StoreRelaxed(0);
+ mark_fastpath_count_.StoreRelaxed(0);
+ mark_slowpath_count_.StoreRelaxed(0);
{
// TODO: I don't think we should need heap bitmap lock to Get the mark bitmap.
ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
@@ -596,7 +597,7 @@
if (kUseFinger) {
android_memory_barrier();
if (reinterpret_cast<uintptr_t>(ref) >=
- static_cast<uintptr_t>(mark_sweep_->atomic_finger_)) {
+ static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) {
return;
}
}
@@ -881,7 +882,7 @@
// This function does not handle heap end increasing, so we must use the space end.
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
- atomic_finger_ = static_cast<int32_t>(0xFFFFFFFF);
+ atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue());
// Create a few worker tasks.
const size_t n = thread_count * 2;
@@ -1214,7 +1215,9 @@
thread_pool->Wait(self, true, true);
thread_pool->StopWorkers(self);
mark_stack_->Reset();
- CHECK_EQ(work_chunks_created_, work_chunks_deleted_) << " some of the work chunks were leaked";
+ CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(),
+ work_chunks_deleted_.LoadSequentiallyConsistent())
+ << " some of the work chunks were leaked";
}
// Scan anything that's on the mark stack.
@@ -1269,24 +1272,27 @@
void MarkSweep::FinishPhase() {
TimingLogger::ScopedSplit split("FinishPhase", &timings_);
if (kCountScannedTypes) {
- VLOG(gc) << "MarkSweep scanned classes=" << class_count_ << " arrays=" << array_count_
- << " other=" << other_count_;
+ VLOG(gc) << "MarkSweep scanned classes=" << class_count_.LoadRelaxed()
+ << " arrays=" << array_count_.LoadRelaxed() << " other=" << other_count_.LoadRelaxed();
}
if (kCountTasks) {
- VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_;
+ VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed();
}
if (kMeasureOverhead) {
- VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_);
+ VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed());
}
if (kProfileLargeObjects) {
- VLOG(gc) << "Large objects tested " << large_object_test_ << " marked " << large_object_mark_;
+ VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed()
+ << " marked " << large_object_mark_.LoadRelaxed();
}
if (kCountJavaLangRefs) {
- VLOG(gc) << "References scanned " << reference_count_;
+ VLOG(gc) << "References scanned " << reference_count_.LoadRelaxed();
}
if (kCountMarkedObjects) {
- VLOG(gc) << "Marked: null=" << mark_null_count_ << " immune=" << mark_immune_count_
- << " fastpath=" << mark_fastpath_count_ << " slowpath=" << mark_slowpath_count_;
+ VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed()
+ << " immune=" << mark_immune_count_.LoadRelaxed()
+ << " fastpath=" << mark_fastpath_count_.LoadRelaxed()
+ << " slowpath=" << mark_slowpath_count_.LoadRelaxed();
}
CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty.
mark_stack_->Reset();
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index e9a3c3a..d73bf3f 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -305,14 +305,14 @@
AtomicInteger mark_fastpath_count_;
AtomicInteger mark_slowpath_count_;
- // Verification.
- size_t live_stack_freeze_size_;
-
std::unique_ptr<Barrier> gc_barrier_;
Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);
const bool is_concurrent_;
+ // Verification.
+ size_t live_stack_freeze_size_;
+
private:
friend class AddIfReachesAllocSpaceVisitor; // Used by mod-union table.
friend class CardScanTask;
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 7cee5a0..03b72b6 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -96,7 +96,7 @@
CHECK_LE(obj->SizeOf(), usable_size);
}
const size_t new_num_bytes_allocated =
- static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
+ static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated)) + bytes_allocated;
// TODO: Deprecate.
if (kInstrumented) {
if (Runtime::Current()->HasStatsEnabled()) {
@@ -264,7 +264,7 @@
// Only if the allocation succeeded, record the time.
if (allocated_obj != nullptr) {
uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
- heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
+ heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(allocation_end_time - allocation_start_time_);
}
}
};
@@ -279,7 +279,7 @@
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
- size_t new_footprint = num_bytes_allocated_ + alloc_size;
+ size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
if (UNLIKELY(new_footprint > growth_limit_)) {
return true;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index e7f7517..ea1ccdd 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -293,7 +293,7 @@
}
// TODO: Count objects in the image space here.
- num_bytes_allocated_ = 0;
+ num_bytes_allocated_.StoreRelaxed(0);
// Default mark stack size in bytes.
static const size_t default_mark_stack_size = 64 * KB;
@@ -659,13 +659,13 @@
void Heap::RegisterGCAllocation(size_t bytes) {
if (this != nullptr) {
- gc_memory_overhead_.FetchAndAdd(bytes);
+ gc_memory_overhead_.FetchAndAddSequentiallyConsistent(bytes);
}
}
void Heap::RegisterGCDeAllocation(size_t bytes) {
if (this != nullptr) {
- gc_memory_overhead_.FetchAndSub(bytes);
+ gc_memory_overhead_.FetchAndSubSequentiallyConsistent(bytes);
}
}
@@ -700,7 +700,8 @@
}
collector->ResetMeasurements();
}
- uint64_t allocation_time = static_cast<uint64_t>(total_allocation_time_) * kTimeAdjust;
+ uint64_t allocation_time =
+ static_cast<uint64_t>(total_allocation_time_.LoadRelaxed()) * kTimeAdjust;
if (total_duration != 0) {
const double total_seconds = static_cast<double>(total_duration / 1000) / 1000000.0;
os << "Total time spent in GC: " << PrettyDuration(total_duration) << "\n";
@@ -720,7 +721,7 @@
}
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
- os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_;
+ os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_.LoadRelaxed();
BaseMutex::DumpAll(os);
}
@@ -1022,7 +1023,7 @@
return;
}
// Ignore early dawn of the universe verifications.
- if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.Load()) < 10 * KB)) {
+ if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) {
return;
}
CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
@@ -1053,9 +1054,9 @@
// Use signed comparison since freed bytes can be negative when background compaction to
// foreground transitions occur. This is caused by moving objects from a bump pointer space
// to a free list backed space, typically increasing memory footprint due to padding and binning.
- DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.Load()));
+ DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed()));
// Note: This relies on 2s complement for handling negative freed_bytes.
- num_bytes_allocated_.FetchAndSub(static_cast<ssize_t>(freed_bytes));
+ num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes));
if (Runtime::Current()->HasStatsEnabled()) {
RuntimeStats* thread_stats = Thread::Current()->GetStats();
thread_stats->freed_objects += freed_objects;
@@ -1313,7 +1314,7 @@
VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_)
<< " -> " << static_cast<int>(collector_type);
uint64_t start_time = NanoTime();
- uint32_t before_allocated = num_bytes_allocated_.Load();
+ uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
ThreadList* tl = Runtime::Current()->GetThreadList();
Thread* self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
@@ -1391,7 +1392,7 @@
uint64_t duration = NanoTime() - start_time;
GrowForUtilization(semi_space_collector_);
FinishGC(self, collector::kGcTypeFull);
- int32_t after_allocated = num_bytes_allocated_.Load();
+ int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent();
int32_t delta_allocated = before_allocated - after_allocated;
LOG(INFO) << "Heap transition to " << process_state_ << " took "
<< PrettyDuration(duration) << " saved at least " << PrettySize(delta_allocated);
@@ -1866,7 +1867,7 @@
: heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {}
size_t GetFailureCount() const {
- return fail_count_->Load();
+ return fail_count_->LoadSequentiallyConsistent();
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
@@ -1903,7 +1904,7 @@
// Verify that the reference is live.
return true;
}
- if (fail_count_->FetchAndAdd(1) == 0) {
+ if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) {
// Print message only on the first failure to prevent spam.
LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!";
}
@@ -2019,7 +2020,7 @@
}
size_t GetFailureCount() const {
- return fail_count_->Load();
+ return fail_count_->LoadSequentiallyConsistent();
}
private:
@@ -2429,7 +2430,7 @@
}
void Heap::UpdateMaxNativeFootprint() {
- size_t native_size = native_bytes_allocated_;
+ size_t native_size = native_bytes_allocated_.LoadRelaxed();
// TODO: Tune the native heap utilization to be a value other than the java heap utilization.
size_t target_size = native_size / GetTargetHeapUtilization();
if (target_size > native_size + max_free_) {
@@ -2701,21 +2702,22 @@
native_need_to_run_finalization_ = false;
}
// Total number of native bytes allocated.
- native_bytes_allocated_.FetchAndAdd(bytes);
- if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_gc_watermark_) {
+ size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
+ new_native_bytes_allocated += bytes;
+ if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
collector::kGcTypeFull;
// The second watermark is higher than the gc watermark. If you hit this it means you are
// allocating native objects faster than the GC can keep up with.
- if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+ if (new_native_bytes_allocated > native_footprint_limit_) {
if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
// Just finished a GC, attempt to run finalizers.
RunFinalization(env);
CHECK(!env->ExceptionCheck());
}
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
- if (static_cast<size_t>(native_bytes_allocated_) > native_footprint_limit_) {
+ if (new_native_bytes_allocated > native_footprint_limit_) {
CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
RunFinalization(env);
native_need_to_run_finalization_ = false;
@@ -2737,7 +2739,7 @@
void Heap::RegisterNativeFree(JNIEnv* env, int bytes) {
int expected_size, new_size;
do {
- expected_size = native_bytes_allocated_.Load();
+ expected_size = native_bytes_allocated_.LoadRelaxed();
new_size = expected_size - bytes;
if (UNLIKELY(new_size < 0)) {
ScopedObjectAccess soa(env);
@@ -2746,7 +2748,7 @@
"registered as allocated", bytes, expected_size).c_str());
break;
}
- } while (!native_bytes_allocated_.CompareAndSwap(expected_size, new_size));
+ } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size, new_size));
}
size_t Heap::GetTotalMemory() const {
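The RegisterNativeAllocation rewrite fixes a subtlety: FetchAndAddSequentiallyConsistent returns the counter's value from before the addition, so the new total is reconstructed locally rather than re-read (a re-read could fold in other threads' allocations and trip the watermarks inconsistently). The essence:

    size_t AddAndGetTotal(Atomic<size_t>* counter, size_t bytes) {
      // fetch-add yields the old value; adding bytes back gives this thread a
      // stable view of the total it is responsible for checking.
      return counter->FetchAndAddSequentiallyConsistent(bytes) + bytes;
    }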
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 890332a..887b17e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -372,7 +372,7 @@
// Returns the number of bytes currently allocated.
size_t GetBytesAllocated() const {
- return num_bytes_allocated_;
+ return num_bytes_allocated_.LoadSequentiallyConsistent();
}
// Returns the number of objects currently allocated.
@@ -408,7 +408,7 @@
// Implements java.lang.Runtime.freeMemory.
size_t GetFreeMemory() const {
- return GetTotalMemory() - num_bytes_allocated_;
+ return GetTotalMemory() - num_bytes_allocated_.LoadSequentiallyConsistent();
}
// Get the space that corresponds to an object's address. The current implementation searches all
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 497a61f..71c295e 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -48,8 +48,8 @@
end_ += num_bytes;
*bytes_allocated = num_bytes;
// Use the CAS-free versions as an optimization.
- objects_allocated_ = objects_allocated_ + 1;
- bytes_allocated_ = bytes_allocated_ + num_bytes;
+ objects_allocated_.StoreRelaxed(objects_allocated_.LoadRelaxed() + 1);
+ bytes_allocated_.StoreRelaxed(bytes_allocated_.LoadRelaxed() + num_bytes);
if (UNLIKELY(usable_size != nullptr)) {
*usable_size = num_bytes;
}
@@ -76,8 +76,8 @@
inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) {
mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes);
if (ret != nullptr) {
- objects_allocated_.FetchAndAdd(1);
- bytes_allocated_.FetchAndAdd(num_bytes);
+ objects_allocated_.FetchAndAddSequentiallyConsistent(1);
+ bytes_allocated_.FetchAndAddSequentiallyConsistent(num_bytes);
}
return ret;
}
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fcd772b..fd0a92d 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -68,8 +68,8 @@
// Reset the end of the space back to the beginning; we move the end forward as we allocate
// objects.
SetEnd(Begin());
- objects_allocated_ = 0;
- bytes_allocated_ = 0;
+ objects_allocated_.StoreRelaxed(0);
+ bytes_allocated_.StoreRelaxed(0);
growth_end_ = Limit();
{
MutexLock mu(Thread::Current(), block_lock_);
@@ -204,7 +204,7 @@
uint64_t BumpPointerSpace::GetBytesAllocated() {
// Start out with the pre-determined amount (blocks which are not being allocated into).
- uint64_t total = static_cast<uint64_t>(bytes_allocated_.Load());
+ uint64_t total = static_cast<uint64_t>(bytes_allocated_.LoadRelaxed());
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -222,7 +222,7 @@
uint64_t BumpPointerSpace::GetObjectsAllocated() {
// Start out with the pre-determined amount (blocks which are not being allocated into).
- uint64_t total = static_cast<uint64_t>(objects_allocated_.Load());
+ uint64_t total = static_cast<uint64_t>(objects_allocated_.LoadRelaxed());
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -239,8 +239,8 @@
}
void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
- objects_allocated_.FetchAndAdd(thread->GetThreadLocalObjectsAllocated());
- bytes_allocated_.FetchAndAdd(thread->GetThreadLocalBytesAllocated());
+ objects_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalObjectsAllocated());
+ bytes_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalBytesAllocated());
thread->SetTlab(nullptr, nullptr);
}
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 6ea94a9..45fee14 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -239,7 +239,7 @@
*error_msg = StringPrintf("Failed to map image bitmap: %s", error_msg->c_str());
return nullptr;
}
- uint32_t bitmap_index = bitmap_index_.FetchAndAdd(1);
+ uint32_t bitmap_index = bitmap_index_.FetchAndAddSequentiallyConsistent(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
bitmap_index));
std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 0466413..fb3a12e 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -115,7 +115,7 @@
// Need to mark the card since this will update the mod-union table next GC cycle.
card_table->MarkCard(ptrs[i]);
}
- zygote_space->objects_allocated_.FetchAndSub(num_ptrs);
+ zygote_space->objects_allocated_.FetchAndSubSequentiallyConsistent(num_ptrs);
}
} // namespace space
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 50fc62b..5d5fe76 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -65,7 +65,7 @@
}
uint64_t GetObjectsAllocated() {
- return objects_allocated_;
+ return objects_allocated_.LoadSequentiallyConsistent();
}
void Clear() OVERRIDE;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 075d225..2dbcc80 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -522,9 +522,9 @@
void Instrumentation::InstrumentQuickAllocEntryPoints() {
// TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racy and this code
// should be guarded by a lock.
- DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_.Load(), 0);
+ DCHECK_GE(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
const bool enable_instrumentation =
- quick_alloc_entry_points_instrumentation_counter_.FetchAndAdd(1) == 0;
+ quick_alloc_entry_points_instrumentation_counter_.FetchAndAddSequentiallyConsistent(1) == 0;
if (enable_instrumentation) {
SetEntrypointsInstrumented(true);
}
@@ -533,9 +533,9 @@
void Instrumentation::UninstrumentQuickAllocEntryPoints() {
// TODO: the read of quick_alloc_entry_points_instrumentation_counter_ is racy and this code
// should be guarded by a lock.
- DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_.Load(), 0);
+ DCHECK_GT(quick_alloc_entry_points_instrumentation_counter_.LoadSequentiallyConsistent(), 0);
const bool disable_instrumentation =
- quick_alloc_entry_points_instrumentation_counter_.FetchAndSub(1) == 1;
+ quick_alloc_entry_points_instrumentation_counter_.FetchAndSubSequentiallyConsistent(1) == 1;
if (disable_instrumentation) {
SetEntrypointsInstrumented(false);
}
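The counter acts as a reference count on the instrumented allocation entry points: enable on the 0 -> 1 transition, disable on the 1 -> 0 transition, both decided from the old value returned by the fetch operation. Schematically, with the member calls flattened into free functions:

    void Instrument(AtomicInteger* counter) {
      if (counter->FetchAndAddSequentiallyConsistent(1) == 0) {
        SetEntrypointsInstrumented(true);   // first client switches them on
      }
    }
    void Uninstrument(AtomicInteger* counter) {
      if (counter->FetchAndSubSequentiallyConsistent(1) == 1) {
        SetEntrypointsInstrumented(false);  // last client switches them off
      }
    }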
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index e0f9e5f..9a274f6 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -234,9 +234,9 @@
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
- Throwable* exception = self->GetException(NULL);
- self->ClearException();
+ Throwable* exception = self->GetException(nullptr);
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+ self->ClearException();
ADVANCE(1);
}
HANDLE_INSTRUCTION_END();
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index c0275f6..68759ad 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -163,9 +163,9 @@
break;
case Instruction::MOVE_EXCEPTION: {
PREAMBLE();
- Throwable* exception = self->GetException(NULL);
- self->ClearException();
+ Throwable* exception = self->GetException(nullptr);
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
+ self->ClearException();
inst = inst->Next_1xx();
break;
}
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 04905a5..69e5a84 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -139,10 +139,10 @@
static AtomicInteger seed(987654321 + std::time(nullptr));
int32_t expected_value, new_value;
do {
- expected_value = static_cast<uint32_t>(seed.Load());
+ expected_value = static_cast<uint32_t>(seed.LoadRelaxed());
new_value = expected_value * 1103515245 + 12345;
} while ((expected_value & LockWord::kHashMask) == 0 ||
- !seed.CompareAndSwap(expected_value, new_value));
+ !seed.CompareExchangeWeakRelaxed(expected_value, new_value));
return expected_value & LockWord::kHashMask;
}
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index e0cd193..7d297cb 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -99,12 +99,12 @@
int32_t Monitor::GetHashCode() {
while (!HasHashCode()) {
- if (hash_code_.CompareAndSwap(0, mirror::Object::GenerateIdentityHashCode())) {
+ if (hash_code_.CompareExchangeWeakRelaxed(0, mirror::Object::GenerateIdentityHashCode())) {
break;
}
}
DCHECK(HasHashCode());
- return hash_code_.Load();
+ return hash_code_.LoadRelaxed();
}
bool Monitor::Install(Thread* self) {
@@ -119,7 +119,7 @@
break;
}
case LockWord::kHashCode: {
- CHECK_EQ(hash_code_, static_cast<int32_t>(lw.GetHashCode()));
+ CHECK_EQ(hash_code_.LoadRelaxed(), static_cast<int32_t>(lw.GetHashCode()));
break;
}
case LockWord::kFatLocked: {
diff --git a/runtime/monitor.h b/runtime/monitor.h
index ed1b27b..7af2d4c 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -109,7 +109,7 @@
bool IsLocked() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool HasHashCode() const {
- return hash_code_.Load() != 0;
+ return hash_code_.LoadRelaxed() != 0;
}
MonitorId GetMonitorId() const {
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 69b05f4..d9c9b59 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -155,6 +155,21 @@
return env->NewStringUTF(kIsDebugBuild ? "libartd.so" : "libart.so");
}
+static jstring VMRuntime_vmInstructionSet(JNIEnv* env, jobject) {
+ InstructionSet isa = Runtime::Current()->GetInstructionSet();
+ const char* isa_string = GetInstructionSetString(isa);
+ return env->NewStringUTF(isa_string);
+}
+
+static jboolean VMRuntime_is64Bit(JNIEnv* env, jobject) {
+ bool is64BitMode = (sizeof(void*) == sizeof(uint64_t));
+ return is64BitMode ? JNI_TRUE : JNI_FALSE;
+}
+
+static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
+ return Runtime::Current()->GetJavaVM()->check_jni ? JNI_TRUE : JNI_FALSE;
+}
+
static void VMRuntime_setTargetSdkVersionNative(JNIEnv* env, jobject, jint targetSdkVersion) {
// This is the target SDK version of the app we're about to run. It is intended that this is a place
// where workarounds can be enabled.
@@ -529,6 +544,9 @@
NATIVE_METHOD(VMRuntime, trimHeap, "()V"),
NATIVE_METHOD(VMRuntime, vmVersion, "()Ljava/lang/String;"),
NATIVE_METHOD(VMRuntime, vmLibrary, "()Ljava/lang/String;"),
+ NATIVE_METHOD(VMRuntime, vmInstructionSet, "()Ljava/lang/String;"),
+ NATIVE_METHOD(VMRuntime, is64Bit, "!()Z"),
+ NATIVE_METHOD(VMRuntime, isCheckJniEnabled, "!()Z"),
NATIVE_METHOD(VMRuntime, preloadDexCaches, "()V"),
NATIVE_METHOD(VMRuntime, registerAppInfo, "(Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;)V"),
};
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 2987393..4330d27 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -533,7 +533,7 @@
Trace::SetDefaultClockSource(kProfilerClockSourceWall);
} else if (option == "-Xprofile:dualclock") {
Trace::SetDefaultClockSource(kProfilerClockSourceDual);
- } else if (StartsWith(option, "-Xprofile:")) {
+ } else if (StartsWith(option, "-Xprofile-filename:")) {
if (!ParseStringAfterChar(option, ':', &profile_output_filename_)) {
return false;
}
@@ -786,7 +786,7 @@
UsageMessage(stream, " -Xmethod-trace\n");
UsageMessage(stream, " -Xmethod-trace-file:filename");
UsageMessage(stream, " -Xmethod-trace-file-size:integervalue\n");
- UsageMessage(stream, " -Xprofile=filename\n");
+ UsageMessage(stream, " -Xprofile-filename:filename\n");
UsageMessage(stream, " -Xprofile-period:integervalue\n");
UsageMessage(stream, " -Xprofile-duration:integervalue\n");
UsageMessage(stream, " -Xprofile-interval:integervalue\n");
diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc
index c1a1ad7..292c94f 100644
--- a/runtime/thread_pool_test.cc
+++ b/runtime/thread_pool_test.cc
@@ -69,7 +69,7 @@
// Wait for tasks to complete.
thread_pool.Wait(self, true, false);
// Make sure that we finished all the work.
- EXPECT_EQ(num_tasks, count);
+ EXPECT_EQ(num_tasks, count.LoadSequentiallyConsistent());
}
TEST_F(ThreadPoolTest, StopStart) {
@@ -82,7 +82,7 @@
}
usleep(200);
// Check that no threads started prematurely.
- EXPECT_EQ(0, count);
+ EXPECT_EQ(0, count.LoadSequentiallyConsistent());
// Signal the threads to start processing tasks.
thread_pool.StartWorkers(self);
usleep(200);
@@ -91,10 +91,11 @@
thread_pool.AddTask(self, new CountTask(&bad_count));
usleep(200);
// Ensure that the task added after the workers were stopped doesn't get run.
- EXPECT_EQ(0, bad_count);
+ EXPECT_EQ(0, bad_count.LoadSequentiallyConsistent());
// Allow tasks to finish up and delete themselves.
thread_pool.StartWorkers(self);
- while (count.Load() != num_tasks && bad_count.Load() != 1) {
+ while (count.LoadSequentiallyConsistent() != num_tasks &&
+ bad_count.LoadSequentiallyConsistent() != 1) {
usleep(200);
}
thread_pool.StopWorkers(self);
@@ -135,7 +136,7 @@
thread_pool.AddTask(self, new TreeTask(&thread_pool, &count, depth));
thread_pool.StartWorkers(self);
thread_pool.Wait(self, true, false);
- EXPECT_EQ((1 << depth) - 1, count);
+ EXPECT_EQ((1 << depth) - 1, count.LoadSequentiallyConsistent());
}
} // namespace art