Class initialization check elimination.
Also, move null check elimination temporaries to the
ScopedArenaAllocator and reuse the same variables in the
class initialization check elimination.
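
The new ClassInitCheckElimination pass runs right after null check
elimination and type inference. Its gate maps every SGET/SPUT whose
class is resolved but not known to be initialized to a small per-class
index (stored in temp_insn_data_, keyed by insn offset/2) and bails out
if the method has no static field access or if no SGET/SPUT needs a
trackable check. The per-block walk then tracks a bit vector of classes
that still need an initialization check: entry and catch blocks start
with all bits set, other blocks start from the union of their
predecessors' ending_check_v, and each eligible SGET/SPUT either gets
MIR_IGNORE_CLINIT_CHECK set or keeps its check and clears the class's
bit. The backend consults the new flag before emitting the clinit check
for SGET/SPUT.

A rough sketch of the per-block transfer, for illustration only (not
part of the change; std::bitset stands in for ArenaBitVector, the
predecessor merge is omitted, and all names are invented):

  #include <bitset>
  #include <cstdint>
  #include <vector>

  constexpr size_t kMaxClasses = 64;          // stand-in for temp_bit_vector_size_
  using ClassSet = std::bitset<kMaxClasses>;  // set bit => class still needs a check

  struct SgetSput {
    uint16_t class_index;  // index assigned by the gate, 0xffffu if not eligible
    bool ignore_check;     // models MIR_IGNORE_CLINIT_CHECK
  };

  // Processes one block given its merged starting state; returns true if the
  // ending state changed, which keeps the repeating traversal iterating.
  bool TransferBlock(std::vector<SgetSput>& insns, ClassSet classes_to_check,
                     ClassSet* ending_check_v, bool first_visit) {
    for (SgetSput& insn : insns) {
      if (insn.class_index == 0xffffu) {
        continue;  // Unresolved field or class already initialized.
      }
      insn.ignore_check = !classes_to_check.test(insn.class_index);
      classes_to_check.reset(insn.class_index);  // Initialized from here on.
    }
    bool changed = first_visit ? classes_to_check.any()
                               : (*ending_check_v != classes_to_check);
    *ending_check_v = classes_to_check;
    return changed;
  }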
Change-Id: Ic746f95427065506fa6016d4931e4cb8b34937af
diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc
index 2ab6252..abfa7a7 100644
--- a/compiler/dex/bb_optimizations.cc
+++ b/compiler/dex/bb_optimizations.cc
@@ -74,27 +74,6 @@
}
/*
- * Null Check Elimination and Type Inference Initialization pass implementation start.
- */
-
-bool NullCheckEliminationAndTypeInferenceInit::Gate(const CompilationUnit* cUnit) const {
- // First check the ssa register vector
- cUnit->mir_graph->CheckSSARegisterVector();
-
- // Did we disable the pass?
- bool performInit = ((cUnit->disable_opt & (1 << kNullCheckElimination)) == 0);
-
- return performInit;
-}
-
-bool NullCheckEliminationAndTypeInferenceInit::WalkBasicBlocks(CompilationUnit* cUnit,
- BasicBlock* bb) const {
- cUnit->mir_graph->NullCheckEliminationInit(bb);
- // No need of repeating, so just return false.
- return false;
-}
-
-/*
* BasicBlock Combine pass implementation start.
*/
bool BBCombine::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h
index 1ad4958..fb482bf 100644
--- a/compiler/dex/bb_optimizations.h
+++ b/compiler/dex/bb_optimizations.h
@@ -137,20 +137,6 @@
};
/**
- * @class NullCheckEliminationAndTypeInferenceInit
- * @brief Null check elimination and type inference initialization step.
- */
-class NullCheckEliminationAndTypeInferenceInit : public Pass {
- public:
- NullCheckEliminationAndTypeInferenceInit() : Pass("NCE_TypeInferenceInit") {
- }
-
- bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const;
-
- bool Gate(const CompilationUnit* cUnit) const;
-};
-
-/**
* @class NullCheckEliminationAndTypeInference
* @brief Null check elimination and type inference.
*/
@@ -160,9 +146,35 @@
: Pass("NCE_TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_nce_cfg") {
}
+ void Start(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->EliminateNullChecksAndInferTypesStart();
+ }
+
bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
return cUnit->mir_graph->EliminateNullChecksAndInferTypes(bb);
}
+
+ void End(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->EliminateNullChecksAndInferTypesEnd();
+ }
+};
+
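+/**
+ * @class ClassInitCheckElimination
+ * @brief Class initialization check elimination.
+ */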
+class ClassInitCheckElimination : public Pass {
+ public:
+ ClassInitCheckElimination() : Pass("ClInitCheckElimination", kRepeatingPreOrderDFSTraversal) {
+ }
+
+ bool Gate(const CompilationUnit* cUnit) const {
+ return cUnit->mir_graph->EliminateClassInitChecksGate();
+ }
+
+ bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const {
+ return cUnit->mir_graph->EliminateClassInitChecks(bb);
+ }
+
+ void End(CompilationUnit* cUnit) const {
+ cUnit->mir_graph->EliminateClassInitChecksEnd();
+ }
};
/**
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index cd21568..147e840 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -136,6 +136,7 @@
kMIRNullCheckOnly,
kMIRIgnoreRangeCheck,
kMIRRangeCheckOnly,
+ kMIRIgnoreClInitCheck,
kMIRInlined, // Invoke is inlined (ie dead).
kMIRInlinedPred, // Invoke is inlined via prediction.
kMIRCallee, // Instruction is inlined from callee.
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 83fbca5..4485b15 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -44,6 +44,7 @@
// (1 << kLoadHoisting) |
// (1 << kSuppressLoads) |
// (1 << kNullCheckElimination) |
+ // (1 << kClassInitCheckElimination) |
// (1 << kPromoteRegs) |
// (1 << kTrackLiveTemps) |
// (1 << kSafeOptimizations) |
diff --git a/compiler/dex/frontend.h b/compiler/dex/frontend.h
index 22a7b8c..37c85b1 100644
--- a/compiler/dex/frontend.h
+++ b/compiler/dex/frontend.h
@@ -44,6 +44,7 @@
kLoadHoisting,
kSuppressLoads,
kNullCheckElimination,
+ kClassInitCheckElimination,
kPromoteRegs,
kTrackLiveTemps,
kSafeOptimizations,
diff --git a/compiler/dex/mir_field_info.h b/compiler/dex/mir_field_info.h
index e64e9fc..cad516d 100644
--- a/compiler/dex/mir_field_info.h
+++ b/compiler/dex/mir_field_info.h
@@ -203,6 +203,7 @@
// -1 if the field is unresolved or there's no appropriate TypeId in that dex file.
uint32_t storage_index_;
+ friend class ClassInitCheckEliminationTest;
friend class LocalValueNumberingTest;
};
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 8bb5615..60719a5 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -63,9 +63,11 @@
dom_post_order_traversal_(NULL),
i_dom_list_(NULL),
def_block_matrix_(NULL),
- temp_block_v_(NULL),
temp_dalvik_register_v_(NULL),
- temp_ssa_register_v_(NULL),
+ temp_scoped_alloc_(),
+ temp_insn_data_(nullptr),
+ temp_bit_vector_size_(0u),
+ temp_bit_vector_(nullptr),
block_list_(arena, 100, kGrowableArrayBlockList),
try_block_addr_(NULL),
entry_block_(NULL),
@@ -1237,17 +1239,6 @@
/* Rename register names by local defs and phi nodes */
ClearAllVisitedFlags();
DoDFSPreOrderSSARename(GetEntryBlock());
-
- /*
- * Shared temp bit vector used by each block to count the number of defs
- * from all the predecessor blocks.
- */
- temp_ssa_register_v_ =
- new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapTempSSARegisterV);
-}
-
-void MIRGraph::CheckSSARegisterVector() {
- DCHECK(temp_ssa_register_v_ != nullptr);
}
} // namespace art
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 80311ec..036dd84 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -182,6 +182,7 @@
#define MIR_NULL_CHECK_ONLY (1 << kMIRNullCheckOnly)
#define MIR_IGNORE_RANGE_CHECK (1 << kMIRIgnoreRangeCheck)
#define MIR_RANGE_CHECK_ONLY (1 << kMIRRangeCheckOnly)
+#define MIR_IGNORE_CLINIT_CHECK (1 << kMIRIgnoreClInitCheck)
#define MIR_INLINED (1 << kMIRInlined)
#define MIR_INLINED_PRED (1 << kMIRInlinedPred)
#define MIR_CALLEE (1 << kMIRCallee)
@@ -224,7 +225,7 @@
ArenaBitVector* live_in_v;
ArenaBitVector* phi_v;
int32_t* vreg_to_ssa_map;
- ArenaBitVector* ending_null_check_v;
+ ArenaBitVector* ending_check_v; // For null check and class init check elimination.
};
/*
@@ -493,6 +494,10 @@
return (merged_df_flags_ & (DF_IFIELD | DF_SFIELD)) != 0u;
}
+ bool HasStaticFieldAccess() const {
+ return (merged_df_flags_ & DF_SFIELD) != 0u;
+ }
+
bool HasInvokes() const {
// NOTE: These formats include the rare filled-new-array/range.
return (merged_df_flags_ & (DF_FORMAT_35C | DF_FORMAT_3RC)) != 0u;
@@ -745,7 +750,12 @@
int SRegToVReg(int ssa_reg) const;
void VerifyDataflow();
void CheckForDominanceFrontier(BasicBlock* dom_bb, const BasicBlock* succ_bb);
+ void EliminateNullChecksAndInferTypesStart();
bool EliminateNullChecksAndInferTypes(BasicBlock *bb);
+ void EliminateNullChecksAndInferTypesEnd();
+ bool EliminateClassInitChecksGate();
+ bool EliminateClassInitChecks(BasicBlock* bb);
+ void EliminateClassInitChecksEnd();
/*
* Type inference handling helpers. Because Dalvik's bytecode is not fully typed,
* we have to do some work to figure out the sreg type. For some operations it is
@@ -836,17 +846,6 @@
void CountUses(struct BasicBlock* bb);
/**
- * @brief Initialize the data structures with Null Check data
- * @param bb the considered BasicBlock
- */
- void NullCheckEliminationInit(BasicBlock* bb);
-
- /**
- * @brief Check if the temporary ssa register vector is allocated
- */
- void CheckSSARegisterVector();
-
- /**
* @brief Combine BasicBlocks
* @param the BasicBlock we are considering
*/
@@ -943,9 +942,11 @@
GrowableArray<BasicBlockId>* dom_post_order_traversal_;
int* i_dom_list_;
ArenaBitVector** def_block_matrix_; // num_dalvik_register x num_blocks.
- ArenaBitVector* temp_block_v_;
ArenaBitVector* temp_dalvik_register_v_;
- ArenaBitVector* temp_ssa_register_v_; // num_ssa_regs.
+ UniquePtr<ScopedArenaAllocator> temp_scoped_alloc_;
+ uint16_t* temp_insn_data_;
+ uint32_t temp_bit_vector_size_;
+ ArenaBitVector* temp_bit_vector_;
static const int kInvalidEntry = -1;
GrowableArray<BasicBlock*> block_list_;
ArenaBitVector* try_block_addr_;
@@ -979,6 +980,7 @@
GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
GrowableArray<MirMethodLoweringInfo> method_lowering_infos_;
+ friend class ClassInitCheckEliminationTest;
friend class LocalValueNumberingTest;
};
diff --git a/compiler/dex/mir_method_info.h b/compiler/dex/mir_method_info.h
index a43238c..f927f1d 100644
--- a/compiler/dex/mir_method_info.h
+++ b/compiler/dex/mir_method_info.h
@@ -178,6 +178,8 @@
uint16_t target_method_idx_;
uint16_t vtable_idx_;
int stats_flags_;
+
+ friend class ClassInitCheckEliminationTest;
};
} // namespace art
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index cb737ab..333126b 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -545,13 +545,6 @@
return true;
}
-void MIRGraph::NullCheckEliminationInit(struct BasicBlock* bb) {
- if (bb->data_flow_info != NULL) {
- bb->data_flow_info->ending_null_check_v =
- new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false, kBitMapNullCheck);
- }
-}
-
/* Collect stats on number of checks removed */
void MIRGraph::CountChecks(struct BasicBlock* bb) {
if (bb->data_flow_info != NULL) {
@@ -690,6 +683,23 @@
}
}
+void MIRGraph::EliminateNullChecksAndInferTypesStart() {
+ if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
+ if (kIsDebugBuild) {
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
+ }
+ }
+
+ DCHECK(temp_scoped_alloc_.get() == nullptr);
+ temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+ temp_bit_vector_size_ = GetNumSSARegs();
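+ // Shared scratch vector: each block builds its starting null-check state here
+ // and, after the walk, copies the result to its ending_check_v.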
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapTempSSARegisterV);
+ }
+}
+
/*
* Eliminate unnecessary null checks for a basic block. Also, while we're doing
* an iterative walk go ahead and perform type and size inference.
@@ -699,6 +709,7 @@
bool infer_changed = false;
bool do_nce = ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0);
+ ArenaBitVector* ssa_regs_to_check = temp_bit_vector_;
if (do_nce) {
/*
* Set initial state. Be conservative with catch
@@ -706,20 +717,22 @@
* status (except for "this").
*/
if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
- temp_ssa_register_v_->ClearAllBits();
+ ssa_regs_to_check->ClearAllBits();
// Assume all ins are objects.
for (uint16_t in_reg = cu_->num_dalvik_registers - cu_->num_ins;
in_reg < cu_->num_dalvik_registers; in_reg++) {
- temp_ssa_register_v_->SetBit(in_reg);
+ ssa_regs_to_check->SetBit(in_reg);
}
if ((cu_->access_flags & kAccStatic) == 0) {
// If non-static method, mark "this" as non-null
int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
- temp_ssa_register_v_->ClearBit(this_reg);
+ ssa_regs_to_check->ClearBit(this_reg);
}
} else if (bb->predecessors->Size() == 1) {
BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
- temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
+ // pred_bb must have already been processed at least once.
+ DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
+ ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
if (pred_bb->block_type == kDalvikByteCode) {
// Check to see if predecessor had an explicit null-check.
MIR* last_insn = pred_bb->last_mir_insn;
@@ -728,13 +741,13 @@
if (pred_bb->fall_through == bb->id) {
// The fall-through of a block following a IF_EQZ, set the vA of the IF_EQZ to show that
// it can't be null.
- temp_ssa_register_v_->ClearBit(last_insn->ssa_rep->uses[0]);
+ ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
}
} else if (last_opcode == Instruction::IF_NEZ) {
if (pred_bb->taken == bb->id) {
// The taken block following a IF_NEZ, set the vA of the IF_NEZ to show that it can't be
// null.
- temp_ssa_register_v_->ClearBit(last_insn->ssa_rep->uses[0]);
+ ssa_regs_to_check->ClearBit(last_insn->ssa_rep->uses[0]);
}
}
}
@@ -742,19 +755,25 @@
// Starting state is union of all incoming arcs
GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
BasicBlock* pred_bb = GetBasicBlock(iter.Next());
- DCHECK(pred_bb != NULL);
- temp_ssa_register_v_->Copy(pred_bb->data_flow_info->ending_null_check_v);
+ CHECK(pred_bb != NULL);
+ while (pred_bb->data_flow_info->ending_check_v == nullptr) {
+ pred_bb = GetBasicBlock(iter.Next());
+ // At least one predecessor must have been processed before this bb.
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ }
+ ssa_regs_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
while (true) {
pred_bb = GetBasicBlock(iter.Next());
if (!pred_bb) break;
- if ((pred_bb->data_flow_info == NULL) ||
- (pred_bb->data_flow_info->ending_null_check_v == NULL)) {
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ if (pred_bb->data_flow_info->ending_check_v == nullptr) {
continue;
}
- temp_ssa_register_v_->Union(pred_bb->data_flow_info->ending_null_check_v);
+ ssa_regs_to_check->Union(pred_bb->data_flow_info->ending_check_v);
}
}
- // At this point, temp_ssa_register_v_ shows which sregs have an object definition with
+ // At this point, ssa_regs_to_check shows which sregs have an object definition with
// no intervening uses.
}
@@ -783,14 +802,14 @@
src_idx = 0;
}
int src_sreg = mir->ssa_rep->uses[src_idx];
- if (!temp_ssa_register_v_->IsBitSet(src_sreg)) {
+ if (!ssa_regs_to_check->IsBitSet(src_sreg)) {
// Eliminate the null check.
mir->optimization_flags |= MIR_IGNORE_NULL_CHECK;
} else {
// Do the null check.
mir->optimization_flags &= ~MIR_IGNORE_NULL_CHECK;
// Mark s_reg as null-checked
- temp_ssa_register_v_->ClearBit(src_sreg);
+ ssa_regs_to_check->ClearBit(src_sreg);
}
}
@@ -806,13 +825,13 @@
*/
if (((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A)) ||
(df_attributes & DF_SETS_CONST)) {
- temp_ssa_register_v_->SetBit(mir->ssa_rep->defs[0]);
+ ssa_regs_to_check->SetBit(mir->ssa_rep->defs[0]);
}
// Now, remove mark from all object definitions we know are non-null.
if (df_attributes & DF_NON_NULL_DST) {
// Mark target of NEW* as non-null
- temp_ssa_register_v_->ClearBit(mir->ssa_rep->defs[0]);
+ ssa_regs_to_check->ClearBit(mir->ssa_rep->defs[0]);
}
// Mark non-null returns from invoke-style NEW*
@@ -822,7 +841,7 @@
if (next_mir &&
next_mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
// Mark as null checked
- temp_ssa_register_v_->ClearBit(next_mir->ssa_rep->defs[0]);
+ ssa_regs_to_check->ClearBit(next_mir->ssa_rep->defs[0]);
} else {
if (next_mir) {
LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
@@ -837,7 +856,7 @@
// First non-pseudo should be MOVE_RESULT_OBJECT
if (tmir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) {
// Mark as null checked
- temp_ssa_register_v_->ClearBit(tmir->ssa_rep->defs[0]);
+ ssa_regs_to_check->ClearBit(tmir->ssa_rep->defs[0]);
} else {
LOG(WARNING) << "Unexpected op after new: " << tmir->dalvikInsn.opcode;
}
@@ -858,24 +877,242 @@
mir->ssa_rep->num_uses;
bool needs_null_check = false;
for (int i = 0; i < operands; i++) {
- needs_null_check |= temp_ssa_register_v_->IsBitSet(mir->ssa_rep->uses[i]);
+ needs_null_check |= ssa_regs_to_check->IsBitSet(mir->ssa_rep->uses[i]);
}
if (needs_null_check) {
- temp_ssa_register_v_->SetBit(tgt_sreg);
+ ssa_regs_to_check->SetBit(tgt_sreg);
} else {
- temp_ssa_register_v_->ClearBit(tgt_sreg);
+ ssa_regs_to_check->ClearBit(tgt_sreg);
}
}
}
// Did anything change?
- bool nce_changed = do_nce && !temp_ssa_register_v_->Equal(bb->data_flow_info->ending_null_check_v);
- if (nce_changed) {
- bb->data_flow_info->ending_null_check_v->Copy(temp_ssa_register_v_);
+ bool nce_changed = false;
+ if (do_nce) {
+ if (bb->data_flow_info->ending_check_v == nullptr) {
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapNullCheck);
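+ // First visit to this block: report a change unless the ending state is empty,
+ // so the repeating traversal keeps iterating until a fixed point is reached.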
+ nce_changed = ssa_regs_to_check->GetHighestBitSet() != -1;
+ bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
+ } else if (!ssa_regs_to_check->Equal(bb->data_flow_info->ending_check_v)) {
+ nce_changed = true;
+ bb->data_flow_info->ending_check_v->Copy(ssa_regs_to_check);
+ }
}
return infer_changed | nce_changed;
}
+void MIRGraph::EliminateNullChecksAndInferTypesEnd() {
+ if ((cu_->disable_opt & (1 << kNullCheckElimination)) == 0) {
+ // Clean up temporaries.
+ temp_bit_vector_size_ = 0u;
+ temp_bit_vector_ = nullptr;
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->data_flow_info != nullptr) {
+ bb->data_flow_info->ending_check_v = nullptr;
+ }
+ }
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ temp_scoped_alloc_.reset();
+ }
+}
+
+bool MIRGraph::EliminateClassInitChecksGate() {
+ if ((cu_->disable_opt & (1 << kClassInitCheckElimination)) != 0 ||
+ !cu_->mir_graph->HasStaticFieldAccess()) {
+ return false;
+ }
+
+ if (kIsDebugBuild) {
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ CHECK(bb->data_flow_info == nullptr || bb->data_flow_info->ending_check_v == nullptr);
+ }
+ }
+
+ DCHECK(temp_scoped_alloc_.get() == nullptr);
+ temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
+
+ // Each insn we use here has at least 2 code units, so offset/2 will be a unique index.
+ const size_t end = (cu_->code_item->insns_size_in_code_units_ + 1u) / 2u;
+ temp_insn_data_ = static_cast<uint16_t*>(
+ temp_scoped_alloc_->Alloc(end * sizeof(*temp_insn_data_), kArenaAllocGrowableArray));
+
+ uint32_t unique_class_count = 0u;
+ {
+ // Get unique_class_count and store indexes in temp_insn_data_ using a map on a nested
+ // ScopedArenaAllocator.
+
+ // Embed the map value in the entry to save space.
+ struct MapEntry {
+ // Map key: the class identified by the declaring dex file and type index.
+ const DexFile* declaring_dex_file;
+ uint16_t declaring_class_idx;
+ // Map value: index into bit vectors of classes requiring initialization checks.
+ uint16_t index;
+ };
+ struct MapEntryComparator {
+ bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
+ if (lhs.declaring_class_idx != rhs.declaring_class_idx) {
+ return lhs.declaring_class_idx < rhs.declaring_class_idx;
+ }
+ return lhs.declaring_dex_file < rhs.declaring_dex_file;
+ }
+ };
+
+ typedef std::set<MapEntry, MapEntryComparator, ScopedArenaAllocatorAdapter<MapEntry> >
+ ClassToIndexMap;
+
+ ScopedArenaAllocator allocator(&cu_->arena_stack);
+ ClassToIndexMap class_to_index_map(MapEntryComparator(), allocator.Adapter());
+
+ // First, find all SGET/SPUTs that may need class initialization checks.
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ DCHECK(bb->data_flow_info != nullptr);
+ if (mir->dalvikInsn.opcode >= Instruction::SGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
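+ // 0xffffu marks insns this pass leaves alone: unresolved fields keep their
+ // runtime check and already-initialized classes need no check at all.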
+ uint16_t index = 0xffffu;
+ if (field_info.IsResolved() && !field_info.IsInitialized()) {
+ DCHECK_LT(class_to_index_map.size(), 0xffffu);
+ MapEntry entry = {
+ field_info.DeclaringDexFile(),
+ field_info.DeclaringClassIndex(),
+ static_cast<uint16_t>(class_to_index_map.size())
+ };
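+ // insert() returns the existing entry if this class was seen before, so all
+ // SGET/SPUTs of one class share the same index.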
+ index = class_to_index_map.insert(entry).first->index;
+ }
+ // Using offset/2 for index into temp_insn_data_.
+ temp_insn_data_[mir->offset / 2u] = index;
+ }
+ }
+ }
+ unique_class_count = static_cast<uint32_t>(class_to_index_map.size());
+ }
+
+ if (unique_class_count == 0u) {
+ // All SGET/SPUTs refer to initialized classes. Nothing to do.
+ temp_insn_data_ = nullptr;
+ temp_scoped_alloc_.reset();
+ return false;
+ }
+
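+ // temp_bit_vector_ is reused by every block as its scratch set of classes
+ // that still need an initialization check.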
+ temp_bit_vector_size_ = unique_class_count;
+ temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ DCHECK_GT(temp_bit_vector_size_, 0u);
+ return true;
+}
+
+/*
+ * Eliminate unnecessary class initialization checks for a basic block.
+ */
+bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
+ DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
+ if (bb->data_flow_info == NULL) {
+ return false;
+ }
+
+ /*
+ * Set initial state. Be conservative with catch
+ * blocks and start with no assumptions about class init check status.
+ */
+ ArenaBitVector* classes_to_check = temp_bit_vector_;
+ DCHECK(classes_to_check != nullptr);
+ if ((bb->block_type == kEntryBlock) | bb->catch_entry) {
+ classes_to_check->SetInitialBits(temp_bit_vector_size_);
+ } else if (bb->predecessors->Size() == 1) {
+ BasicBlock* pred_bb = GetBasicBlock(bb->predecessors->Get(0));
+ // pred_bb must have already been processed at least once.
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ DCHECK(pred_bb->data_flow_info->ending_check_v != nullptr);
+ classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ } else {
+ // Starting state is union of all incoming arcs
+ GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
+ BasicBlock* pred_bb = GetBasicBlock(iter.Next());
+ DCHECK(pred_bb != NULL);
+ DCHECK(pred_bb->data_flow_info != NULL);
+ while (pred_bb->data_flow_info->ending_check_v == nullptr) {
+ pred_bb = GetBasicBlock(iter.Next());
+ // At least one predecessor must have been processed before this bb.
+ DCHECK(pred_bb != nullptr);
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ }
+ classes_to_check->Copy(pred_bb->data_flow_info->ending_check_v);
+ while (true) {
+ pred_bb = GetBasicBlock(iter.Next());
+ if (!pred_bb) break;
+ DCHECK(pred_bb->data_flow_info != nullptr);
+ if (pred_bb->data_flow_info->ending_check_v == nullptr) {
+ continue;
+ }
+ classes_to_check->Union(pred_bb->data_flow_info->ending_check_v);
+ }
+ }
+ // At this point, classes_to_check shows which classes need clinit checks.
+
+ // Walk through the instructions in the block, updating as necessary.
+ for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+ if (mir->dalvikInsn.opcode >= Instruction::SGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ uint16_t index = temp_insn_data_[mir->offset / 2u];
+ if (index != 0xffffu) {
+ if (mir->dalvikInsn.opcode >= Instruction::SGET &&
+ mir->dalvikInsn.opcode <= Instruction::SPUT_SHORT) {
+ if (!classes_to_check->IsBitSet(index)) {
+ // Eliminate the class init check.
+ mir->optimization_flags |= MIR_IGNORE_CLINIT_CHECK;
+ } else {
+ // Do the class init check.
+ mir->optimization_flags &= ~MIR_IGNORE_CLINIT_CHECK;
+ }
+ }
+ // Mark the class as initialized.
+ classes_to_check->ClearBit(index);
+ }
+ }
+ }
+
+ // Did anything change?
+ bool changed = false;
+ if (bb->data_flow_info->ending_check_v == nullptr) {
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ DCHECK(bb->data_flow_info != nullptr);
+ bb->data_flow_info->ending_check_v = new (temp_scoped_alloc_.get()) ArenaBitVector(
+ temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapClInitCheck);
+ changed = classes_to_check->GetHighestBitSet() != -1;
+ bb->data_flow_info->ending_check_v->Copy(classes_to_check);
+ } else if (!classes_to_check->Equal(bb->data_flow_info->ending_check_v)) {
+ changed = true;
+ bb->data_flow_info->ending_check_v->Copy(classes_to_check);
+ }
+ return changed;
+}
+
+void MIRGraph::EliminateClassInitChecksEnd() {
+ // Clean up temporaries.
+ temp_bit_vector_size_ = 0u;
+ temp_bit_vector_ = nullptr;
+ AllNodesIterator iter(this);
+ for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
+ if (bb->data_flow_info != nullptr) {
+ bb->data_flow_info->ending_check_v = nullptr;
+ }
+ }
+
+ DCHECK(temp_insn_data_ != nullptr);
+ temp_insn_data_ = nullptr;
+ DCHECK(temp_scoped_alloc_.get() != nullptr);
+ temp_scoped_alloc_.reset();
+}
+
void MIRGraph::DumpCheckStats() {
Checkstats* stats =
static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
diff --git a/compiler/dex/mir_optimization_test.cc b/compiler/dex/mir_optimization_test.cc
new file mode 100644
index 0000000..f499364
--- /dev/null
+++ b/compiler/dex/mir_optimization_test.cc
@@ -0,0 +1,406 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vector>
+
+#include "compiler_internals.h"
+#include "dataflow_iterator.h"
+#include "dataflow_iterator-inl.h"
+#include "gtest/gtest.h"
+
+namespace art {
+
+class ClassInitCheckEliminationTest : public testing::Test {
+ protected:
+ struct SFieldDef {
+ uint16_t field_idx;
+ uintptr_t declaring_dex_file;
+ uint16_t declaring_class_idx;
+ uint16_t declaring_field_idx;
+ };
+
+ struct BBDef {
+ static constexpr size_t kMaxSuccessors = 4;
+ static constexpr size_t kMaxPredecessors = 4;
+
+ BBType type;
+ size_t num_successors;
+ BasicBlockId successors[kMaxSuccessors];
+ size_t num_predecessors;
+ BasicBlockId predecessors[kMaxPredecessors];
+ };
+
+ struct MIRDef {
+ Instruction::Code opcode;
+ BasicBlockId bbid;
+ uint32_t field_or_method_info;
+ };
+
+#define DEF_SUCC0() \
+ 0u, { }
+#define DEF_SUCC1(s1) \
+ 1u, { s1 }
+#define DEF_SUCC2(s1, s2) \
+ 2u, { s1, s2 }
+#define DEF_SUCC3(s1, s2, s3) \
+ 3u, { s1, s2, s3 }
+#define DEF_SUCC4(s1, s2, s3, s4) \
+ 4u, { s1, s2, s3, s4 }
+#define DEF_PRED0() \
+ 0u, { }
+#define DEF_PRED1(p1) \
+ 1u, { p1 }
+#define DEF_PRED2(p1, p2) \
+ 2u, { p1, p2 }
+#define DEF_PRED3(p1, p2, p3) \
+ 3u, { p1, p2, p3 }
+#define DEF_PRED4(p1, p2, p3, p4) \
+ 4u, { p1, p2, p3, p4 }
+#define DEF_BB(type, succ, pred) \
+ { type, succ, pred }
+
+#define DEF_MIR(opcode, bb, field_info) \
+ { opcode, bb, field_info }
+
+ void DoPrepareSFields(const SFieldDef* defs, size_t count) {
+ cu_.mir_graph->sfield_lowering_infos_.Reset();
+ cu_.mir_graph->sfield_lowering_infos_.Resize(count);
+ for (size_t i = 0u; i != count; ++i) {
+ const SFieldDef* def = &defs[i];
+ MirSFieldLoweringInfo field_info(def->field_idx);
+ if (def->declaring_dex_file != 0u) {
+ field_info.declaring_dex_file_ = reinterpret_cast<const DexFile*>(def->declaring_dex_file);
+ field_info.declaring_class_idx_ = def->declaring_class_idx;
+ field_info.declaring_field_idx_ = def->declaring_field_idx;
+ field_info.flags_ = MirSFieldLoweringInfo::kFlagIsStatic;
+ }
+ ASSERT_EQ(def->declaring_dex_file != 0u, field_info.IsResolved());
+ ASSERT_FALSE(field_info.IsInitialized());
+ cu_.mir_graph->sfield_lowering_infos_.Insert(field_info);
+ }
+ }
+
+ template <size_t count>
+ void PrepareSFields(const SFieldDef (&defs)[count]) {
+ DoPrepareSFields(defs, count);
+ }
+
+ void DoPrepareBasicBlocks(const BBDef* defs, size_t count) {
+ cu_.mir_graph->block_id_map_.clear();
+ cu_.mir_graph->block_list_.Reset();
+ ASSERT_LT(3u, count); // null, entry, exit and at least one bytecode block.
+ ASSERT_EQ(kNullBlock, defs[0].type);
+ ASSERT_EQ(kEntryBlock, defs[1].type);
+ ASSERT_EQ(kExitBlock, defs[2].type);
+ for (size_t i = 0u; i != count; ++i) {
+ const BBDef* def = &defs[i];
+ BasicBlock* bb = cu_.mir_graph->NewMemBB(def->type, i);
+ cu_.mir_graph->block_list_.Insert(bb);
+ if (def->num_successors <= 2) {
+ bb->successor_block_list_type = kNotUsed;
+ bb->successor_blocks = nullptr;
+ bb->fall_through = (def->num_successors >= 1) ? def->successors[0] : 0u;
+ bb->taken = (def->num_successors >= 2) ? def->successors[1] : 0u;
+ } else {
+ bb->successor_block_list_type = kPackedSwitch;
+ bb->fall_through = 0u;
+ bb->taken = 0u;
+ bb->successor_blocks = new (&cu_.arena) GrowableArray<SuccessorBlockInfo*>(
+ &cu_.arena, def->num_successors, kGrowableArraySuccessorBlocks);
+ for (size_t j = 0u; j != def->num_successors; ++j) {
+ SuccessorBlockInfo* successor_block_info =
+ static_cast<SuccessorBlockInfo*>(cu_.arena.Alloc(sizeof(SuccessorBlockInfo),
+ kArenaAllocSuccessor));
+ successor_block_info->block = j;
+ successor_block_info->key = 0u; // Not used by class init check elimination.
+ bb->successor_blocks->Insert(successor_block_info);
+ }
+ }
+ bb->predecessors = new (&cu_.arena) GrowableArray<BasicBlockId>(
+ &cu_.arena, def->num_predecessors, kGrowableArrayPredecessors);
+ for (size_t j = 0u; j != def->num_predecessors; ++j) {
+ ASSERT_NE(0u, def->predecessors[j]);
+ bb->predecessors->Insert(def->predecessors[j]);
+ }
+ if (def->type == kDalvikByteCode || def->type == kEntryBlock || def->type == kExitBlock) {
+ bb->data_flow_info = static_cast<BasicBlockDataFlow*>(
+ cu_.arena.Alloc(sizeof(BasicBlockDataFlow), kArenaAllocDFInfo));
+ }
+ }
+ cu_.mir_graph->num_blocks_ = count;
+ ASSERT_EQ(count, cu_.mir_graph->block_list_.Size());
+ cu_.mir_graph->entry_block_ = cu_.mir_graph->block_list_.Get(1);
+ ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
+ cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_.Get(2);
+ ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+ }
+
+ template <size_t count>
+ void PrepareBasicBlocks(const BBDef (&defs)[count]) {
+ DoPrepareBasicBlocks(defs, count);
+ }
+
+ void DoPrepareMIRs(const MIRDef* defs, size_t count) {
+ mir_count_ = count;
+ mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
+ uint64_t merged_df_flags = 0u;
+ for (size_t i = 0u; i != count; ++i) {
+ const MIRDef* def = &defs[i];
+ MIR* mir = &mirs_[i];
+ mir->dalvikInsn.opcode = def->opcode;
+ ASSERT_LT(def->bbid, cu_.mir_graph->block_list_.Size());
+ BasicBlock* bb = cu_.mir_graph->block_list_.Get(def->bbid);
+ cu_.mir_graph->AppendMIR(bb, mir);
+ if (def->opcode >= Instruction::SGET && def->opcode <= Instruction::SPUT_SHORT) {
+ ASSERT_LT(def->field_or_method_info, cu_.mir_graph->sfield_lowering_infos_.Size());
+ mir->meta.sfield_lowering_info = def->field_or_method_info;
+ }
+ mir->ssa_rep = nullptr;
+ mir->offset = 2 * i; // All insns need to be at least 2 code units long.
+ mir->width = 2u;
+ mir->optimization_flags = 0u;
+ merged_df_flags |= MIRGraph::oat_data_flow_attributes_[def->opcode];
+ }
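+ // EliminateClassInitChecksGate() checks HasStaticFieldAccess(), which reads merged_df_flags_.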
+ cu_.mir_graph->merged_df_flags_ = merged_df_flags;
+
+ code_item_ = static_cast<DexFile::CodeItem*>(
+ cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+ memset(code_item_, 0, sizeof(DexFile::CodeItem));
+ code_item_->insns_size_in_code_units_ = 2u * count;
+ cu_.mir_graph->current_code_item_ = cu_.code_item = code_item_;
+ }
+
+ template <size_t count>
+ void PrepareMIRs(const MIRDef (&defs)[count]) {
+ DoPrepareMIRs(defs, count);
+ }
+
+ void PerformClassInitCheckElimination() {
+ cu_.mir_graph->ComputeDFSOrders();
+ bool gate_result = cu_.mir_graph->EliminateClassInitChecksGate();
+ ASSERT_TRUE(gate_result);
+ RepeatingPreOrderDfsIterator iterator(cu_.mir_graph.get());
+ bool change = false;
+ for (BasicBlock* bb = iterator.Next(change); bb != nullptr; bb = iterator.Next(change)) {
+ change = cu_.mir_graph->EliminateClassInitChecks(bb);
+ }
+ cu_.mir_graph->EliminateClassInitChecksEnd();
+ }
+
+ ClassInitCheckEliminationTest()
+ : pool_(),
+ cu_(&pool_),
+ mir_count_(0u),
+ mirs_(nullptr),
+ code_item_(nullptr) {
+ cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
+ }
+
+ ArenaPool pool_;
+ CompilationUnit cu_;
+ size_t mir_count_;
+ MIR* mirs_;
+ DexFile::CodeItem* code_item_;
+};
+
+TEST_F(ClassInitCheckEliminationTest, SingleBlock) {
+ static const SFieldDef sfields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ { 2u, 1u, 2u, 2u },
+ { 3u, 1u, 3u, 3u }, // Same declaring class as sfield[4].
+ { 4u, 1u, 3u, 4u }, // Same declaring class as sfield[3].
+ { 5u, 0u, 0u, 0u }, // Unresolved.
+ };
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(1)),
+ };
+ static const MIRDef mirs[] = {
+ DEF_MIR(Instruction::SPUT, 3u, 5u), // Unresolved.
+ DEF_MIR(Instruction::SPUT, 3u, 0u),
+ DEF_MIR(Instruction::SGET, 3u, 1u),
+ DEF_MIR(Instruction::SGET, 3u, 2u),
+ DEF_MIR(Instruction::SGET, 3u, 5u), // Unresolved.
+ DEF_MIR(Instruction::SGET, 3u, 0u),
+ DEF_MIR(Instruction::SGET, 3u, 1u),
+ DEF_MIR(Instruction::SGET, 3u, 2u),
+ DEF_MIR(Instruction::SGET, 3u, 5u), // Unresolved.
+ DEF_MIR(Instruction::SGET, 3u, 3u),
+ DEF_MIR(Instruction::SGET, 3u, 4u),
+ };
+ static const bool expected_ignore_clinit_check[] = {
+ false, false, false, false, false, true, true, true, false, false, true
+ };
+
+ PrepareSFields(sfields);
+ PrepareBasicBlocks(bbs);
+ PrepareMIRs(mirs);
+ PerformClassInitCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ }
+}
+
+TEST_F(ClassInitCheckEliminationTest, Diamond) {
+ static const SFieldDef sfields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ { 2u, 1u, 2u, 2u },
+ { 3u, 1u, 3u, 3u },
+ { 4u, 1u, 4u, 4u },
+ { 5u, 1u, 5u, 5u },
+ { 6u, 1u, 6u, 6u },
+ { 7u, 1u, 7u, 7u },
+ { 8u, 1u, 8u, 8u }, // Same declaring class as sfield[9].
+ { 9u, 1u, 8u, 9u }, // Same declaring class as sfield[8].
+ { 10u, 0u, 0u, 0u }, // Unresolved.
+ };
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(6)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(4, 5), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(6), DEF_PRED1(3)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(4, 5)),
+ };
+ static const MIRDef mirs[] = {
+ // NOTE: MIRs here are ordered by unique tests. They will be put into appropriate blocks.
+ DEF_MIR(Instruction::SGET, 3u, 10u), // Unresolved.
+ DEF_MIR(Instruction::SPUT, 3u, 10u), // Unresolved.
+ DEF_MIR(Instruction::SPUT, 3u, 0u),
+ DEF_MIR(Instruction::SGET, 6u, 0u), // Eliminated (block #3 dominates #6).
+ DEF_MIR(Instruction::SPUT, 4u, 1u),
+ DEF_MIR(Instruction::SGET, 6u, 1u), // Not eliminated (block #4 doesn't dominate #6).
+ DEF_MIR(Instruction::SGET, 3u, 2u),
+ DEF_MIR(Instruction::SGET, 4u, 2u), // Eliminated (block #3 dominates #4).
+ DEF_MIR(Instruction::SGET, 3u, 3u),
+ DEF_MIR(Instruction::SGET, 5u, 3u), // Eliminated (block #3 dominates #5).
+ DEF_MIR(Instruction::SGET, 3u, 4u),
+ DEF_MIR(Instruction::SGET, 6u, 4u), // Eliminated (block #3 dominates #6).
+ DEF_MIR(Instruction::SGET, 4u, 5u),
+ DEF_MIR(Instruction::SGET, 6u, 5u), // Not eliminated (block #4 doesn't dominate #6).
+ DEF_MIR(Instruction::SGET, 5u, 6u),
+ DEF_MIR(Instruction::SGET, 6u, 6u), // Not eliminated (block #5 doesn't dominate #6).
+ DEF_MIR(Instruction::SGET, 4u, 7u),
+ DEF_MIR(Instruction::SGET, 5u, 7u),
+ DEF_MIR(Instruction::SGET, 6u, 7u), // Eliminated (initialized in both blocks #4 and #5).
+ DEF_MIR(Instruction::SGET, 4u, 8u),
+ DEF_MIR(Instruction::SGET, 5u, 9u),
+ DEF_MIR(Instruction::SGET, 6u, 8u), // Eliminated (with sfield[9] in block #5).
+ DEF_MIR(Instruction::SPUT, 6u, 9u), // Eliminated (with sfield[8] in block #4).
+ };
+ static const bool expected_ignore_clinit_check[] = {
+ false, false, // Unresolved: sfield[10] (SGET and SPUT).
+ false, true, // sfield[0]
+ false, false, // sfield[1]
+ false, true, // sfield[2]
+ false, true, // sfield[3]
+ false, true, // sfield[4]
+ false, false, // sfield[5]
+ false, false, // sfield[6]
+ false, false, true, // sfield[7]
+ false, false, true, true, // sfield[8], sfield[9]
+ };
+
+ PrepareSFields(sfields);
+ PrepareBasicBlocks(bbs);
+ PrepareMIRs(mirs);
+ PerformClassInitCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ }
+}
+
+TEST_F(ClassInitCheckEliminationTest, Loop) {
+ static const SFieldDef sfields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ };
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED2(3, 4)), // "taken" loops to self.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED1(4)),
+ };
+ static const MIRDef mirs[] = {
+ DEF_MIR(Instruction::SGET, 3u, 0u),
+ DEF_MIR(Instruction::SGET, 4u, 1u),
+ DEF_MIR(Instruction::SGET, 5u, 0u), // Eliminated.
+ DEF_MIR(Instruction::SGET, 5u, 1u), // Eliminated.
+ };
+ static const bool expected_ignore_clinit_check[] = {
+ false, false, true, true
+ };
+
+ PrepareSFields(sfields);
+ PrepareBasicBlocks(bbs);
+ PrepareMIRs(mirs);
+ PerformClassInitCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ }
+}
+
+TEST_F(ClassInitCheckEliminationTest, Catch) {
+ static const SFieldDef sfields[] = {
+ { 0u, 1u, 0u, 0u },
+ { 1u, 1u, 1u, 1u },
+ };
+ static const BBDef bbs[] = {
+ DEF_BB(kNullBlock, DEF_SUCC0(), DEF_PRED0()),
+ DEF_BB(kEntryBlock, DEF_SUCC1(3), DEF_PRED0()),
+ DEF_BB(kExitBlock, DEF_SUCC0(), DEF_PRED1(5)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC2(5, 4), DEF_PRED1(1)),
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(5), DEF_PRED1(3)), // Catch handler.
+ DEF_BB(kDalvikByteCode, DEF_SUCC1(2), DEF_PRED2(3, 4)),
+ };
+ static const MIRDef mirs[] = {
+ DEF_MIR(Instruction::SGET, 3u, 0u),
+ DEF_MIR(Instruction::SGET, 3u, 1u),
+ DEF_MIR(Instruction::SGET, 4u, 1u),
+ DEF_MIR(Instruction::SGET, 5u, 0u), // Not eliminated.
+ DEF_MIR(Instruction::SGET, 5u, 1u), // Eliminated.
+ };
+ static const bool expected_ignore_clinit_check[] = {
+ false, false, false, false, true
+ };
+
+ PrepareSFields(sfields);
+ PrepareBasicBlocks(bbs);
+ BasicBlock* catch_handler = cu_.mir_graph->GetBasicBlock(4u);
+ catch_handler->catch_entry = true;
+ PrepareMIRs(mirs);
+ PerformClassInitCheckElimination();
+ ASSERT_EQ(arraysize(expected_ignore_clinit_check), mir_count_);
+ for (size_t i = 0u; i != arraysize(mirs); ++i) {
+ EXPECT_EQ(expected_ignore_clinit_check[i],
+ (mirs_[i].optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0) << i;
+ }
+}
+
+} // namespace art
diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc
index 72d3ea6..f195aff 100644
--- a/compiler/dex/pass_driver.cc
+++ b/compiler/dex/pass_driver.cc
@@ -97,8 +97,8 @@
GetPassInstance<ConstantPropagation>(),
GetPassInstance<InitRegLocations>(),
GetPassInstance<MethodUseCount>(),
- GetPassInstance<NullCheckEliminationAndTypeInferenceInit>(),
GetPassInstance<NullCheckEliminationAndTypeInference>(),
+ GetPassInstance<ClassInitCheckElimination>(),
GetPassInstance<BBCombine>(),
GetPassInstance<BBOptimizations>(),
};
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 1e21991..58db984 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -449,7 +449,8 @@
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!field_info.IsInitialized()) {
+ if (!field_info.IsInitialized() &&
+ (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
@@ -533,7 +534,8 @@
LoadWordDisp(r_base, mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
sizeof(int32_t*) * field_info.StorageIndex(), r_base);
// r_base now points at static storage (Class*) or NULL if the type is not yet resolved.
- if (!field_info.IsInitialized()) {
+ if (!field_info.IsInitialized() &&
+ (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) == 0) {
// Check if r_base is NULL or a not yet initialized class.
// The slow path is invoked if the r_base is NULL or the class pointed
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 8091528..d70e3f5 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -373,7 +373,6 @@
/* Compute dominators, immediate dominator, and dominance fronter */
void MIRGraph::ComputeDominators() {
int num_reachable_blocks = num_reachable_blocks_;
- int num_total_blocks = GetBasicBlockListCount();
/* Initialize domination-related data structures */
PreOrderDfsIterator iter(this);
@@ -405,12 +404,6 @@
GetEntryBlock()->dominators->ClearAllBits();
GetEntryBlock()->dominators->SetBit(GetEntryBlock()->id);
- if (temp_block_v_ == NULL) {
- temp_block_v_ = new (arena_) ArenaBitVector(arena_, num_total_blocks,
- false /* expandable */, kBitMapTmpBlockV);
- } else {
- temp_block_v_->ClearAllBits();
- }
GetEntryBlock()->i_dom = 0;
PreOrderDfsIterator iter3(this);