Merge "ART: Make dex2oat timing a bit more granular"
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 326a92b..71a55bb 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -249,6 +249,7 @@
compiler/optimizing/graph_test.cc \
compiler/optimizing/gvn_test.cc \
compiler/optimizing/induction_var_analysis_test.cc \
+ compiler/optimizing/induction_var_range_test.cc \
compiler/optimizing/licm_test.cc \
compiler/optimizing/live_interval_test.cc \
compiler/optimizing/nodes_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index ce9e367..41e9744 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -72,6 +72,7 @@
optimizing/graph_visualizer.cc \
optimizing/gvn.cc \
optimizing/induction_var_analysis.cc \
+ optimizing/induction_var_range.cc \
optimizing/inliner.cc \
optimizing/instruction_simplifier.cc \
optimizing/intrinsics.cc \
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 4b67884..1727657 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -192,7 +192,7 @@
GetImageClasses(),
GetCompiledClasses(),
GetCompiledMethods(),
- 2, true, true, "", timer_.get(), -1, ""));
+ 2, true, true, "", false, timer_.get(), -1, ""));
}
// We typically don't generate an image in unit tests, disable this optimization by default.
compiler_driver_->SetSupportBootImageFixup(false);
diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc
index 16c161e..18c2e55 100644
--- a/compiler/dex/quick/quick_cfi_test.cc
+++ b/compiler/dex/quick/quick_cfi_test.cc
@@ -77,7 +77,7 @@
isa_features.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
CompilerDriver driver(&compiler_options, &verification_results, &method_inliner_map,
Compiler::kQuick, isa, isa_features.get(),
- false, nullptr, nullptr, nullptr, 0, false, false, "", 0, -1, "");
+ false, nullptr, nullptr, nullptr, 0, false, false, "", false, 0, -1, "");
ClassLinker* linker = nullptr;
CompilationUnit cu(&pool, isa, &driver, linker);
DexFile::CodeItem code_item { 0, 0, 0, 0, 0, 0, { 0 } }; // NOLINT
diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
index 98e9f38..d9571c5 100644
--- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc
+++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc
@@ -70,6 +70,7 @@
false,
false,
"",
+ false,
0,
-1,
""));
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 4c1408a..8d41595 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -345,8 +345,9 @@
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
size_t thread_count, bool dump_stats, bool dump_passes,
- const std::string& dump_cfg_file_name, CumulativeLogger* timer,
- int swap_fd, const std::string& profile_file)
+ const std::string& dump_cfg_file_name, bool dump_cfg_append,
+ CumulativeLogger* timer, int swap_fd,
+ const std::string& profile_file)
: swap_space_(swap_fd == -1 ? nullptr : new SwapSpace(swap_fd, 10 * MB)),
swap_space_allocator_(new SwapAllocator<void>(swap_space_.get())),
profile_present_(false), compiler_options_(compiler_options),
@@ -372,6 +373,7 @@
dump_stats_(dump_stats),
dump_passes_(dump_passes),
dump_cfg_file_name_(dump_cfg_file_name),
+ dump_cfg_append_(dump_cfg_append),
timings_logger_(timer),
compiler_context_(nullptr),
support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64),
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index b229184..11e782f 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -99,7 +99,7 @@
std::unordered_set<std::string>* compiled_classes,
std::unordered_set<std::string>* compiled_methods,
size_t thread_count, bool dump_stats, bool dump_passes,
- const std::string& dump_cfg_file_name,
+ const std::string& dump_cfg_file_name, bool dump_cfg_append,
CumulativeLogger* timer, int swap_fd,
const std::string& profile_file);
@@ -426,6 +426,10 @@
return dump_cfg_file_name_;
}
+ bool GetDumpCfgAppend() const {
+ return dump_cfg_append_;
+ }
+
CumulativeLogger* GetTimingsLogger() const {
return timings_logger_;
}
@@ -667,6 +671,7 @@
bool dump_stats_;
const bool dump_passes_;
const std::string dump_cfg_file_name_;
+ const bool dump_cfg_append_;
CumulativeLogger* const timings_logger_;
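The new dump_cfg_append flag travels from the CompilerDriver constructor into a const member and is exposed through GetDumpCfgAppend(), presumably so the code that writes the CFG dump can choose between overwriting and appending to the dump file. As an illustration only (the dump code itself is not part of this excerpt, and the helper below is a stand-in rather than ART's API), here is a self-contained sketch of the file-open behavior such a flag would select:

// Hypothetical helper, not part of this patch: demonstrates append vs. truncate,
// which is the behavior the dump_cfg_append flag is meant to toggle.
#include <fstream>
#include <ios>
#include <string>

std::ofstream OpenCfgDump(const std::string& file_name, bool dump_cfg_append) {
  std::ios_base::openmode mode = std::ios_base::out;
  if (dump_cfg_append) {
    mode |= std::ios_base::app;    // keep CFGs already written to the file
  } else {
    mode |= std::ios_base::trunc;  // start a fresh dump file
  }
  return std::ofstream(file_name, mode);
}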
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 4215f3c..b6a40a2 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -108,6 +108,7 @@
/* dump_stats */ false,
/* dump_passes */ false,
/* dump_cfg_file_name */ "",
+ /* dump_cfg_append */ false,
cumulative_logger_.get(),
/* swap_fd */ -1,
/* profile_file */ ""));
diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h
index 1f7500a..31d1bce 100644
--- a/compiler/linker/relative_patcher_test.h
+++ b/compiler/linker/relative_patcher_test.h
@@ -46,7 +46,7 @@
driver_(&compiler_options_, &verification_results_, &inliner_map_,
Compiler::kQuick, instruction_set, nullptr,
false, nullptr, nullptr, nullptr, 1u,
- false, false, "", nullptr, -1, ""),
+ false, false, "", false, nullptr, -1, ""),
error_msg_(),
instruction_set_(instruction_set),
features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)),
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 88dc29e..2d9d91a 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -94,7 +94,7 @@
method_inliner_map_.get(),
compiler_kind, insn_set,
insn_features.get(), false, nullptr, nullptr, nullptr,
- 2, true, true, "", timer_.get(), -1, ""));
+ 2, true, true, "", false, timer_.get(), -1, ""));
jobject class_loader = nullptr;
if (kCompile) {
TimingLogger timings2("OatTest::WriteRead", false, false);
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index 70ad408..62f5b9a 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -16,6 +16,7 @@
#include "base/arena_containers.h"
#include "bounds_check_elimination.h"
+#include "induction_var_range.h"
#include "nodes.h"
namespace art {
@@ -126,14 +127,17 @@
return instruction_ == bound.instruction_ && constant_ == bound.constant_;
}
- static HInstruction* FromArrayLengthToArray(HInstruction* instruction) {
- DCHECK(instruction->IsArrayLength() || instruction->IsNewArray());
- if (instruction->IsArrayLength()) {
- HInstruction* input = instruction->InputAt(0);
- if (input->IsNullCheck()) {
- input = input->AsNullCheck()->InputAt(0);
- }
- return input;
+ /*
+ * Hunt "under the hood" of array lengths (leading to array references),
+ * null checks (also leading to array references), and new arrays
+ * (leading to the actual length). This makes it more likely that related
+ * instructions actually become comparable.
+ */
+ static HInstruction* HuntForDeclaration(HInstruction* instruction) {
+ while (instruction->IsArrayLength() ||
+ instruction->IsNullCheck() ||
+ instruction->IsNewArray()) {
+ instruction = instruction->InputAt(0);
}
return instruction;
}
@@ -142,16 +146,11 @@
if (instruction1 == instruction2) {
return true;
}
-
if (instruction1 == nullptr || instruction2 == nullptr) {
return false;
}
-
- // Some bounds are created with HNewArray* as the instruction instead
- // of HArrayLength*. They are treated the same.
- // HArrayLength with the same array input are considered equal also.
- instruction1 = FromArrayLengthToArray(instruction1);
- instruction2 = FromArrayLengthToArray(instruction2);
+ instruction1 = HuntForDeclaration(instruction1);
+ instruction2 = HuntForDeclaration(instruction2);
return instruction1 == instruction2;
}
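For context: the replaced FromArrayLengthToArray peeled exactly one HArrayLength (plus an optional HNullCheck) or returned an HNewArray as-is, whereas HuntForDeclaration loops, stripping HArrayLength, HNullCheck, and HNewArray wrappers until it reaches the underlying declaration, so differently wrapped expressions rooted at the same array now compare equal. A self-contained illustration of the idea, using mock node types rather than ART's HInstruction classes:

// Mock illustration only; the real code operates on HInstruction and InputAt(0).
#include <cassert>

enum class Kind { kParameter, kNewArray, kNullCheck, kArrayLength };

struct Node {
  Kind kind;
  Node* input;  // the single input that each wrapper kind forwards to
};

Node* HuntForDeclaration(Node* n) {
  // Same loop shape as the helper above: strip wrappers until a "declaration".
  while (n->kind == Kind::kArrayLength ||
         n->kind == Kind::kNullCheck ||
         n->kind == Kind::kNewArray) {
    n = n->input;
  }
  return n;
}

int main() {
  Node array{Kind::kParameter, nullptr};
  Node null_check1{Kind::kNullCheck, &array};
  Node length1{Kind::kArrayLength, &null_check1};
  Node null_check2{Kind::kNullCheck, &array};
  Node length2{Kind::kArrayLength, &null_check2};
  // Two distinct ArrayLength nodes over the same array reduce to one declaration.
  assert(HuntForDeclaration(&length1) == HuntForDeclaration(&length2));
  return 0;
}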
@@ -285,8 +284,7 @@
}
static bool DominatesAllBackEdges(HBasicBlock* block, HLoopInformation* loop_info) {
- for (size_t i = 0, e = loop_info->GetBackEdges().Size(); i < e; ++i) {
- HBasicBlock* back_edge = loop_info->GetBackEdges().Get(i);
+ for (HBasicBlock* back_edge : loop_info->GetBackEdges()) {
if (!block->Dominates(back_edge)) {
return false;
}
@@ -1108,9 +1106,12 @@
return block->GetBlockId() >= initial_block_size_;
}
- explicit BCEVisitor(HGraph* graph)
- : HGraphVisitor(graph), maps_(graph->GetBlocks().Size()),
- need_to_revisit_block_(false), initial_block_size_(graph->GetBlocks().Size()) {}
+ BCEVisitor(HGraph* graph, HInductionVarAnalysis* induction_analysis)
+ : HGraphVisitor(graph),
+ maps_(graph->GetBlocks().size()),
+ need_to_revisit_block_(false),
+ initial_block_size_(graph->GetBlocks().size()),
+ induction_range_(induction_analysis) {}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
DCHECK(!IsAddedBlock(block));
@@ -1159,6 +1160,23 @@
return nullptr;
}
+ // Return the range resulting from induction variable analysis of "instruction" when the value
+ // is used from "context", for example, an index used from a bounds-check inside a loop body.
+ ValueRange* LookupInductionRange(HInstruction* context, HInstruction* instruction) {
+ InductionVarRange::Value v1 = induction_range_.GetMinInduction(context, instruction);
+ InductionVarRange::Value v2 = induction_range_.GetMaxInduction(context, instruction);
+ if ((v1.a_constant == 0 || v1.a_constant == 1) && v1.b_constant != INT_MIN &&
+ (v2.a_constant == 0 || v2.a_constant == 1) && v2.b_constant != INT_MAX) {
+ DCHECK(v1.a_constant == 1 || v1.instruction == nullptr);
+ DCHECK(v2.a_constant == 1 || v2.instruction == nullptr);
+ ValueBound low = ValueBound(v1.instruction, v1.b_constant);
+ ValueBound up = ValueBound(v2.instruction, v2.b_constant);
+ return new (GetGraph()->GetArena()) ValueRange(GetGraph()->GetArena(), low, up);
+ }
+ // Didn't find anything useful.
+ return nullptr;
+ }
+
// Narrow the value range of `instruction` at the end of `basic_block` with `range`,
// and push the narrowed value range to `successor`.
void ApplyRangeFromComparison(HInstruction* instruction, HBasicBlock* basic_block,
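LookupInductionRange only accepts results that fit ValueBound's limited shape: an InductionVarRange::Value encodes a_constant * instruction + b_constant, while a ValueBound can express only a constant (instruction == nullptr) or instruction + constant, and the INT_MIN/INT_MAX checks appear to reject the analysis' "no useful bound" sentinels for the min and max query respectively. A minimal stand-alone sketch of that acceptance test, with stand-in structs rather than ART's classes:

// Stand-in for InductionVarRange::Value: a_constant * instruction + b_constant.
#include <climits>
#include <cstdio>

struct Value { const void* instruction; int a_constant; int b_constant; };

// Mirrors the condition above: only a_constant of 0 (plain constant) or 1
// (instruction plus offset) has a ValueBound form, and the INT_MIN/INT_MAX
// sentinels mean no usable min/max was found.
bool ConvertibleToBound(const Value& v, bool is_min) {
  const int unknown = is_min ? INT_MIN : INT_MAX;
  return (v.a_constant == 0 || v.a_constant == 1) && v.b_constant != unknown;
}

int main() {
  int dummy = 0;                // stands in for an HInstruction such as array_length
  Value lower{nullptr, 0, 0};   // constant 0: usable as a lower bound
  Value upper{&dummy, 1, -1};   // array_length - 1: usable as an upper bound
  Value scaled{&dummy, 2, 0};   // 2 * i: no ValueBound form, rejected
  std::printf("%d %d %d\n",
              ConvertibleToBound(lower, /*is_min=*/true),
              ConvertibleToBound(upper, /*is_min=*/false),
              ConvertibleToBound(scaled, /*is_min=*/false));
  return 0;
}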
@@ -1390,16 +1408,20 @@
}
if (!index->IsIntConstant()) {
+ ValueBound lower = ValueBound(nullptr, 0); // constant 0
+ ValueBound upper = ValueBound(array_length, -1); // array_length - 1
+ ValueRange array_range(GetGraph()->GetArena(), lower, upper);
+ // Try range obtained by local analysis.
ValueRange* index_range = LookupValueRange(index, block);
- if (index_range != nullptr) {
- ValueBound lower = ValueBound(nullptr, 0); // constant 0
- ValueBound upper = ValueBound(array_length, -1); // array_length - 1
- ValueRange* array_range = new (GetGraph()->GetArena())
- ValueRange(GetGraph()->GetArena(), lower, upper);
- if (index_range->FitsIn(array_range)) {
- ReplaceBoundsCheck(bounds_check, index);
- return;
- }
+ if (index_range != nullptr && index_range->FitsIn(&array_range)) {
+ ReplaceBoundsCheck(bounds_check, index);
+ return;
+ }
+ // Try range obtained by induction variable analysis.
+ index_range = LookupInductionRange(bounds_check, index);
+ if (index_range != nullptr && index_range->FitsIn(&array_range)) {
+ ReplaceBoundsCheck(bounds_check, index);
+ return;
}
} else {
int32_t constant = index->AsIntConstant()->GetValue();
@@ -1829,7 +1851,10 @@
bool need_to_revisit_block_;
// Initial number of blocks.
- int32_t initial_block_size_;
+ uint32_t initial_block_size_;
+
+ // Range analysis based on induction variables.
+ InductionVarRange induction_range_;
DISALLOW_COPY_AND_ASSIGN(BCEVisitor);
};
@@ -1839,7 +1864,7 @@
return;
}
- BCEVisitor visitor(graph_);
+ BCEVisitor visitor(graph_, induction_analysis_);
// Reverse post order guarantees a node's dominators are visited first.
// We want to visit in the dominator-based order since if a value is known to
// be bounded by a range at one instruction, it must be true that all uses of
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 24b8ea7..cdff3ca 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -21,16 +21,21 @@
namespace art {
+class HInductionVarAnalysis;
+
class BoundsCheckElimination : public HOptimization {
public:
- explicit BoundsCheckElimination(HGraph* graph)
- : HOptimization(graph, kBoundsCheckEliminiationPassName) {}
+ BoundsCheckElimination(HGraph* graph, HInductionVarAnalysis* induction_analysis)
+ : HOptimization(graph, kBoundsCheckEliminiationPassName),
+ induction_analysis_(induction_analysis) {}
void Run() OVERRIDE;
static constexpr const char* kBoundsCheckEliminiationPassName = "BCE";
private:
+ HInductionVarAnalysis* induction_analysis_;
+
DISALLOW_COPY_AND_ASSIGN(BoundsCheckElimination);
};
diff --git a/compiler/optimizing/bounds_check_elimination_test.cc b/compiler/optimizing/bounds_check_elimination_test.cc
index 4701bdd..08e1e36 100644
--- a/compiler/optimizing/bounds_check_elimination_test.cc
+++ b/compiler/optimizing/bounds_check_elimination_test.cc
@@ -18,6 +18,7 @@
#include "bounds_check_elimination.h"
#include "builder.h"
#include "gvn.h"
+#include "induction_var_analysis.h"
#include "instruction_simplifier.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
@@ -27,101 +28,122 @@
namespace art {
-static void RunSimplifierAndGvn(HGraph* graph) {
- InstructionSimplifier simplify(graph);
- simplify.Run();
- SideEffectsAnalysis side_effects(graph);
- side_effects.Run();
- GVNOptimization(graph, side_effects).Run();
-}
+/**
+ * Fixture class for the BoundsCheckElimination tests.
+ */
+class BoundsCheckEliminationTest : public testing::Test {
+ public:
+ BoundsCheckEliminationTest() : pool_(), allocator_(&pool_) {
+ graph_ = CreateGraph(&allocator_);
+ graph_->SetHasBoundsChecks(true);
+ }
+
+ ~BoundsCheckEliminationTest() { }
+
+ void RunBCE() {
+ graph_->BuildDominatorTree();
+ graph_->AnalyzeNaturalLoops();
+
+ InstructionSimplifier(graph_).Run();
+
+ SideEffectsAnalysis side_effects(graph_);
+ side_effects.Run();
+
+ GVNOptimization(graph_, side_effects).Run();
+
+ HInductionVarAnalysis induction(graph_);
+ induction.Run();
+
+ BoundsCheckElimination(graph_, &induction).Run();
+ }
+
+ ArenaPool pool_;
+ ArenaAllocator allocator_;
+ HGraph* graph_;
+};
+
// if (i < 0) { array[i] = 1; // Can't eliminate. }
// else if (i >= array.length) { array[i] = 1; // Can't eliminate. }
// else { array[i] = 1; // Can eliminate. }
-TEST(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator)
+TEST_F(BoundsCheckEliminationTest, NarrowingRangeArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter1 = new (&allocator_)
HParameterValue(0, Primitive::kPrimNot); // array
- HInstruction* parameter2 = new (&allocator)
+ HInstruction* parameter2 = new (&allocator_)
HParameterValue(0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- HInstruction* constant_1 = graph->GetIntConstant(1);
- HInstruction* constant_0 = graph->GetIntConstant(0);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
- HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block1);
- HInstruction* cmp = new (&allocator) HGreaterThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block1);
+ HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block2);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check2 = new (&allocator)
+ HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block2);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check2 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
- HArraySet* array_set = new (&allocator) HArraySet(
+ HArraySet* array_set = new (&allocator_) HArraySet(
null_check, bounds_check2, constant_1, Primitive::kPrimInt, 0);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(bounds_check2);
block2->AddInstruction(array_set);
- HBasicBlock* block3 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block3);
- null_check = new (&allocator) HNullCheck(parameter1, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- cmp = new (&allocator) HLessThan(parameter2, array_length);
- if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block3);
+ null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ cmp = new (&allocator_) HLessThan(parameter2, array_length);
+ if_inst = new (&allocator_) HIf(cmp);
block3->AddInstruction(null_check);
block3->AddInstruction(array_length);
block3->AddInstruction(cmp);
block3->AddInstruction(if_inst);
- HBasicBlock* block4 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block4);
- null_check = new (&allocator) HNullCheck(parameter1, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check4 = new (&allocator)
+ HBasicBlock* block4 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block4);
+ null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check4, constant_1, Primitive::kPrimInt, 0);
block4->AddInstruction(null_check);
block4->AddInstruction(array_length);
block4->AddInstruction(bounds_check4);
block4->AddInstruction(array_set);
- HBasicBlock* block5 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block5);
- null_check = new (&allocator) HNullCheck(parameter1, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check5 = new (&allocator)
+ HBasicBlock* block5 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block5);
+ null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(parameter2, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check5, constant_1, Primitive::kPrimInt, 0);
block5->AddInstruction(null_check);
block5->AddInstruction(array_length);
block5->AddInstruction(bounds_check5);
block5->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
block2->AddSuccessor(exit);
block4->AddSuccessor(exit);
block5->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ exit->AddInstruction(new (&allocator_) HExit());
block1->AddSuccessor(block3); // True successor
block1->AddSuccessor(block2); // False successor
@@ -129,10 +151,8 @@
block3->AddSuccessor(block5); // True successor
block3->AddSuccessor(block4); // False successor
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ RunBCE();
+
ASSERT_FALSE(IsRemoved(bounds_check2));
ASSERT_FALSE(IsRemoved(bounds_check4));
ASSERT_TRUE(IsRemoved(bounds_check5));
@@ -143,230 +163,203 @@
// int j = i + Integer.MAX_VALUE;
// if (j < array.length) array[j] = 1; // Can't eliminate.
// }
-TEST(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator)
+TEST_F(BoundsCheckEliminationTest, OverflowArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter1 = new (&allocator_)
HParameterValue(0, Primitive::kPrimNot); // array
- HInstruction* parameter2 = new (&allocator)
+ HInstruction* parameter2 = new (&allocator_)
HParameterValue(0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- HInstruction* constant_1 = graph->GetIntConstant(1);
- HInstruction* constant_0 = graph->GetIntConstant(0);
- HInstruction* constant_max_int = graph->GetIntConstant(INT_MAX);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
+ HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block1);
- HInstruction* cmp = new (&allocator) HLessThanOrEqual(parameter2, constant_0);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block1);
+ HInstruction* cmp = new (&allocator_) HLessThanOrEqual(parameter2, constant_0);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block2);
- HInstruction* add = new (&allocator) HAdd(Primitive::kPrimInt, parameter2, constant_max_int);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* cmp2 = new (&allocator) HGreaterThanOrEqual(add, array_length);
- if_inst = new (&allocator) HIf(cmp2);
+ HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block2);
+ HInstruction* add = new (&allocator_) HAdd(Primitive::kPrimInt, parameter2, constant_max_int);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* cmp2 = new (&allocator_) HGreaterThanOrEqual(add, array_length);
+ if_inst = new (&allocator_) HIf(cmp2);
block2->AddInstruction(add);
block2->AddInstruction(null_check);
block2->AddInstruction(array_length);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator)
+ HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block3);
+ HBoundsCheck* bounds_check = new (&allocator_)
HBoundsCheck(add, array_length, 0);
- HArraySet* array_set = new (&allocator) HArraySet(
+ HArraySet* array_set = new (&allocator_) HArraySet(
null_check, bounds_check, constant_1, Primitive::kPrimInt, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
+ exit->AddInstruction(new (&allocator_) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
block2->AddSuccessor(block3); // false successor
block3->AddSuccessor(exit);
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ RunBCE();
+
ASSERT_FALSE(IsRemoved(bounds_check));
}
// if (i < array.length) {
// int j = i - Integer.MAX_VALUE;
-// j = j - Integer.MAX_VALUE; // j is (i+2) after substracting MAX_INT twice
+// j = j - Integer.MAX_VALUE; // j is (i+2) after subtracting MAX_INT twice
// if (j > 0) array[j] = 1; // Can't eliminate.
// }
-TEST(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter1 = new (&allocator)
+TEST_F(BoundsCheckEliminationTest, UnderflowArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter1 = new (&allocator_)
HParameterValue(0, Primitive::kPrimNot); // array
- HInstruction* parameter2 = new (&allocator)
+ HInstruction* parameter2 = new (&allocator_)
HParameterValue(0, Primitive::kPrimInt); // i
entry->AddInstruction(parameter1);
entry->AddInstruction(parameter2);
- HInstruction* constant_1 = graph->GetIntConstant(1);
- HInstruction* constant_0 = graph->GetIntConstant(0);
- HInstruction* constant_max_int = graph->GetIntConstant(INT_MAX);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
+ HInstruction* constant_max_int = graph_->GetIntConstant(INT_MAX);
- HBasicBlock* block1 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block1);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter1, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* cmp = new (&allocator) HGreaterThanOrEqual(parameter2, array_length);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* block1 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block1);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter1, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(parameter2, array_length);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
block1->AddInstruction(null_check);
block1->AddInstruction(array_length);
block1->AddInstruction(cmp);
block1->AddInstruction(if_inst);
entry->AddSuccessor(block1);
- HBasicBlock* block2 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block2);
- HInstruction* sub1 = new (&allocator) HSub(Primitive::kPrimInt, parameter2, constant_max_int);
- HInstruction* sub2 = new (&allocator) HSub(Primitive::kPrimInt, sub1, constant_max_int);
- HInstruction* cmp2 = new (&allocator) HLessThanOrEqual(sub2, constant_0);
- if_inst = new (&allocator) HIf(cmp2);
+ HBasicBlock* block2 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block2);
+ HInstruction* sub1 = new (&allocator_) HSub(Primitive::kPrimInt, parameter2, constant_max_int);
+ HInstruction* sub2 = new (&allocator_) HSub(Primitive::kPrimInt, sub1, constant_max_int);
+ HInstruction* cmp2 = new (&allocator_) HLessThanOrEqual(sub2, constant_0);
+ if_inst = new (&allocator_) HIf(cmp2);
block2->AddInstruction(sub1);
block2->AddInstruction(sub2);
block2->AddInstruction(cmp2);
block2->AddInstruction(if_inst);
- HBasicBlock* block3 = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block3);
- HBoundsCheck* bounds_check = new (&allocator)
+ HBasicBlock* block3 = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block3);
+ HBoundsCheck* bounds_check = new (&allocator_)
HBoundsCheck(sub2, array_length, 0);
- HArraySet* array_set = new (&allocator) HArraySet(
+ HArraySet* array_set = new (&allocator_) HArraySet(
null_check, bounds_check, constant_1, Primitive::kPrimInt, 0);
block3->AddInstruction(bounds_check);
block3->AddInstruction(array_set);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
+ exit->AddInstruction(new (&allocator_) HExit());
block1->AddSuccessor(exit); // true successor
block1->AddSuccessor(block2); // false successor
block2->AddSuccessor(exit); // true successor
block2->AddSuccessor(block3); // false successor
block3->AddSuccessor(exit);
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ RunBCE();
+
ASSERT_FALSE(IsRemoved(bounds_check));
}
// array[6] = 1; // Can't eliminate.
// array[5] = 1; // Can eliminate.
// array[4] = 1; // Can eliminate.
-TEST(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+TEST_F(BoundsCheckEliminationTest, ConstantArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
- HInstruction* constant_5 = graph->GetIntConstant(5);
- HInstruction* constant_4 = graph->GetIntConstant(4);
- HInstruction* constant_6 = graph->GetIntConstant(6);
- HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_5 = graph_->GetIntConstant(5);
+ HInstruction* constant_4 = graph_->GetIntConstant(4);
+ HInstruction* constant_6 = graph_->GetIntConstant(6);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block);
+ HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block);
entry->AddSuccessor(block);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check6 = new (&allocator)
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check6 = new (&allocator_)
HBoundsCheck(constant_6, array_length, 0);
- HInstruction* array_set = new (&allocator) HArraySet(
+ HInstruction* array_set = new (&allocator_) HArraySet(
null_check, bounds_check6, constant_1, Primitive::kPrimInt, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check6);
block->AddInstruction(array_set);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check5 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check5 = new (&allocator_)
HBoundsCheck(constant_5, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check5, constant_1, Primitive::kPrimInt, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check5);
block->AddInstruction(array_set);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check4 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check4 = new (&allocator_)
HBoundsCheck(constant_4, array_length, 0);
- array_set = new (&allocator) HArraySet(
+ array_set = new (&allocator_) HArraySet(
null_check, bounds_check4, constant_1, Primitive::kPrimInt, 0);
block->AddInstruction(null_check);
block->AddInstruction(array_length);
block->AddInstruction(bounds_check4);
block->AddInstruction(array_set);
- block->AddInstruction(new (&allocator) HGoto());
+ block->AddInstruction(new (&allocator_) HGoto());
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
block->AddSuccessor(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ exit->AddInstruction(new (&allocator_) HExit());
- graph->BuildDominatorTree();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ RunBCE();
+
ASSERT_FALSE(IsRemoved(bounds_check6));
ASSERT_TRUE(IsRemoved(bounds_check5));
ASSERT_TRUE(IsRemoved(bounds_check4));
}
// for (int i=initial; i<array.length; i+=increment) { array[i] = 10; }
-static HGraph* BuildSSAGraph1(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- int increment,
- IfCondition cond = kCondGE) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph1(HGraph* graph,
+ ArenaAllocator* allocator,
+ int initial,
+ int increment,
+ IfCondition cond = kCondGE) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -414,14 +407,14 @@
null_check = new (allocator) HNullCheck(parameter, 0);
array_length = new (allocator) HArrayLength(null_check);
- *bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -429,79 +422,58 @@
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination1) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1a) {
// for (int i=0; i<array.length; i++) { array[i] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph1(&allocator, &bounds_check, 0, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1b) {
// for (int i=1; i<array.length; i++) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 1, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1c) {
// for (int i=-1; i<array.length; i++) { array[i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, -1, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_minus_1(graph);
- bounds_check_elimination_with_initial_minus_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, -1, 1);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1d) {
// for (int i=0; i<=array.length; i++) { array[i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 0, 1, kCondGT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_greater_than(graph);
- bounds_check_elimination_with_greater_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 1, kCondGT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1e) {
// for (int i=0; i<array.length; i += 2) {
// array[i] = 10; // Can't eliminate due to overflow concern. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 0, 2);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_increment_2(graph);
- bounds_check_elimination_with_increment_2.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 0, 2);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination1f) {
// for (int i=1; i<array.length; i += 2) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph1(&allocator, &bounds_check, 1, 2);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_increment_2_from_1(graph);
- bounds_check_elimination_with_increment_2_from_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph1(graph_, &allocator_, 1, 2);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
// for (int i=array.length; i>0; i+=increment) { array[i-1] = 10; }
-static HGraph* BuildSSAGraph2(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- int increment = -1,
- IfCondition cond = kCondLE) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph2(HGraph* graph,
+ ArenaAllocator* allocator,
+ int initial,
+ int increment = -1,
+ IfCondition cond = kCondLE) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -551,14 +523,14 @@
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_minus_1);
null_check = new (allocator) HNullCheck(parameter, 0);
array_length = new (allocator) HArrayLength(null_check);
- *bounds_check = new (allocator) HBoundsCheck(add, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(add, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add_phi = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment);
loop_body->AddInstruction(add);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add_phi);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -566,70 +538,51 @@
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination2) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2a) {
// for (int i=array.length; i>0; i--) { array[i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph2(&allocator, &bounds_check, 0);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2b) {
// for (int i=array.length; i>1; i--) { array[i-1] = 10; // Can eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2c) {
// for (int i=array.length; i>-1; i--) { array[i-1] = 10; // Can't eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, -1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_minus_1(graph);
- bounds_check_elimination_with_initial_minus_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, -1);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2d) {
// for (int i=array.length; i>=0; i--) { array[i-1] = 10; // Can't eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, 0, -1, kCondLT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_less_than(graph);
- bounds_check_elimination_with_less_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -1, kCondLT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination2e) {
// for (int i=array.length; i>0; i-=2) { array[i-1] = 10; // Can eliminate. }
- graph = BuildSSAGraph2(&allocator, &bounds_check, 0, -2);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_increment_minus_2(graph);
- bounds_check_elimination_increment_minus_2.Run();
+ HInstruction* bounds_check = BuildSSAGraph2(graph_, &allocator_, 0, -2);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
// int[] array = new int[10];
// for (int i=0; i<10; i+=increment) { array[i] = 10; }
-static HGraph* BuildSSAGraph3(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- int increment,
- IfCondition cond) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph3(HGraph* graph,
+ ArenaAllocator* allocator,
+ int initial,
+ int increment,
+ IfCondition cond) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -679,13 +632,13 @@
HNullCheck* null_check = new (allocator) HNullCheck(new_array, 0);
HArrayLength* array_length = new (allocator) HArrayLength(null_check);
- *bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(phi, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_increment);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -693,63 +646,46 @@
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination3) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3a) {
// int[] array = new int[10];
// for (int i=0; i<10; i++) { array[i] = 10; // Can eliminate. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph3(&allocator, &bounds_check, 0, 1, kCondGE);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGE);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3b) {
// int[] array = new int[10];
// for (int i=1; i<10; i++) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph3(&allocator, &bounds_check, 1, 1, kCondGE);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 1, kCondGE);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3c) {
// int[] array = new int[10];
// for (int i=0; i<=10; i++) { array[i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph3(&allocator, &bounds_check, 0, 1, kCondGT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_greater_than(graph);
- bounds_check_elimination_with_greater_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 0, 1, kCondGT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination3d) {
// int[] array = new int[10];
// for (int i=1; i<10; i+=8) { array[i] = 10; // Can eliminate. }
- graph = BuildSSAGraph3(&allocator, &bounds_check, 1, 8, kCondGE);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_increment_8(graph);
- bounds_check_elimination_increment_8.Run();
+ HInstruction* bounds_check = BuildSSAGraph3(graph_, &allocator_, 1, 8, kCondGE);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
}
// for (int i=initial; i<array.length; i++) { array[array.length-i-1] = 10; }
-static HGraph* BuildSSAGraph4(ArenaAllocator* allocator,
- HInstruction** bounds_check,
- int initial,
- IfCondition cond = kCondGE) {
- HGraph* graph = CreateGraph(allocator);
- graph->SetHasBoundsChecks(true);
-
+static HInstruction* BuildSSAGraph4(HGraph* graph,
+ ArenaAllocator* allocator,
+ int initial,
+ IfCondition cond = kCondGE) {
HBasicBlock* entry = new (allocator) HBasicBlock(graph);
graph->AddBlock(entry);
graph->SetEntryBlock(entry);
@@ -800,15 +736,15 @@
HInstruction* sub = new (allocator) HSub(Primitive::kPrimInt, array_length, phi);
HInstruction* add_minus_1 = new (allocator)
HAdd(Primitive::kPrimInt, sub, constant_minus_1);
- *bounds_check = new (allocator) HBoundsCheck(add_minus_1, array_length, 0);
+ HInstruction* bounds_check = new (allocator) HBoundsCheck(add_minus_1, array_length, 0);
HInstruction* array_set = new (allocator) HArraySet(
- null_check, *bounds_check, constant_10, Primitive::kPrimInt, 0);
+ null_check, bounds_check, constant_10, Primitive::kPrimInt, 0);
HInstruction* add = new (allocator) HAdd(Primitive::kPrimInt, phi, constant_1);
loop_body->AddInstruction(null_check);
loop_body->AddInstruction(array_length);
loop_body->AddInstruction(sub);
loop_body->AddInstruction(add_minus_1);
- loop_body->AddInstruction(*bounds_check);
+ loop_body->AddInstruction(bounds_check);
loop_body->AddInstruction(array_set);
loop_body->AddInstruction(add);
loop_body->AddInstruction(new (allocator) HGoto());
@@ -816,39 +752,27 @@
exit->AddInstruction(new (allocator) HExit());
- return graph;
+ return bounds_check;
}
-TEST(BoundsCheckEliminationTest, LoopArrayBoundsElimination4) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4a) {
// for (int i=0; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate with gvn. }
- HInstruction* bounds_check = nullptr;
- HGraph* graph = BuildSSAGraph4(&allocator, &bounds_check, 0);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4b) {
// for (int i=1; i<array.length; i++) { array[array.length-i-1] = 10; // Can eliminate. }
- graph = BuildSSAGraph4(&allocator, &bounds_check, 1);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_initial_1(graph);
- bounds_check_elimination_with_initial_1.Run();
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 1);
+ RunBCE();
ASSERT_TRUE(IsRemoved(bounds_check));
+}
+TEST_F(BoundsCheckEliminationTest, LoopArrayBoundsElimination4c) {
// for (int i=0; i<=array.length; i++) { array[array.length-i] = 10; // Can't eliminate. }
- graph = BuildSSAGraph4(&allocator, &bounds_check, 0, kCondGT);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- BoundsCheckElimination bounds_check_elimination_with_greater_than(graph);
- bounds_check_elimination_with_greater_than.Run();
+ HInstruction* bounds_check = BuildSSAGraph4(graph_, &allocator_, 0, kCondGT);
+ RunBCE();
ASSERT_FALSE(IsRemoved(bounds_check));
}
@@ -863,40 +787,34 @@
// }
// }
// }
-TEST(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- graph->SetHasBoundsChecks(true);
-
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- HInstruction* parameter = new (&allocator) HParameterValue(0, Primitive::kPrimNot);
+TEST_F(BoundsCheckEliminationTest, BubbleSortArrayBoundsElimination) {
+ HBasicBlock* entry = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(entry);
+ graph_->SetEntryBlock(entry);
+ HInstruction* parameter = new (&allocator_) HParameterValue(0, Primitive::kPrimNot);
entry->AddInstruction(parameter);
- HInstruction* constant_0 = graph->GetIntConstant(0);
- HInstruction* constant_minus_1 = graph->GetIntConstant(-1);
- HInstruction* constant_1 = graph->GetIntConstant(1);
+ HInstruction* constant_0 = graph_->GetIntConstant(0);
+ HInstruction* constant_minus_1 = graph_->GetIntConstant(-1);
+ HInstruction* constant_1 = graph_->GetIntConstant(1);
- HBasicBlock* block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(block);
+ HBasicBlock* block = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(block);
entry->AddSuccessor(block);
- block->AddInstruction(new (&allocator) HGoto());
+ block->AddInstruction(new (&allocator_) HGoto());
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit);
- exit->AddInstruction(new (&allocator) HExit());
+ HBasicBlock* exit = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(exit);
+ exit->AddInstruction(new (&allocator_) HExit());
- HBasicBlock* outer_header = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(outer_header);
- HPhi* phi_i = new (&allocator) HPhi(&allocator, 0, 0, Primitive::kPrimInt);
- HNullCheck* null_check = new (&allocator) HNullCheck(parameter, 0);
- HArrayLength* array_length = new (&allocator) HArrayLength(null_check);
- HAdd* add = new (&allocator) HAdd(Primitive::kPrimInt, array_length, constant_minus_1);
- HInstruction* cmp = new (&allocator) HGreaterThanOrEqual(phi_i, add);
- HIf* if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* outer_header = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(outer_header);
+ HPhi* phi_i = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
+ HNullCheck* null_check = new (&allocator_) HNullCheck(parameter, 0);
+ HArrayLength* array_length = new (&allocator_) HArrayLength(null_check);
+ HAdd* add = new (&allocator_) HAdd(Primitive::kPrimInt, array_length, constant_minus_1);
+ HInstruction* cmp = new (&allocator_) HGreaterThanOrEqual(phi_i, add);
+ HIf* if_inst = new (&allocator_) HIf(cmp);
outer_header->AddPhi(phi_i);
outer_header->AddInstruction(null_check);
outer_header->AddInstruction(array_length);
@@ -905,15 +823,15 @@
outer_header->AddInstruction(if_inst);
phi_i->AddInput(constant_0);
- HBasicBlock* inner_header = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_header);
- HPhi* phi_j = new (&allocator) HPhi(&allocator, 0, 0, Primitive::kPrimInt);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HSub* sub = new (&allocator) HSub(Primitive::kPrimInt, array_length, phi_i);
- add = new (&allocator) HAdd(Primitive::kPrimInt, sub, constant_minus_1);
- cmp = new (&allocator) HGreaterThanOrEqual(phi_j, add);
- if_inst = new (&allocator) HIf(cmp);
+ HBasicBlock* inner_header = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_header);
+ HPhi* phi_j = new (&allocator_) HPhi(&allocator_, 0, 0, Primitive::kPrimInt);
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HSub* sub = new (&allocator_) HSub(Primitive::kPrimInt, array_length, phi_i);
+ add = new (&allocator_) HAdd(Primitive::kPrimInt, sub, constant_minus_1);
+ cmp = new (&allocator_) HGreaterThanOrEqual(phi_j, add);
+ if_inst = new (&allocator_) HIf(cmp);
inner_header->AddPhi(phi_j);
inner_header->AddInstruction(null_check);
inner_header->AddInstruction(array_length);
@@ -923,25 +841,25 @@
inner_header->AddInstruction(if_inst);
phi_j->AddInput(constant_0);
- HBasicBlock* inner_body_compare = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_body_compare);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check1 = new (&allocator) HBoundsCheck(phi_j, array_length, 0);
- HArrayGet* array_get_j = new (&allocator)
+ HBasicBlock* inner_body_compare = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_body_compare);
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check1 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
+ HArrayGet* array_get_j = new (&allocator_)
HArrayGet(null_check, bounds_check1, Primitive::kPrimInt);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
inner_body_compare->AddInstruction(bounds_check1);
inner_body_compare->AddInstruction(array_get_j);
- HInstruction* j_plus_1 = new (&allocator) HAdd(Primitive::kPrimInt, phi_j, constant_1);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HBoundsCheck* bounds_check2 = new (&allocator) HBoundsCheck(j_plus_1, array_length, 0);
- HArrayGet* array_get_j_plus_1 = new (&allocator)
+ HInstruction* j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HBoundsCheck* bounds_check2 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
+ HArrayGet* array_get_j_plus_1 = new (&allocator_)
HArrayGet(null_check, bounds_check2, Primitive::kPrimInt);
- cmp = new (&allocator) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
- if_inst = new (&allocator) HIf(cmp);
+ cmp = new (&allocator_) HGreaterThanOrEqual(array_get_j, array_get_j_plus_1);
+ if_inst = new (&allocator_) HIf(cmp);
inner_body_compare->AddInstruction(j_plus_1);
inner_body_compare->AddInstruction(null_check);
inner_body_compare->AddInstruction(array_length);
@@ -950,14 +868,14 @@
inner_body_compare->AddInstruction(cmp);
inner_body_compare->AddInstruction(if_inst);
- HBasicBlock* inner_body_swap = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_body_swap);
- j_plus_1 = new (&allocator) HAdd(Primitive::kPrimInt, phi_j, constant_1);
+ HBasicBlock* inner_body_swap = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_body_swap);
+ j_plus_1 = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
// temp = array[j+1]
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check3 = new (&allocator) HBoundsCheck(j_plus_1, array_length, 0);
- array_get_j_plus_1 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check3 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
+ array_get_j_plus_1 = new (&allocator_)
HArrayGet(null_check, bounds_check3, Primitive::kPrimInt);
inner_body_swap->AddInstruction(j_plus_1);
inner_body_swap->AddInstruction(null_check);
@@ -965,48 +883,48 @@
inner_body_swap->AddInstruction(bounds_check3);
inner_body_swap->AddInstruction(array_get_j_plus_1);
// array[j+1] = array[j]
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check4 = new (&allocator) HBoundsCheck(phi_j, array_length, 0);
- array_get_j = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check4 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
+ array_get_j = new (&allocator_)
HArrayGet(null_check, bounds_check4, Primitive::kPrimInt);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check4);
inner_body_swap->AddInstruction(array_get_j);
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check5 = new (&allocator) HBoundsCheck(j_plus_1, array_length, 0);
- HArraySet* array_set_j_plus_1 = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check5 = new (&allocator_) HBoundsCheck(j_plus_1, array_length, 0);
+ HArraySet* array_set_j_plus_1 = new (&allocator_)
HArraySet(null_check, bounds_check5, array_get_j, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check5);
inner_body_swap->AddInstruction(array_set_j_plus_1);
// array[j] = temp
- null_check = new (&allocator) HNullCheck(parameter, 0);
- array_length = new (&allocator) HArrayLength(null_check);
- HInstruction* bounds_check6 = new (&allocator) HBoundsCheck(phi_j, array_length, 0);
- HArraySet* array_set_j = new (&allocator)
+ null_check = new (&allocator_) HNullCheck(parameter, 0);
+ array_length = new (&allocator_) HArrayLength(null_check);
+ HInstruction* bounds_check6 = new (&allocator_) HBoundsCheck(phi_j, array_length, 0);
+ HArraySet* array_set_j = new (&allocator_)
HArraySet(null_check, bounds_check6, array_get_j_plus_1, Primitive::kPrimInt, 0);
inner_body_swap->AddInstruction(null_check);
inner_body_swap->AddInstruction(array_length);
inner_body_swap->AddInstruction(bounds_check6);
inner_body_swap->AddInstruction(array_set_j);
- inner_body_swap->AddInstruction(new (&allocator) HGoto());
+ inner_body_swap->AddInstruction(new (&allocator_) HGoto());
- HBasicBlock* inner_body_add = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(inner_body_add);
- add = new (&allocator) HAdd(Primitive::kPrimInt, phi_j, constant_1);
+ HBasicBlock* inner_body_add = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(inner_body_add);
+ add = new (&allocator_) HAdd(Primitive::kPrimInt, phi_j, constant_1);
inner_body_add->AddInstruction(add);
- inner_body_add->AddInstruction(new (&allocator) HGoto());
+ inner_body_add->AddInstruction(new (&allocator_) HGoto());
phi_j->AddInput(add);
- HBasicBlock* outer_body_add = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(outer_body_add);
- add = new (&allocator) HAdd(Primitive::kPrimInt, phi_i, constant_1);
+ HBasicBlock* outer_body_add = new (&allocator_) HBasicBlock(graph_);
+ graph_->AddBlock(outer_body_add);
+ add = new (&allocator_) HAdd(Primitive::kPrimInt, phi_i, constant_1);
outer_body_add->AddInstruction(add);
- outer_body_add->AddInstruction(new (&allocator) HGoto());
+ outer_body_add->AddInstruction(new (&allocator_) HGoto());
phi_i->AddInput(add);
block->AddSuccessor(outer_header);
@@ -1020,19 +938,8 @@
inner_body_add->AddSuccessor(inner_header);
outer_body_add->AddSuccessor(outer_header);
- graph->BuildDominatorTree();
- graph->AnalyzeNaturalLoops();
- RunSimplifierAndGvn(graph);
- // gvn should remove the same bounds check.
- ASSERT_FALSE(IsRemoved(bounds_check1));
- ASSERT_FALSE(IsRemoved(bounds_check2));
- ASSERT_TRUE(IsRemoved(bounds_check3));
- ASSERT_TRUE(IsRemoved(bounds_check4));
- ASSERT_TRUE(IsRemoved(bounds_check5));
- ASSERT_TRUE(IsRemoved(bounds_check6));
+ RunBCE(); // gvn removes same bounds check already
- BoundsCheckElimination bounds_check_elimination(graph);
- bounds_check_elimination.Run();
ASSERT_TRUE(IsRemoved(bounds_check1));
ASSERT_TRUE(IsRemoved(bounds_check2));
ASSERT_TRUE(IsRemoved(bounds_check3));
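The per-test boilerplate deleted above (building the dominator tree, analyzing natural loops, running the simplifier and GVN, then constructing and running BoundsCheckElimination) is now funneled through a shared RunBCE() helper. Its definition lives earlier in the test file, outside this hunk, so the following is only a minimal sketch, assuming the helper does nothing more than bundle the passes the removed lines ran one by one:

// Sketch of the fixture helper; the real body is defined earlier in
// bounds_check_elimination_test.cc and may differ in detail.
void RunBCE() {
  graph_->BuildDominatorTree();
  graph_->AnalyzeNaturalLoops();
  RunSimplifierAndGvn(graph_);            // GVN already removes the duplicated bounds checks ...
  BoundsCheckElimination(graph_).Run();   // ... and BCE removes the remaining ones.
}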
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 0a3f083..97545ed 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -339,11 +339,10 @@
// Bit vector stores information on which blocks contain throwing instructions.
// Must be expandable because catch blocks may be split into two.
- ArenaBitVector can_block_throw(arena_, graph_->GetBlocks().Size(), /* expandable */ true);
+ ArenaBitVector can_block_throw(arena_, graph_->GetBlocks().size(), /* expandable */ true);
// Scan blocks and mark those which contain throwing instructions.
- for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
- HBasicBlock* block = graph_->GetBlocks().Get(block_id);
+ for (HBasicBlock* block : graph_->GetBlocks()) {
bool can_throw = false;
for (HInstructionIterator insn(block->GetInstructions()); !insn.Done(); insn.Advance()) {
if (insn.Current()->CanThrow()) {
@@ -381,9 +380,7 @@
// (c) link the new blocks to corresponding exception handlers.
// We cannot iterate only over blocks in `branch_targets_` because switch-case
// blocks share the same dex_pc.
- for (size_t block_id = 0, e = graph_->GetBlocks().Size(); block_id < e; ++block_id) {
- HBasicBlock* try_block = graph_->GetBlocks().Get(block_id);
-
+ for (HBasicBlock* try_block : graph_->GetBlocks()) {
// TryBoundary blocks are added at the end of the list and not iterated over.
DCHECK(!try_block->IsSingleTryBoundary());
@@ -465,7 +462,7 @@
}
bool HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
- DCHECK(graph_->GetBlocks().IsEmpty());
+ DCHECK(graph_->GetBlocks().empty());
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 3bbff6a..50108a7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -155,13 +155,14 @@
}
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
- DCHECK_EQ(block_order_->Get(current_block_index_), current);
+ DCHECK_LT(current_block_index_, block_order_->size());
+ DCHECK_EQ((*block_order_)[current_block_index_], current);
return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
}
HBasicBlock* CodeGenerator::GetNextBlockToEmit() const {
- for (size_t i = current_block_index_ + 1; i < block_order_->Size(); ++i) {
- HBasicBlock* block = block_order_->Get(i);
+ for (size_t i = current_block_index_ + 1; i < block_order_->size(); ++i) {
+ HBasicBlock* block = (*block_order_)[i];
if (!block->IsSingleJump()) {
return block;
}
@@ -225,8 +226,8 @@
disasm_info_->SetFrameEntryInterval(frame_start, GetAssembler()->CodeSize());
}
- for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
- HBasicBlock* block = block_order_->Get(current_block_index_);
+ for (size_t e = block_order_->size(); current_block_index_ < e; ++current_block_index_) {
+ HBasicBlock* block = (*block_order_)[current_block_index_];
// Don't generate code for an empty block. Its predecessors will branch to its successor
// directly. Also, the label of that block will not be emitted, so this helps catch
// errors where we reference that label.
@@ -305,9 +306,10 @@
size_t maximum_number_of_live_core_registers,
size_t maximum_number_of_live_fp_registers,
size_t number_of_out_slots,
- const GrowableArray<HBasicBlock*>& block_order) {
+ const ArenaVector<HBasicBlock*>& block_order) {
block_order_ = &block_order;
- DCHECK(block_order_->Get(0) == GetGraph()->GetEntryBlock());
+ DCHECK(!block_order.empty());
+ DCHECK(block_order[0] == GetGraph()->GetEntryBlock());
ComputeSpillMask();
first_register_slot_in_slow_path_ = (number_of_out_slots + number_of_spill_slots) * kVRegSize;
@@ -540,24 +542,33 @@
}
}
+void CodeGenerator::MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count) const {
+ if (stats_ != nullptr) {
+ stats_->RecordStat(compilation_stat, count);
+ }
+}
+
CodeGenerator* CodeGenerator::Create(HGraph* graph,
InstructionSet instruction_set,
const InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options) {
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats) {
switch (instruction_set) {
#ifdef ART_ENABLE_CODEGEN_arm
case kArm:
case kThumb2: {
return new arm::CodeGeneratorARM(graph,
- *isa_features.AsArmInstructionSetFeatures(),
- compiler_options);
+ *isa_features.AsArmInstructionSetFeatures(),
+ compiler_options,
+ stats);
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case kArm64: {
return new arm64::CodeGeneratorARM64(graph,
- *isa_features.AsArm64InstructionSetFeatures(),
- compiler_options);
+ *isa_features.AsArm64InstructionSetFeatures(),
+ compiler_options,
+ stats);
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
@@ -570,22 +581,25 @@
#ifdef ART_ENABLE_CODEGEN_mips64
case kMips64: {
return new mips64::CodeGeneratorMIPS64(graph,
- *isa_features.AsMips64InstructionSetFeatures(),
- compiler_options);
+ *isa_features.AsMips64InstructionSetFeatures(),
+ compiler_options,
+ stats);
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case kX86: {
return new x86::CodeGeneratorX86(graph,
- *isa_features.AsX86InstructionSetFeatures(),
- compiler_options);
+ *isa_features.AsX86InstructionSetFeatures(),
+ compiler_options,
+ stats);
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case kX86_64: {
return new x86_64::CodeGeneratorX86_64(graph,
- *isa_features.AsX86_64InstructionSetFeatures(),
- compiler_options);
+ *isa_features.AsX86_64InstructionSetFeatures(),
+ compiler_options,
+ stats);
}
#endif
default:
@@ -632,8 +646,7 @@
}
// Walk over the blocks and find which ones correspond to catch block entries.
- for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
- HBasicBlock* block = graph_->GetBlocks().Get(i);
+ for (HBasicBlock* block : graph_->GetBlocks()) {
if (block->IsCatchBlock()) {
intptr_t native_pc = GetAddressOf(block);
++dex2pc_entries;
@@ -671,8 +684,7 @@
pc2dex_dalvik_offset = stack_map_entry.dex_pc;
}
- for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
- HBasicBlock* block = graph_->GetBlocks().Get(i);
+ for (HBasicBlock* block : graph_->GetBlocks()) {
if (block->IsCatchBlock()) {
intptr_t native_pc = GetAddressOf(block);
write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
@@ -699,8 +711,7 @@
CHECK_EQ(stack_map_entry.dex_pc, it.DexPc());
++it;
}
- for (size_t i = 0; i < graph_->GetBlocks().Size(); ++i) {
- HBasicBlock* block = graph_->GetBlocks().Get(i);
+ for (HBasicBlock* block : graph_->GetBlocks()) {
if (block->IsCatchBlock()) {
CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
CHECK_EQ(block->GetDexPc(), it2.DexPc());
@@ -814,8 +825,7 @@
void CodeGenerator::RecordCatchBlockInfo() {
ArenaAllocator* arena = graph_->GetArena();
- for (size_t i = 0, e = block_order_->Size(); i < e; ++i) {
- HBasicBlock* block = block_order_->Get(i);
+ for (HBasicBlock* block : *block_order_) {
if (!block->IsCatchBlock()) {
continue;
}
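The stats plumbing added above keeps the null handling in one place: every backend receives an optional OptimizingCompilerStats* and can call MaybeRecordStat() unconditionally, which forwards to RecordStat() only when a collector was supplied. A minimal call-site sketch; the enclosing function is hypothetical and kCompiledBaseline is merely an example stat kind, not something this patch touches:

// Hypothetical call site; only MaybeRecordStat() and the null-guarded stats_ member come from
// the change above.
void NoteBaselineCompilation(CodeGenerator* codegen) {
  // Safe whether or not the code generator was created with a stats collector:
  // MaybeRecordStat() simply does nothing when stats_ is null.
  codegen->MaybeRecordStat(kCompiledBaseline);
}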
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index a93d07a..d2af56a 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -28,6 +28,7 @@
#include "locations.h"
#include "memory_region.h"
#include "nodes.h"
+#include "optimizing_compiler_stats.h"
#include "stack_map_stream.h"
namespace art {
@@ -144,7 +145,8 @@
static CodeGenerator* Create(HGraph* graph,
InstructionSet instruction_set,
const InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options);
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGenerator() {}
HGraph* GetGraph() const { return graph_; }
@@ -176,7 +178,7 @@
size_t maximum_number_of_live_core_registers,
size_t maximum_number_of_live_fp_registers,
size_t number_of_out_slots,
- const GrowableArray<HBasicBlock*>& block_order);
+ const ArenaVector<HBasicBlock*>& block_order);
int32_t GetStackSlot(HLocal* local) const;
Location GetTemporaryLocation(HTemporary* temp) const;
@@ -209,6 +211,8 @@
const CompilerOptions& GetCompilerOptions() const { return compiler_options_; }
+ void MaybeRecordStat(MethodCompilationStat compilation_stat, size_t count = 1) const;
+
// Saves the register in the stack. Returns the size taken on stack.
virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
// Restores the register from the stack. Returns the size taken on stack.
@@ -392,7 +396,8 @@
size_t number_of_register_pairs,
uint32_t core_callee_save_mask,
uint32_t fpu_callee_save_mask,
- const CompilerOptions& compiler_options)
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: frame_size_(0),
core_spill_mask_(0),
fpu_spill_mask_(0),
@@ -409,6 +414,7 @@
block_order_(nullptr),
is_baseline_(false),
disasm_info_(nullptr),
+ stats_(stats),
graph_(graph),
compiler_options_(compiler_options),
src_map_(nullptr),
@@ -488,7 +494,7 @@
StackMapStream stack_map_stream_;
// The order to use for code generation.
- const GrowableArray<HBasicBlock*>* block_order_;
+ const ArenaVector<HBasicBlock*>* block_order_;
// Whether we are using baseline.
bool is_baseline_;
@@ -503,6 +509,8 @@
void BlockIfInRegister(Location location, bool is_out = false) const;
void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);
+ OptimizingCompilerStats* stats_;
+
HGraph* const graph_;
const CompilerOptions& compiler_options_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index b3e38f0..c3d63b9 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -402,7 +402,8 @@
CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
const ArmInstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfCoreRegisters,
kNumberOfSRegisters,
@@ -411,7 +412,8 @@
arraysize(kCoreCalleeSaves)),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
arraysize(kFpuCalleeSaves)),
- compiler_options),
+ compiler_options,
+ stats),
block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
@@ -436,8 +438,7 @@
stack_map_stream_.SetStackMapNativePcOffset(i, new_position);
}
// Adjust native pc offsets of block labels.
- for (size_t block_idx = 0u, end = block_order_->Size(); block_idx != end; ++block_idx) {
- HBasicBlock* block = block_order_->Get(block_idx);
+ for (HBasicBlock* block : *block_order_) {
// Get the label directly from block_labels_ rather than through GetLabelOf() to avoid
// FirstNonEmptyBlock() which could lead to adjusting a label more than once.
DCHECK_LT(static_cast<size_t>(block->GetBlockId()), block_labels_.Size());
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 4a0df4e..e44209d 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -231,7 +231,8 @@
public:
CodeGeneratorARM(HGraph* graph,
const ArmInstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options);
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARM() {}
void GenerateFrameEntry() OVERRIDE;
@@ -309,7 +310,7 @@
}
void Initialize() OVERRIDE {
- block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ block_labels_.SetSize(GetGraph()->GetBlocks().size());
}
void Finalize(CodeAllocator* allocator) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 5094f67..377eaf6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -510,14 +510,16 @@
CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
const Arm64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfAllocatableRegisters,
kNumberOfAllocatableFPRegisters,
kNumberOfAllocatableRegisterPairs,
callee_saved_core_registers.list(),
callee_saved_fp_registers.list(),
- compiler_options),
+ compiler_options,
+ stats),
block_labels_(nullptr),
location_builder_(graph, this),
instruction_visitor_(graph, this),
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 12ead7e..3211a83 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -248,7 +248,8 @@
public:
CodeGeneratorARM64(HGraph* graph,
const Arm64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options);
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorARM64() {}
void GenerateFrameEntry() OVERRIDE;
@@ -326,7 +327,7 @@
void Initialize() OVERRIDE {
HGraph* graph = GetGraph();
- int length = graph->GetBlocks().Size();
+ int length = graph->GetBlocks().size();
block_labels_ = graph->GetArena()->AllocArray<vixl::Label>(length);
for (int i = 0; i < length; ++i) {
new(block_labels_ + i) vixl::Label();
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 8d60026..0787e49 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -418,7 +418,8 @@
CodeGeneratorMIPS64::CodeGeneratorMIPS64(HGraph* graph,
const Mips64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfGpuRegisters,
kNumberOfFpuRegisters,
@@ -427,7 +428,8 @@
arraysize(kCoreCalleeSaves)),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
arraysize(kFpuCalleeSaves)),
- compiler_options),
+ compiler_options,
+ stats),
block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index ae7c568..c754838 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -220,7 +220,8 @@
public:
CodeGeneratorMIPS64(HGraph* graph,
const Mips64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options);
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorMIPS64() {}
void GenerateFrameEntry() OVERRIDE;
@@ -273,7 +274,7 @@
}
void Initialize() OVERRIDE {
- block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ block_labels_.SetSize(GetGraph()->GetBlocks().size());
}
void Finalize(CodeAllocator* allocator) OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index dc5c86e..c7ddabb 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -431,7 +431,8 @@
CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
const X86InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfCpuRegisters,
kNumberOfXmmRegisters,
@@ -439,8 +440,9 @@
ComputeRegisterMask(reinterpret_cast<const int*>(kCoreCalleeSaves),
arraysize(kCoreCalleeSaves))
| (1 << kFakeReturnRegister),
- 0,
- compiler_options),
+ 0,
+ compiler_options,
+ stats),
block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
@@ -1312,7 +1314,7 @@
}
// Convert the jumps into the result.
- Label done_label;
+ NearLabel done_label;
// False case: result = 0.
__ Bind(&false_label);
@@ -1968,7 +1970,7 @@
XmmRegister input = in.AsFpuRegister<XmmRegister>();
Register output = out.AsRegister<Register>();
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// temp = int-to-float(output)
@@ -1993,7 +1995,7 @@
XmmRegister input = in.AsFpuRegister<XmmRegister>();
Register output = out.AsRegister<Register>();
XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// temp = int-to-double(output)
@@ -2652,7 +2654,7 @@
PushOntoFPStack(first, 0, 2 * elem_size, /* is_fp */ true, is_wide);
// Loop doing FPREM until we stabilize.
- Label retry;
+ NearLabel retry;
__ Bind(&retry);
__ fprem();
@@ -2766,8 +2768,8 @@
int shift;
CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift);
- Label ndiv;
- Label end;
+ NearLabel ndiv;
+ NearLabel end;
// If numerator is 0, the result is 0, no computation needed.
__ testl(eax, eax);
__ j(kNotEqual, &ndiv);
@@ -3243,7 +3245,7 @@
}
void InstructionCodeGeneratorX86::GenerateShlLong(const Location& loc, Register shifter) {
- Label done;
+ NearLabel done;
__ shld(loc.AsRegisterPairHigh<Register>(), loc.AsRegisterPairLow<Register>(), shifter);
__ shll(loc.AsRegisterPairLow<Register>(), shifter);
__ testl(shifter, Immediate(32));
@@ -3275,7 +3277,7 @@
}
void InstructionCodeGeneratorX86::GenerateShrLong(const Location& loc, Register shifter) {
- Label done;
+ NearLabel done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
__ sarl(loc.AsRegisterPairHigh<Register>(), shifter);
__ testl(shifter, Immediate(32));
@@ -3310,7 +3312,7 @@
}
void InstructionCodeGeneratorX86::GenerateUShrLong(const Location& loc, Register shifter) {
- Label done;
+ NearLabel done;
__ shrd(loc.AsRegisterPairLow<Register>(), loc.AsRegisterPairHigh<Register>(), shifter);
__ shrl(loc.AsRegisterPairHigh<Register>(), shifter);
__ testl(shifter, Immediate(32));
@@ -3485,7 +3487,7 @@
Location left = locations->InAt(0);
Location right = locations->InAt(1);
- Label less, greater, done;
+ NearLabel less, greater, done;
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
Register left_low = left.AsRegisterPairLow<Register>();
@@ -3709,7 +3711,7 @@
Register object,
Register value,
bool value_can_be_null) {
- Label is_null;
+ NearLabel is_null;
if (value_can_be_null) {
__ testl(value, value);
__ j(kEqual, &is_null);
@@ -4946,7 +4948,7 @@
Location cls = locations->InAt(1);
Register out = locations->Out().AsRegister<Register>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Label done, zero;
+ NearLabel done, zero;
SlowPathCodeX86* slow_path = nullptr;
// Return 0 if `obj` is null.
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index bd7cb12..c63634d 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -220,7 +220,8 @@
public:
CodeGeneratorX86(HGraph* graph,
const X86InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options);
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86() {}
void GenerateFrameEntry() OVERRIDE;
@@ -312,7 +313,7 @@
}
void Initialize() OVERRIDE {
- block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ block_labels_.SetSize(GetGraph()->GetBlocks().size());
}
bool NeedsTwoRegisters(Primitive::Type type) const OVERRIDE {
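The Label to NearLabel substitutions in the x86 hunks above (the x86-64 file below gets the same treatment) all target branches whose destination is only a handful of instructions away, which lets the assembler emit short jumps with an 8-bit displacement instead of the 32-bit form. The emission pattern itself is unchanged, as in the card-marking code above, where `__` is the file-local assembler macro:

// Same sequence as before; only the label type changed.
NearLabel is_null;          // bound a few instructions later, well within an 8-bit displacement
__ testl(value, value);     // skip the card mark entirely when the stored value is null
__ j(kEqual, &is_null);
//   ... mark the card for `object` ...
__ Bind(&is_null);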
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 0cf1089..82c037a 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -580,7 +580,8 @@
static constexpr Register kFakeReturnRegister = Register(kLastCpuRegister + 1);
CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
const X86_64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options)
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats)
: CodeGenerator(graph,
kNumberOfCpuRegisters,
kNumberOfFloatRegisters,
@@ -590,7 +591,8 @@
| (1 << kFakeReturnRegister),
ComputeRegisterMask(reinterpret_cast<const int*>(kFpuCalleeSaves),
arraysize(kFpuCalleeSaves)),
- compiler_options),
+ compiler_options,
+ stats),
block_labels_(graph->GetArena(), 0),
location_builder_(graph, this),
instruction_visitor_(graph, this),
@@ -1324,7 +1326,7 @@
}
// Convert the jumps into the result.
- Label done_label;
+ NearLabel done_label;
// False case: result = 0.
__ Bind(&false_label);
@@ -1413,7 +1415,7 @@
Location left = locations->InAt(0);
Location right = locations->InAt(1);
- Label less, greater, done;
+ NearLabel less, greater, done;
Primitive::Type type = compare->InputAt(0)->GetType();
switch (type) {
case Primitive::kPrimLong: {
@@ -2123,7 +2125,7 @@
// Processing a Dex `float-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// if input >= (float)INT_MAX goto done
@@ -2145,7 +2147,7 @@
// Processing a Dex `double-to-int' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
__ movl(output, Immediate(kPrimIntMax));
// if input >= (double)INT_MAX goto done
@@ -2187,7 +2189,7 @@
// Processing a Dex `float-to-long' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
codegen_->Load64BitValue(output, kPrimLongMax);
// if input >= (float)LONG_MAX goto done
@@ -2209,7 +2211,7 @@
// Processing a Dex `double-to-long' instruction.
XmmRegister input = in.AsFpuRegister<XmmRegister>();
CpuRegister output = out.AsRegister<CpuRegister>();
- Label done, nan;
+ NearLabel done, nan;
codegen_->Load64BitValue(output, kPrimLongMax);
// if input >= (double)LONG_MAX goto done
@@ -2772,7 +2774,7 @@
PushOntoFPStack(first, 0, 2 * elem_size, is_float);
// Loop doing FPREM until we stabilize.
- Label retry;
+ NearLabel retry;
__ Bind(&retry);
__ fprem();
@@ -2926,8 +2928,8 @@
__ movl(numerator, eax);
- Label no_div;
- Label end;
+ NearLabel no_div;
+ NearLabel end;
__ testl(eax, eax);
__ j(kNotEqual, &no_div);
@@ -4247,7 +4249,7 @@
CpuRegister object,
CpuRegister value,
bool value_can_be_null) {
- Label is_null;
+ NearLabel is_null;
if (value_can_be_null) {
__ testl(value, value);
__ j(kEqual, &is_null);
@@ -4674,7 +4676,7 @@
Location cls = locations->InAt(1);
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
- Label done, zero;
+ NearLabel done, zero;
SlowPathCodeX86_64* slow_path = nullptr;
// Return 0 if `obj` is null.
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index f9d8e04..522f036 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -220,7 +220,8 @@
public:
CodeGeneratorX86_64(HGraph* graph,
const X86_64InstructionSetFeatures& isa_features,
- const CompilerOptions& compiler_options);
+ const CompilerOptions& compiler_options,
+ OptimizingCompilerStats* stats = nullptr);
virtual ~CodeGeneratorX86_64() {}
void GenerateFrameEntry() OVERRIDE;
@@ -297,7 +298,7 @@
}
void Initialize() OVERRIDE {
- block_labels_.SetSize(GetGraph()->GetBlocks().Size());
+ block_labels_.SetSize(GetGraph()->GetBlocks().size());
}
bool NeedsTwoRegisters(Primitive::Type type ATTRIBUTE_UNUSED) const OVERRIDE {
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 72c67f5..5fc305c 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -193,7 +193,7 @@
bool has_result,
Expected expected) {
// Tests may have already computed it.
- if (graph->GetReversePostOrder().IsEmpty()) {
+ if (graph->GetReversePostOrder().empty()) {
graph->BuildDominatorTree();
}
SsaLivenessAnalysis liveness(graph, codegen);
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index 509478c..7d509a2 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -64,8 +64,8 @@
void HDeadCodeElimination::RemoveDeadBlocks() {
// Classify blocks as reachable/unreachable.
ArenaAllocator* allocator = graph_->GetArena();
- ArenaBitVector live_blocks(allocator, graph_->GetBlocks().Size(), false);
- ArenaBitVector affected_loops(allocator, graph_->GetBlocks().Size(), false);
+ ArenaBitVector live_blocks(allocator, graph_->GetBlocks().size(), false);
+ ArenaBitVector affected_loops(allocator, graph_->GetBlocks().size(), false);
MarkReachableBlocks(graph_->GetEntryBlock(), &live_blocks);
bool removed_one_or_more_blocks = false;
diff --git a/compiler/optimizing/dominator_test.cc b/compiler/optimizing/dominator_test.cc
index 78ae1dd..6b18650 100644
--- a/compiler/optimizing/dominator_test.cc
+++ b/compiler/optimizing/dominator_test.cc
@@ -24,7 +24,7 @@
namespace art {
-static void TestCode(const uint16_t* data, const int* blocks, size_t blocks_length) {
+static void TestCode(const uint16_t* data, const uint32_t* blocks, size_t blocks_length) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HGraph* graph = CreateGraph(&allocator);
@@ -33,19 +33,19 @@
bool graph_built = builder.BuildGraph(*item);
ASSERT_TRUE(graph_built);
graph->BuildDominatorTree();
- ASSERT_EQ(graph->GetBlocks().Size(), blocks_length);
+ ASSERT_EQ(graph->GetBlocks().size(), blocks_length);
for (size_t i = 0, e = blocks_length; i < e; ++i) {
- if (blocks[i] == -1) {
- if (graph->GetBlocks().Get(i) == nullptr) {
+ if (blocks[i] == kInvalidBlockId) {
+ if (graph->GetBlock(i) == nullptr) {
// Dead block.
} else {
// Only the entry block has no dominator.
- ASSERT_EQ(nullptr, graph->GetBlocks().Get(i)->GetDominator());
- ASSERT_TRUE(graph->GetBlocks().Get(i)->IsEntryBlock());
+ ASSERT_EQ(nullptr, graph->GetBlock(i)->GetDominator());
+ ASSERT_TRUE(graph->GetBlock(i)->IsEntryBlock());
}
} else {
- ASSERT_NE(nullptr, graph->GetBlocks().Get(i)->GetDominator());
- ASSERT_EQ(blocks[i], graph->GetBlocks().Get(i)->GetDominator()->GetBlockId());
+ ASSERT_NE(nullptr, graph->GetBlock(i)->GetDominator());
+ ASSERT_EQ(blocks[i], graph->GetBlock(i)->GetDominator()->GetBlockId());
}
}
}
@@ -54,10 +54,10 @@
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID); // Block number 1
- const int dominators[] = {
- -1,
- 0,
- 1
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -68,11 +68,11 @@
Instruction::GOTO | 0x100, // Block number 1
Instruction::RETURN_VOID); // Block number 2
- const int dominators[] = {
- -1,
- 0,
- 1,
- 2
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1,
+ 2
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -84,12 +84,12 @@
Instruction::GOTO | 0x100, // Block number 2
Instruction::RETURN_VOID); // Block number 3
- const int dominators[] = {
- -1,
- 0,
- 1,
- 2,
- 3
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1,
+ 2,
+ 3
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -101,12 +101,12 @@
Instruction::RETURN_VOID, // Block number 2
Instruction::GOTO | 0xFF00); // Block number 3
- const int dominators[] = {
- -1,
- 0,
- 3,
- 1,
- 2
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 3,
+ 1,
+ 2
};
TestCode(data1, dominators, sizeof(dominators) / sizeof(int));
@@ -131,10 +131,10 @@
Instruction::NOP,
Instruction::GOTO | 0xFF00);
- const int dominators[] = {
- -1,
- 0,
- -1
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ kInvalidBlockId
};
TestCode(data1, dominators, sizeof(dominators) / sizeof(int));
@@ -152,11 +152,11 @@
Instruction::GOTO | 0xFE00); // Block number 2
- const int dominators[] = {
- -1,
- 0,
- -1,
- 1
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ kInvalidBlockId,
+ 1
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -169,13 +169,13 @@
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
- const int dominators[] = {
- -1,
- 0,
- 1,
- 1,
- 3,
- 1, // Synthesized block to avoid critical edge.
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1,
+ 1,
+ 3,
+ 1, // Synthesized block to avoid critical edge.
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -188,14 +188,14 @@
Instruction::GOTO | 0x100, // Block number 2
Instruction::GOTO | 0xFF00); // Block number 3
- const int dominators[] = {
- -1,
- 0,
- 1,
- 1,
- -1, // exit block is not dominated by any block due to the spin loop.
- 1, // block to avoid critical edge.
- 1 // block to avoid critical edge.
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1,
+ 1,
+ kInvalidBlockId, // exit block is not dominated by any block due to the spin loop.
+ 1, // block to avoid critical edge.
+ 1 // block to avoid critical edge.
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -209,14 +209,14 @@
Instruction::GOTO | 0x100, // Block number 3
Instruction::GOTO | 0xFF00); // Block number 4
- const int dominators[] = {
- -1,
- 0,
- 1,
- 1,
- 1,
- -1, // exit block is not dominated by any block due to the spin loop.
- 1 // block to avoid critical edge.
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1,
+ 1,
+ 1,
+ kInvalidBlockId, // exit block is not dominated by any block due to the spin loop.
+ 1 // block to avoid critical edge.
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -230,14 +230,14 @@
Instruction::GOTO | 0x100, // Block number 3
Instruction::GOTO | 0xFE00); // Block number 4
- const int dominators[] = {
- -1,
- 0,
- 1,
- 1,
- 1,
- -1, // exit block is not dominated by any block due to the spin loop.
- 1 // block to avoid critical edge.
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1,
+ 1,
+ 1,
+ kInvalidBlockId, // exit block is not dominated by any block due to the spin loop.
+ 1 // block to avoid critical edge.
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
@@ -252,16 +252,16 @@
Instruction::GOTO | 0x100, // Block number 4
Instruction::RETURN_VOID); // Block number 5
- const int dominators[] = {
- -1,
- 0,
- 1,
- 2,
- 2,
- 1,
- 5, // Block number 5 dominates exit block
- 1, // block to avoid critical edge.
- 2 // block to avoid critical edge.
+ const uint32_t dominators[] = {
+ kInvalidBlockId,
+ 0,
+ 1,
+ 2,
+ 2,
+ 1,
+ 5, // Block number 5 dominates exit block
+ 1, // block to avoid critical edge.
+ 2 // block to avoid critical edge.
};
TestCode(data, dominators, sizeof(dominators) / sizeof(int));
diff --git a/compiler/optimizing/find_loops_test.cc b/compiler/optimizing/find_loops_test.cc
index 29aa97a..9e0d352 100644
--- a/compiler/optimizing/find_loops_test.cc
+++ b/compiler/optimizing/find_loops_test.cc
@@ -46,8 +46,8 @@
ArenaPool arena;
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+ for (HBasicBlock* block : graph->GetBlocks()) {
+ ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
@@ -59,8 +59,8 @@
ArenaPool arena;
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+ for (HBasicBlock* block : graph->GetBlocks()) {
+ ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
@@ -75,8 +75,8 @@
ArenaPool arena;
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+ for (HBasicBlock* block : graph->GetBlocks()) {
+ ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
@@ -92,8 +92,8 @@
ArenaPool arena;
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+ for (HBasicBlock* block : graph->GetBlocks()) {
+ ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
@@ -107,20 +107,20 @@
ArenaPool arena;
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- ASSERT_EQ(graph->GetBlocks().Get(i)->GetLoopInformation(), nullptr);
+ for (HBasicBlock* block : graph->GetBlocks()) {
+ ASSERT_EQ(block->GetLoopInformation(), nullptr);
}
}
static void TestBlock(HGraph* graph,
- int block_id,
+ uint32_t block_id,
bool is_loop_header,
- int parent_loop_header_id,
+ uint32_t parent_loop_header_id,
const int* blocks_in_loop = nullptr,
size_t number_of_blocks = 0) {
- HBasicBlock* block = graph->GetBlocks().Get(block_id);
+ HBasicBlock* block = graph->GetBlock(block_id);
ASSERT_EQ(block->IsLoopHeader(), is_loop_header);
- if (parent_loop_header_id == -1) {
+ if (parent_loop_header_id == kInvalidBlockId) {
ASSERT_EQ(block->GetLoopInformation(), nullptr);
} else {
ASSERT_EQ(block->GetLoopInformation()->GetHeader()->GetBlockId(), parent_loop_header_id);
@@ -154,13 +154,13 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // pre header
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // pre header
const int blocks2[] = {2, 3};
- TestBlock(graph, 2, true, 2, blocks2, 2); // loop header
- TestBlock(graph, 3, false, 2); // block in loop
- TestBlock(graph, 4, false, -1); // return block
- TestBlock(graph, 5, false, -1); // exit block
+ TestBlock(graph, 2, true, 2, blocks2, 2); // loop header
+ TestBlock(graph, 3, false, 2); // block in loop
+ TestBlock(graph, 4, false, kInvalidBlockId); // return block
+ TestBlock(graph, 5, false, kInvalidBlockId); // exit block
}
TEST(FindLoopsTest, Loop2) {
@@ -182,14 +182,14 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // goto block
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // goto block
const int blocks2[] = {2, 3};
- TestBlock(graph, 2, true, 2, blocks2, 2); // loop header
- TestBlock(graph, 3, false, 2); // block in loop
- TestBlock(graph, 4, false, -1); // pre header
- TestBlock(graph, 5, false, -1); // return block
- TestBlock(graph, 6, false, -1); // exit block
+ TestBlock(graph, 2, true, 2, blocks2, 2); // loop header
+ TestBlock(graph, 3, false, 2); // block in loop
+ TestBlock(graph, 4, false, kInvalidBlockId); // pre header
+ TestBlock(graph, 5, false, kInvalidBlockId); // return block
+ TestBlock(graph, 6, false, kInvalidBlockId); // exit block
}
TEST(FindLoopsTest, Loop3) {
@@ -207,16 +207,16 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // goto block
- TestBlock(graph, 2, false, -1);
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // goto block
+ TestBlock(graph, 2, false, kInvalidBlockId);
const int blocks2[] = {3, 4};
- TestBlock(graph, 3, true, 3, blocks2, 2); // loop header
- TestBlock(graph, 4, false, 3); // block in loop
- TestBlock(graph, 5, false, -1); // pre header
- TestBlock(graph, 6, false, -1); // return block
- TestBlock(graph, 7, false, -1); // exit block
- TestBlock(graph, 8, false, -1); // synthesized pre header
+ TestBlock(graph, 3, true, 3, blocks2, 2); // loop header
+ TestBlock(graph, 4, false, 3); // block in loop
+ TestBlock(graph, 5, false, kInvalidBlockId); // pre header
+ TestBlock(graph, 6, false, kInvalidBlockId); // return block
+ TestBlock(graph, 7, false, kInvalidBlockId); // exit block
+ TestBlock(graph, 8, false, kInvalidBlockId); // synthesized pre header
}
TEST(FindLoopsTest, Loop4) {
@@ -233,15 +233,15 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // pre header
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // pre header
const int blocks2[] = {2, 3, 4, 5};
TestBlock(graph, 2, true, 2, blocks2, arraysize(blocks2)); // loop header
- TestBlock(graph, 3, false, 2); // block in loop
- TestBlock(graph, 4, false, 2); // back edge
- TestBlock(graph, 5, false, 2); // back edge
- TestBlock(graph, 6, false, -1); // return block
- TestBlock(graph, 7, false, -1); // exit block
+ TestBlock(graph, 3, false, 2); // block in loop
+ TestBlock(graph, 4, false, 2); // back edge
+ TestBlock(graph, 5, false, 2); // back edge
+ TestBlock(graph, 6, false, kInvalidBlockId); // return block
+ TestBlock(graph, 7, false, kInvalidBlockId); // exit block
}
@@ -259,16 +259,16 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // pre header
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // pre header
const int blocks2[] = {2, 3, 5};
- TestBlock(graph, 2, true, 2, blocks2, 3); // loop header
- TestBlock(graph, 3, false, 2); // block in loop
- TestBlock(graph, 4, false, -1); // loop exit
- TestBlock(graph, 5, false, 2); // back edge
- TestBlock(graph, 6, false, -1); // return block
- TestBlock(graph, 7, false, -1); // exit block
- TestBlock(graph, 8, false, -1); // synthesized block at the loop exit
+ TestBlock(graph, 2, true, 2, blocks2, 3); // loop header
+ TestBlock(graph, 3, false, 2); // block in loop
+ TestBlock(graph, 4, false, kInvalidBlockId); // loop exit
+ TestBlock(graph, 5, false, 2); // back edge
+ TestBlock(graph, 6, false, kInvalidBlockId); // return block
+ TestBlock(graph, 7, false, kInvalidBlockId); // exit block
+ TestBlock(graph, 8, false, kInvalidBlockId); // synthesized block at the loop exit
}
TEST(FindLoopsTest, InnerLoop) {
@@ -284,22 +284,22 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // pre header of outer loop
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // pre header of outer loop
const int blocks2[] = {2, 3, 4, 5, 8};
- TestBlock(graph, 2, true, 2, blocks2, 5); // outer loop header
+ TestBlock(graph, 2, true, 2, blocks2, 5); // outer loop header
const int blocks3[] = {3, 4};
- TestBlock(graph, 3, true, 3, blocks3, 2); // inner loop header
- TestBlock(graph, 4, false, 3); // back edge on inner loop
- TestBlock(graph, 5, false, 2); // back edge on outer loop
- TestBlock(graph, 6, false, -1); // return block
- TestBlock(graph, 7, false, -1); // exit block
- TestBlock(graph, 8, false, 2); // synthesized block as pre header of inner loop
+ TestBlock(graph, 3, true, 3, blocks3, 2); // inner loop header
+ TestBlock(graph, 4, false, 3); // back edge on inner loop
+ TestBlock(graph, 5, false, 2); // back edge on outer loop
+ TestBlock(graph, 6, false, kInvalidBlockId); // return block
+ TestBlock(graph, 7, false, kInvalidBlockId); // exit block
+ TestBlock(graph, 8, false, 2); // synthesized block as pre header of inner loop
- ASSERT_TRUE(graph->GetBlocks().Get(3)->GetLoopInformation()->IsIn(
- *graph->GetBlocks().Get(2)->GetLoopInformation()));
- ASSERT_FALSE(graph->GetBlocks().Get(2)->GetLoopInformation()->IsIn(
- *graph->GetBlocks().Get(3)->GetLoopInformation()));
+ ASSERT_TRUE(graph->GetBlock(3)->GetLoopInformation()->IsIn(
+ *graph->GetBlock(2)->GetLoopInformation()));
+ ASSERT_FALSE(graph->GetBlock(2)->GetLoopInformation()->IsIn(
+ *graph->GetBlock(3)->GetLoopInformation()));
}
TEST(FindLoopsTest, TwoLoops) {
@@ -315,21 +315,21 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // pre header of first loop
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
const int blocks2[] = {2, 3};
- TestBlock(graph, 2, true, 2, blocks2, 2); // first loop header
- TestBlock(graph, 3, false, 2); // back edge of first loop
+ TestBlock(graph, 2, true, 2, blocks2, 2); // first loop header
+ TestBlock(graph, 3, false, 2); // back edge of first loop
const int blocks4[] = {4, 5};
- TestBlock(graph, 4, true, 4, blocks4, 2); // second loop header
- TestBlock(graph, 5, false, 4); // back edge of second loop
- TestBlock(graph, 6, false, -1); // return block
- TestBlock(graph, 7, false, -1); // exit block
+ TestBlock(graph, 4, true, 4, blocks4, 2); // second loop header
+ TestBlock(graph, 5, false, 4); // back edge of second loop
+ TestBlock(graph, 6, false, kInvalidBlockId); // return block
+ TestBlock(graph, 7, false, kInvalidBlockId); // exit block
- ASSERT_FALSE(graph->GetBlocks().Get(4)->GetLoopInformation()->IsIn(
- *graph->GetBlocks().Get(2)->GetLoopInformation()));
- ASSERT_FALSE(graph->GetBlocks().Get(2)->GetLoopInformation()->IsIn(
- *graph->GetBlocks().Get(4)->GetLoopInformation()));
+ ASSERT_FALSE(graph->GetBlock(4)->GetLoopInformation()->IsIn(
+ *graph->GetBlock(2)->GetLoopInformation()));
+ ASSERT_FALSE(graph->GetBlock(2)->GetLoopInformation()->IsIn(
+ *graph->GetBlock(4)->GetLoopInformation()));
}
TEST(FindLoopsTest, NonNaturalLoop) {
@@ -344,9 +344,10 @@
ArenaPool arena;
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- ASSERT_TRUE(graph->GetBlocks().Get(3)->IsLoopHeader());
- HLoopInformation* info = graph->GetBlocks().Get(3)->GetLoopInformation();
- ASSERT_FALSE(info->GetHeader()->Dominates(info->GetBackEdges().Get(0)));
+ ASSERT_TRUE(graph->GetBlock(3)->IsLoopHeader());
+ HLoopInformation* info = graph->GetBlock(3)->GetLoopInformation();
+ ASSERT_EQ(1u, info->NumberOfBackEdges());
+ ASSERT_FALSE(info->GetHeader()->Dominates(info->GetBackEdges()[0]));
}
TEST(FindLoopsTest, DoWhileLoop) {
@@ -360,14 +361,14 @@
ArenaAllocator allocator(&arena);
HGraph* graph = TestCode(data, &allocator);
- TestBlock(graph, 0, false, -1); // entry block
- TestBlock(graph, 1, false, -1); // pre header of first loop
+ TestBlock(graph, 0, false, kInvalidBlockId); // entry block
+ TestBlock(graph, 1, false, kInvalidBlockId); // pre header of first loop
const int blocks2[] = {2, 3, 6};
- TestBlock(graph, 2, true, 2, blocks2, 3); // loop header
- TestBlock(graph, 3, false, 2); // back edge of first loop
- TestBlock(graph, 4, false, -1); // return block
- TestBlock(graph, 5, false, -1); // exit block
- TestBlock(graph, 6, false, 2); // synthesized block to avoid a critical edge
+ TestBlock(graph, 2, true, 2, blocks2, 3); // loop header
+ TestBlock(graph, 3, false, 2); // back edge of first loop
+ TestBlock(graph, 4, false, kInvalidBlockId); // return block
+ TestBlock(graph, 5, false, kInvalidBlockId); // exit block
+ TestBlock(graph, 6, false, 2); // synthesized block to avoid a critical edge
}
} // namespace art
diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc
index 074ed71..af8aa23 100644
--- a/compiler/optimizing/graph_checker.cc
+++ b/compiler/optimizing/graph_checker.cc
@@ -475,14 +475,13 @@
const ArenaBitVector& loop_blocks = loop_information->GetBlocks();
// Ensure back edges belong to the loop.
- size_t num_back_edges = loop_information->GetBackEdges().Size();
- if (num_back_edges == 0) {
+ if (loop_information->NumberOfBackEdges() == 0) {
AddError(StringPrintf(
"Loop defined by header %d has no back edge.",
id));
} else {
- for (size_t i = 0; i < num_back_edges; ++i) {
- int back_edge_id = loop_information->GetBackEdges().Get(i)->GetBlockId();
+ for (HBasicBlock* back_edge : loop_information->GetBackEdges()) {
+ int back_edge_id = back_edge->GetBlockId();
if (!loop_blocks.IsBitSet(back_edge_id)) {
AddError(StringPrintf(
"Loop defined by header %d has an invalid back edge %d.",
@@ -494,7 +493,7 @@
// Ensure all blocks in the loop are live and dominated by the loop header.
for (uint32_t i : loop_blocks.Indexes()) {
- HBasicBlock* loop_block = GetGraph()->GetBlocks().Get(i);
+ HBasicBlock* loop_block = GetGraph()->GetBlock(i);
if (loop_block == nullptr) {
AddError(StringPrintf("Loop defined by header %d contains a previously removed block %d.",
id,
diff --git a/compiler/optimizing/graph_visualizer.cc b/compiler/optimizing/graph_visualizer.cc
index 5b8e386..60a5955 100644
--- a/compiler/optimizing/graph_visualizer.cc
+++ b/compiler/optimizing/graph_visualizer.cc
@@ -679,7 +679,7 @@
bool is_after_pass,
bool graph_in_bad_state) const {
DCHECK(output_ != nullptr);
- if (!graph_->GetBlocks().IsEmpty()) {
+ if (!graph_->GetBlocks().empty()) {
HGraphVisualizerPrinter printer(graph_,
*output_,
pass_name,
@@ -692,7 +692,7 @@
void HGraphVisualizer::DumpGraphWithDisassembly() const {
DCHECK(output_ != nullptr);
- if (!graph_->GetBlocks().IsEmpty()) {
+ if (!graph_->GetBlocks().empty()) {
HGraphVisualizerPrinter printer(graph_,
*output_,
"disassembly",
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 5bb4e8e..1ee8648 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -306,7 +306,7 @@
: graph_(graph),
allocator_(allocator),
side_effects_(side_effects),
- sets_(allocator, graph->GetBlocks().Size(), nullptr) {}
+ sets_(allocator, graph->GetBlocks().size(), nullptr) {}
void Run();
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index 3f5a6e7..92c732c 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -15,6 +15,7 @@
*/
#include "induction_var_analysis.h"
+#include "induction_var_range.h"
namespace art {
@@ -42,6 +43,40 @@
instruction->GetBlock() == loop->GetHeader();
}
+/**
+ * Since graph traversal may enter a SCC at any position, an initial representation may be rotated,
+ * along dependences, viz. any of (a, b, c, d), (d, a, b, c), (c, d, a, b), (b, c, d, a) assuming
+ * a chain of dependences (mutually independent items may occur in arbitrary order). For proper
+ * classification, the lexicographically first entry-phi is rotated to the front.
+ */
+static void RotateEntryPhiFirst(HLoopInformation* loop,
+ ArenaVector<HInstruction*>* scc,
+ ArenaVector<HInstruction*>* new_scc) {
+ // Find very first entry-phi.
+ const HInstructionList& phis = loop->GetHeader()->GetPhis();
+ HInstruction* phi = nullptr;
+ size_t phi_pos = -1;
+ const size_t size = scc->size();
+ for (size_t i = 0; i < size; i++) {
+ if (IsEntryPhi(loop, scc->at(i)) && (phi == nullptr || phis.FoundBefore(scc->at(i), phi))) {
+ phi = scc->at(i);
+ phi_pos = i;
+ }
+ }
+
+ // If found, bring that entry-phi to front.
+ if (phi != nullptr) {
+ new_scc->clear();
+ for (size_t i = 0; i < size; i++) {
+ DCHECK_LT(phi_pos, size);
+ new_scc->push_back(scc->at(phi_pos));
+ if (++phi_pos >= size) phi_pos = 0;
+ }
+ DCHECK_EQ(size, new_scc->size());
+ scc->swap(*new_scc);
+ }
+}
+
//
// Class methods.
//
@@ -203,7 +238,15 @@
void HInductionVarAnalysis::ClassifyNonTrivial(HLoopInformation* loop) {
const size_t size = scc_.size();
DCHECK_GE(size, 1u);
- HInstruction* phi = scc_[size - 1];
+
+ // Rotate proper entry-phi to front.
+ if (size > 1) {
+ ArenaVector<HInstruction*> other(graph_->GetArena()->Adapter());
+ RotateEntryPhiFirst(loop, &scc_, &other);
+ }
+
+ // Analyze from phi onwards.
+ HInstruction* phi = scc_[0];
if (!IsEntryPhi(loop, phi)) {
return;
}
@@ -225,7 +268,7 @@
// Inspect remainder of the cycle that resides in scc_. The cycle_ mapping assigns
// temporary meaning to its nodes, seeded from the phi instruction and back.
- for (size_t i = 0; i < size - 1; i++) {
+ for (size_t i = 1; i < size; i++) {
HInstruction* instruction = scc_[i];
InductionInfo* update = nullptr;
if (instruction->IsPhi()) {
@@ -249,19 +292,19 @@
InductionInfo* induction = it->second;
switch (induction->induction_class) {
case kInvariant:
- // Classify phi (last element in scc_) and then the rest of the cycle "on-demand".
- // Statements are scanned in the Tarjan SCC order, with phi first.
+ // Classify first phi and then the rest of the cycle "on-demand".
+ // Statements are scanned in order.
AssignInfo(loop, phi, CreateInduction(kLinear, induction, initial));
- for (size_t i = 0; i < size - 1; i++) {
+ for (size_t i = 1; i < size; i++) {
ClassifyTrivial(loop, scc_[i]);
}
break;
case kPeriodic:
- // Classify all elements in the cycle with the found periodic induction while rotating
- // each first element to the end. Lastly, phi (last element in scc_) is classified.
- // Statements are scanned in the reverse Tarjan SCC order, with phi last.
- for (size_t i = 2; i <= size; i++) {
- AssignInfo(loop, scc_[size - i], induction);
+ // Classify all elements in the cycle with the found periodic induction while
+ // rotating each first element to the end. Lastly, phi is classified.
+ // Statements are scanned in reverse order.
+ for (size_t i = size - 1; i >= 1; i--) {
+ AssignInfo(loop, scc_[i], induction);
induction = RotatePeriodicInduction(induction->op_b, induction->op_a);
}
AssignInfo(loop, phi, induction);
@@ -511,12 +554,15 @@
InductionInfo* stride = a->op_a;
InductionInfo* lo_val = a->op_b;
InductionInfo* hi_val = b;
- int64_t value = -1;
- if (IsIntAndGet(stride, &value)) {
- if ((value > 0 && (cmp == kCondLT || cmp == kCondLE)) ||
- (value < 0 && (cmp == kCondGT || cmp == kCondGE))) {
+ // Analyze the stride thoroughly, since its representation may be compound at this point.
+ InductionVarRange::Value v1 = InductionVarRange::GetMin(stride, nullptr);
+ InductionVarRange::Value v2 = InductionVarRange::GetMax(stride, nullptr);
+ if (v1.a_constant == 0 && v2.a_constant == 0 && v1.b_constant == v2.b_constant) {
+ const int32_t stride_value = v1.b_constant;
+ if ((stride_value > 0 && (cmp == kCondLT || cmp == kCondLE)) ||
+ (stride_value < 0 && (cmp == kCondGT || cmp == kCondGE))) {
bool is_strict = cmp == kCondLT || cmp == kCondGT;
- VisitTripCount(loop, lo_val, hi_val, stride, value, type, is_strict);
+ VisitTripCount(loop, lo_val, hi_val, stride, stride_value, type, is_strict);
}
}
}
@@ -544,7 +590,7 @@
// least once. Otherwise TC is 0. Also, the expression assumes the loop does not
// have any early-exits. Otherwise, TC is an upper bound.
//
- bool cancels = is_strict && abs(stride_value) == 1; // compensation cancels conversion?
+ bool cancels = is_strict && std::abs(stride_value) == 1; // compensation cancels conversion?
if (!cancels) {
// Convert exclusive integral inequality into inclusive integral inequality,
// viz. condition i < U is i <= U - 1 and condition i > U is i >= U + 1.
@@ -557,7 +603,7 @@
}
// Assign the trip-count expression to the loop control. Clients that use the information
- // should be aware that due to the L <= U assumption, the expression is only valid in the
+ // should be aware that due to the top-test assumption, the expression is only valid in the
// loop-body proper, and not yet in the loop-header. If the loop has any early exits, the
// trip-count forms a conservative upper bound on the number of loop iterations.
InductionInfo* trip_count =
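As a stand-alone illustration of the conversion described in the comments above, assume a top-tested loop for (i = L; i < U; i += S) with constant S > 0, no early exits, and a body that executes at least once; the helper below is not ART code, just the arithmetic:

#include <cassert>
#include <cstdint>

// Trip count of: for (i = L; i < U; i += S) with S > 0, assuming L < U and no early exits.
// The strict bound i < U is first rewritten as the inclusive bound i <= U - 1,
// then TC = (last - first) / stride + 1.
int64_t TripCount(int64_t L, int64_t U, int64_t S) {
  int64_t inclusive_upper = U - 1;  // i < U  ==>  i <= U - 1
  return (inclusive_upper - L) / S + 1;
}

int main() {
  assert(TripCount(0, 10, 1) == 10);  // i = 0 .. 9
  assert(TripCount(0, 10, 3) == 4);   // i = 0, 3, 6, 9
  assert(TripCount(5, 6, 2) == 1);    // single iteration
  return 0;
}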
diff --git a/compiler/optimizing/induction_var_range.cc b/compiler/optimizing/induction_var_range.cc
index bd90334..486e904 100644
--- a/compiler/optimizing/induction_var_range.cc
+++ b/compiler/optimizing/induction_var_range.cc
@@ -126,6 +126,7 @@
}
InductionVarRange::Value InductionVarRange::GetFetch(HInstruction* instruction,
+ HInductionVarAnalysis::InductionInfo* trip,
int32_t fail_value) {
// Detect constants and chase the fetch a bit deeper into the HIR tree, so that it becomes
// more likely range analysis will compare the same instructions as terminal nodes.
@@ -134,9 +135,16 @@
return Value(value);
} else if (instruction->IsAdd()) {
if (IsIntAndGet(instruction->InputAt(0), &value)) {
- return AddValue(Value(value), GetFetch(instruction->InputAt(1), fail_value), fail_value);
+ return AddValue(Value(value),
+ GetFetch(instruction->InputAt(1), trip, fail_value), fail_value);
} else if (IsIntAndGet(instruction->InputAt(1), &value)) {
- return AddValue(GetFetch(instruction->InputAt(0), fail_value), Value(value), fail_value);
+ return AddValue(GetFetch(instruction->InputAt(0), trip, fail_value),
+ Value(value), fail_value);
+ }
+ } else if (fail_value < 0) {
+ // Special case: within the loop-body, minimum of trip-count is 1.
+ if (trip != nullptr && instruction == trip->op_b->fetch) {
+ return Value(1);
}
}
return Value(instruction, 1, 0);
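The new trip parameter aside, GetFetch's job is to fold constant addends into the returned Value so that range analysis keeps comparing the same terminal instruction. A simplified, self-contained sketch of that folding; Node, Value and Fetch here are invented stand-ins for the HIR classes:

#include <cassert>
#include <cstdint>

// A fetched value is represented as a_constant * instruction + b_constant; additions with a
// constant operand are folded into b_constant. This mirrors only the shape of GetFetch.
struct Node {
  bool is_constant;
  int32_t constant;  // valid when is_constant
  const Node* lhs;   // non-null for additions
  const Node* rhs;
};

struct Value {
  const Node* instruction;
  int32_t a_constant;
  int32_t b_constant;
};

Value Fetch(const Node* n) {
  if (n->is_constant) {
    return {nullptr, 0, n->constant};
  }
  if (n->lhs != nullptr && n->rhs != nullptr) {  // addition
    if (n->lhs->is_constant) {
      Value v = Fetch(n->rhs);
      return {v.instruction, v.a_constant, v.b_constant + n->lhs->constant};
    }
    if (n->rhs->is_constant) {
      Value v = Fetch(n->lhs);
      return {v.instruction, v.a_constant, v.b_constant + n->rhs->constant};
    }
  }
  return {n, 1, 0};  // opaque fetch: 1 * n + 0
}

int main() {
  Node x{false, 0, nullptr, nullptr};  // some non-constant HIR value
  Node c{true, 7, nullptr, nullptr};   // constant 7
  Node add{false, 0, &x, &c};          // x + 7
  Value v = Fetch(&add);
  assert(v.instruction == &x && v.a_constant == 1 && v.b_constant == 7);
  return 0;
}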
@@ -163,7 +171,7 @@
case HInductionVarAnalysis::kDiv:
return GetDiv(info->op_a, info->op_b, trip, INT_MIN);
case HInductionVarAnalysis::kFetch:
- return GetFetch(info->fetch, INT_MIN);
+ return GetFetch(info->fetch, trip, INT_MIN);
}
break;
case HInductionVarAnalysis::kLinear:
@@ -200,7 +208,7 @@
case HInductionVarAnalysis::kDiv:
return GetDiv(info->op_a, info->op_b, trip, INT_MAX);
case HInductionVarAnalysis::kFetch:
- return GetFetch(info->fetch, INT_MAX);
+ return GetFetch(info->fetch, trip, INT_MAX);
}
break;
case HInductionVarAnalysis::kLinear:
diff --git a/compiler/optimizing/induction_var_range.h b/compiler/optimizing/induction_var_range.h
index b079076..e002e5f 100644
--- a/compiler/optimizing/induction_var_range.h
+++ b/compiler/optimizing/induction_var_range.h
@@ -27,7 +27,7 @@
* API to obtain a conservative lower and upper bound value on each instruction in the HIR.
*
* For example, given a linear induction 2 * i + x where 0 <= i <= 10, range analysis yields lower
- * bound value x and upper bound value x + 20 for the expression, thus, the range [0, x + 20].
+ * bound value x and upper bound value x + 20 for the expression, thus, the range [x, x + 20].
*/
class InductionVarRange {
public:
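Working out the documented example: with coefficient 2 and i ranging over [0, 10], the linear form 2 * i + x attains its extremes at the interval endpoints, giving offsets 0 and 20 relative to x. A tiny sketch of that arithmetic (assuming a non-negative coefficient, as in the example):

#include <cassert>
#include <cstdint>
#include <utility>

// Range of the linear induction a * i + x for i in [i_min, i_max], assuming a >= 0.
// The returned offsets are relative to the symbolic x; with a = 2 and i in [0, 10]
// this yields the documented bounds [x + 0, x + 20].
std::pair<int32_t, int32_t> LinearOffsets(int32_t a, int32_t i_min, int32_t i_max) {
  return {a * i_min, a * i_max};
}

int main() {
  auto offsets = LinearOffsets(2, 0, 10);
  assert(offsets.first == 0 && offsets.second == 20);  // range is [x, x + 20]
  return 0;
}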
@@ -39,7 +39,7 @@
*/
struct Value {
Value(HInstruction* i, int32_t a, int32_t b)
- : instruction(a ? i : nullptr),
+ : instruction(a != 0 ? i : nullptr),
a_constant(a),
b_constant(b) {}
explicit Value(int32_t b) : Value(nullptr, 0, b) {}
@@ -70,7 +70,9 @@
HInductionVarAnalysis::InductionInfo* GetTripCount(HLoopInformation* loop,
HInstruction* context);
- static Value GetFetch(HInstruction* instruction, int32_t fail_value);
+ static Value GetFetch(HInstruction* instruction,
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value);
static Value GetMin(HInductionVarAnalysis::InductionInfo* info,
HInductionVarAnalysis::InductionInfo* trip);
@@ -78,10 +80,12 @@
HInductionVarAnalysis::InductionInfo* trip);
static Value GetMul(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
- HInductionVarAnalysis::InductionInfo* trip, int32_t fail_value);
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value);
static Value GetDiv(HInductionVarAnalysis::InductionInfo* info1,
HInductionVarAnalysis::InductionInfo* info2,
- HInductionVarAnalysis::InductionInfo* trip, int32_t fail_value);
+ HInductionVarAnalysis::InductionInfo* trip,
+ int32_t fail_value);
static Value AddValue(Value v1, Value v2, int32_t fail_value);
static Value SubValue(Value v1, Value v2, int32_t fail_value);
@@ -93,6 +97,7 @@
/** Results of prior induction variable analysis. */
HInductionVarAnalysis *induction_analysis_;
+ friend class HInductionVarAnalysis;
friend class InductionVarRangeTest;
DISALLOW_COPY_AND_ASSIGN(InductionVarRange);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index efd4fcf..12fd1c3 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -48,16 +48,17 @@
// doing some logic in the runtime to discover if a method could have been inlined.
return;
}
- const GrowableArray<HBasicBlock*>& blocks = graph_->GetReversePostOrder();
- HBasicBlock* next_block = blocks.Get(0);
- for (size_t i = 0; i < blocks.Size(); ++i) {
+ const ArenaVector<HBasicBlock*>& blocks = graph_->GetReversePostOrder();
+ DCHECK(!blocks.empty());
+ HBasicBlock* next_block = blocks[0];
+ for (size_t i = 0; i < blocks.size(); ++i) {
// Because we are changing the graph when inlining, we need to remember the next block.
// This avoids doing the inlining work again on the inlined blocks.
- if (blocks.Get(i) != next_block) {
+ if (blocks[i] != next_block) {
continue;
}
HBasicBlock* block = next_block;
- next_block = (i == blocks.Size() - 1) ? nullptr : blocks.Get(i + 1);
+ next_block = (i == blocks.size() - 1) ? nullptr : blocks[i + 1];
for (HInstruction* instruction = block->GetFirstInstruction(); instruction != nullptr;) {
HInstruction* next = instruction->GetNext();
HInvoke* call = instruction->AsInvoke();
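The next_block bookkeeping above lets the walk tolerate blocks being spliced into the reverse post order while it runs. A stand-alone sketch of the same pattern, using plain strings as stand-ins for basic blocks:

#include <cassert>
#include <string>
#include <vector>

// Walk a list while insertions may happen behind the cursor: remember which element should
// be processed next, and skip anything that was spliced in before it.
int main() {
  std::vector<std::string> blocks = {"A", "B", "C"};
  std::vector<std::string> processed;
  std::string next = blocks[0];
  for (size_t i = 0; i < blocks.size(); ++i) {
    if (blocks[i] != next) {
      continue;  // a freshly inserted block; skip it
    }
    next = (i == blocks.size() - 1) ? "" : blocks[i + 1];
    processed.push_back(blocks[i]);
    if (blocks[i] == "B") {
      // Simulate inlining into B: new blocks appear right after it.
      blocks.insert(blocks.begin() + i + 1, {"B.inlined1", "B.inlined2"});
    }
  }
  assert((processed == std::vector<std::string>{"A", "B", "C"}));
  return 0;
}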
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 5a04ffb..e302317 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -507,7 +507,7 @@
XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
- Label nan, done, op2_label;
+ NearLabel nan, done, op2_label;
if (is_double) {
__ ucomisd(out, op2);
} else {
@@ -841,7 +841,7 @@
Register out = locations->Out().AsRegister<Register>();
XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
X86Assembler* assembler = GetAssembler();
// Generate 0.5 into inPlusPointFive.
@@ -1147,9 +1147,7 @@
Register edi = locations->GetTemp(1).AsRegister<Register>();
Register esi = locations->Out().AsRegister<Register>();
- Label end;
- Label return_true;
- Label return_false;
+ NearLabel end, return_true, return_false;
// Get offsets of count, value, and class fields within a string object.
const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
@@ -1181,8 +1179,7 @@
__ cmpl(ecx, Address(arg, count_offset));
__ j(kNotEqual, &return_false);
// Return true if both strings are empty.
- __ testl(ecx, ecx);
- __ j(kEqual, &return_true);
+ __ jecxz(&return_true);
// Load starting addresses of string values into ESI/EDI as required for repe_cmpsl instruction.
__ leal(esi, Address(str, value_offset));
@@ -1292,7 +1289,7 @@
// Do a zero-length check.
// TODO: Support jecxz.
- Label not_found_label;
+ NearLabel not_found_label;
__ testl(string_length, string_length);
__ j(kEqual, &not_found_label);
__ j(kEqual, &not_found_label);
@@ -1335,7 +1332,7 @@
__ subl(string_length, counter);
__ leal(out, Address(string_length, -1));
- Label done;
+ NearLabel done;
__ jmp(&done);
// Failed to match; return -1.
@@ -2055,7 +2052,7 @@
}
// BSR sets ZF if the input was zero, and the output is undefined.
- Label all_zeroes, done;
+ NearLabel all_zeroes, done;
__ j(kEqual, &all_zeroes);
// Correct the result from BSR to get the final CLZ result.
@@ -2074,7 +2071,7 @@
DCHECK(src.IsRegisterPair());
Register src_lo = src.AsRegisterPairLow<Register>();
Register src_hi = src.AsRegisterPairHigh<Register>();
- Label handle_low, done, all_zeroes;
+ NearLabel handle_low, done, all_zeroes;
// Is the high word zero?
__ testl(src_hi, src_hi);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 3ce4578..51980af 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -405,7 +405,7 @@
XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();
- Label nan, done, op2_label;
+ NearLabel nan, done, op2_label;
if (is_double) {
__ ucomisd(out, op2);
} else {
@@ -702,7 +702,7 @@
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
X86_64Assembler* assembler = GetAssembler();
// Load 0.5 into inPlusPointFive.
@@ -750,7 +750,7 @@
XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
CpuRegister out = locations->Out().AsRegister<CpuRegister>();
XmmRegister inPlusPointFive = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
- Label done, nan;
+ NearLabel done, nan;
X86_64Assembler* assembler = GetAssembler();
// Load 0.5 into inPlusPointFive.
@@ -1044,9 +1044,7 @@
CpuRegister rdi = locations->GetTemp(1).AsRegister<CpuRegister>();
CpuRegister rsi = locations->Out().AsRegister<CpuRegister>();
- Label end;
- Label return_true;
- Label return_false;
+ NearLabel end, return_true, return_false;
// Get offsets of count, value, and class fields within a string object.
const uint32_t count_offset = mirror::String::CountOffset().Uint32Value();
@@ -1078,8 +1076,7 @@
__ cmpl(rcx, Address(arg, count_offset));
__ j(kNotEqual, &return_false);
// Return true if both strings are empty.
- __ testl(rcx, rcx);
- __ j(kEqual, &return_true);
+ __ jrcxz(&return_true);
// Load starting addresses of string values into RSI/RDI as required for repe_cmpsq instruction.
__ leal(rsi, Address(str, value_offset));
@@ -1189,7 +1186,7 @@
// Do a length check.
// TODO: Support jecxz.
- Label not_found_label;
+ NearLabel not_found_label;
__ testl(string_length, string_length);
__ j(kEqual, &not_found_label);
@@ -1231,7 +1228,7 @@
__ subl(string_length, counter);
__ leal(out, Address(string_length, -1));
- Label done;
+ NearLabel done;
__ jmp(&done);
// Failed to match; return -1.
@@ -1896,7 +1893,7 @@
}
// BSR sets ZF if the input was zero, and the output is undefined.
- Label is_zero, done;
+ NearLabel is_zero, done;
__ j(kEqual, &is_zero);
// Correct the result from BSR to get the CLZ result.
diff --git a/compiler/optimizing/licm.cc b/compiler/optimizing/licm.cc
index 5b89b4e..c38bbe3 100644
--- a/compiler/optimizing/licm.cc
+++ b/compiler/optimizing/licm.cc
@@ -80,7 +80,7 @@
void LICM::Run() {
DCHECK(side_effects_.HasRun());
// Only used during debug.
- ArenaBitVector visited(graph_->GetArena(), graph_->GetBlocks().Size(), false);
+ ArenaBitVector visited(graph_->GetArena(), graph_->GetBlocks().size(), false);
// Post order visit to visit inner loops before outer loops.
for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 4f259b5..a059766 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -36,7 +36,8 @@
namespace art {
-static void TestCode(const uint16_t* data, const int* expected_order, size_t number_of_blocks) {
+template <size_t number_of_blocks>
+static void TestCode(const uint16_t* data, const uint32_t (&expected_order)[number_of_blocks]) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
HGraph* graph = CreateGraph(&allocator);
@@ -53,9 +54,9 @@
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- ASSERT_EQ(graph->GetLinearOrder().Size(), number_of_blocks);
+ ASSERT_EQ(graph->GetLinearOrder().size(), number_of_blocks);
for (size_t i = 0; i < number_of_blocks; ++i) {
- ASSERT_EQ(graph->GetLinearOrder().Get(i)->GetBlockId(), expected_order[i]);
+ ASSERT_EQ(graph->GetLinearOrder()[i]->GetBlockId(), expected_order[i]);
}
}
@@ -80,8 +81,8 @@
Instruction::GOTO | 0xFE00,
Instruction::RETURN_VOID);
- const int blocks[] = {0, 1, 2, 7, 3, 4, 8, 5, 6};
- TestCode(data, blocks, 9);
+ const uint32_t blocks[] = {0, 1, 2, 7, 3, 4, 8, 5, 6};
+ TestCode(data, blocks);
}
TEST(LinearizeTest, CFG2) {
@@ -105,8 +106,8 @@
Instruction::IF_EQ, 0xFFFD,
Instruction::GOTO | 0xFE00);
- const int blocks[] = {0, 1, 2, 7, 4, 5, 8, 3, 6};
- TestCode(data, blocks, 9);
+ const uint32_t blocks[] = {0, 1, 2, 7, 4, 5, 8, 3, 6};
+ TestCode(data, blocks);
}
TEST(LinearizeTest, CFG3) {
@@ -132,8 +133,8 @@
Instruction::IF_EQ, 0xFFFC,
Instruction::GOTO | 0xFD00);
- const int blocks[] = {0, 1, 2, 8, 5, 6, 4, 9, 3, 7};
- TestCode(data, blocks, 10);
+ const uint32_t blocks[] = {0, 1, 2, 8, 5, 6, 4, 9, 3, 7};
+ TestCode(data, blocks);
}
TEST(LinearizeTest, CFG4) {
@@ -162,8 +163,8 @@
Instruction::GOTO | 0xFE00,
Instruction::RETURN_VOID);
- const int blocks[] = {0, 1, 2, 8, 3, 10, 4, 5, 11, 9, 6, 7};
- TestCode(data, blocks, 12);
+ const uint32_t blocks[] = {0, 1, 2, 8, 3, 10, 4, 5, 11, 9, 6, 7};
+ TestCode(data, blocks);
}
TEST(LinearizeTest, CFG5) {
@@ -192,8 +193,8 @@
Instruction::IF_EQ, 0xFFFE,
Instruction::GOTO | 0xFE00);
- const int blocks[] = {0, 1, 2, 8, 4, 10, 5, 6, 11, 9, 3, 7};
- TestCode(data, blocks, 12);
+ const uint32_t blocks[] = {0, 1, 2, 8, 4, 10, 5, 6, 11, 9, 3, 7};
+ TestCode(data, blocks);
}
TEST(LinearizeTest, CFG6) {
@@ -218,8 +219,8 @@
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFA00);
- const int blocks[] = {0, 1, 2, 3, 4, 6, 9, 8, 5, 7};
- TestCode(data, blocks, arraysize(blocks));
+ const uint32_t blocks[] = {0, 1, 2, 3, 4, 6, 9, 8, 5, 7};
+ TestCode(data, blocks);
}
TEST(LinearizeTest, CFG7) {
@@ -246,8 +247,8 @@
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFA00);
- const int blocks[] = {0, 1, 2, 3, 4, 9, 8, 6, 5, 7};
- TestCode(data, blocks, arraysize(blocks));
+ const uint32_t blocks[] = {0, 1, 2, 3, 4, 9, 8, 6, 5, 7};
+ TestCode(data, blocks);
}
} // namespace art
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 7cb00a1..b9ab290 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -77,7 +77,7 @@
ASSERT_EQ(2u, range->GetStart());
// Last use is the return instruction.
ASSERT_EQ(8u, range->GetEnd());
- HBasicBlock* block = graph->GetBlocks().Get(1);
+ HBasicBlock* block = graph->GetBlock(1);
ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(8u, block->GetLastInstruction()->GetLifetimePosition());
ASSERT_TRUE(range->GetNext() == nullptr);
@@ -125,7 +125,7 @@
ASSERT_EQ(2u, range->GetStart());
// Last use is the return instruction.
ASSERT_EQ(22u, range->GetEnd());
- HBasicBlock* block = graph->GetBlocks().Get(3);
+ HBasicBlock* block = graph->GetBlock(3);
ASSERT_TRUE(block->GetLastInstruction()->IsReturn());
ASSERT_EQ(22u, block->GetLastInstruction()->GetLifetimePosition());
ASSERT_TRUE(range->GetNext() == nullptr);
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 4b25046..cc3b35b 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -35,7 +35,7 @@
* A Location is an abstraction over the potential location
* of an instruction. It could be in register or stack.
*/
-class Location : public ValueObject {
+class Location {
public:
enum OutputOverlap {
kOutputOverlap,
@@ -84,7 +84,7 @@
DCHECK(!IsValid());
}
- Location(const Location& other) : ValueObject(), value_(other.value_) {}
+ Location(const Location& other) : value_(other.value_) {}
Location& operator=(const Location& other) {
value_ = other.value_;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index cc12a10..570ce2e 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -27,12 +27,12 @@
namespace art {
void HGraph::AddBlock(HBasicBlock* block) {
- block->SetBlockId(blocks_.Size());
- blocks_.Add(block);
+ block->SetBlockId(blocks_.size());
+ blocks_.push_back(block);
}
void HGraph::FindBackEdges(ArenaBitVector* visited) {
- ArenaBitVector visiting(arena_, blocks_.Size(), false);
+ ArenaBitVector visiting(arena_, blocks_.size(), false);
VisitBlockForBackEdges(entry_block_, visited, &visiting);
}
@@ -53,9 +53,9 @@
}
void HGraph::RemoveInstructionsAsUsersFromDeadBlocks(const ArenaBitVector& visited) const {
- for (size_t i = 0; i < blocks_.Size(); ++i) {
+ for (size_t i = 0; i < blocks_.size(); ++i) {
if (!visited.IsBitSet(i)) {
- HBasicBlock* block = blocks_.Get(i);
+ HBasicBlock* block = GetBlock(i);
DCHECK(block->GetPhis().IsEmpty()) << "Phis are not inserted at this stage";
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
RemoveAsUser(it.Current());
@@ -65,16 +65,16 @@
}
void HGraph::RemoveDeadBlocks(const ArenaBitVector& visited) {
- for (size_t i = 0; i < blocks_.Size(); ++i) {
+ for (size_t i = 0; i < blocks_.size(); ++i) {
if (!visited.IsBitSet(i)) {
- HBasicBlock* block = blocks_.Get(i);
+ HBasicBlock* block = GetBlock(i);
// We only need to update the successor, which might be live.
for (HBasicBlock* successor : block->GetSuccessors()) {
successor->RemovePredecessor(block);
}
// Remove the block from the list of blocks, so that further analyses
// never see it.
- blocks_.Put(i, nullptr);
+ blocks_[i] = nullptr;
}
}
}
@@ -103,7 +103,7 @@
// collect both normal- and exceptional-flow values at the same time.
SimplifyCatchBlocks();
- ArenaBitVector visited(arena_, blocks_.Size(), false);
+ ArenaBitVector visited(arena_, blocks_.size(), false);
// (2) Find the back edges in the graph doing a DFS traversal.
FindBackEdges(&visited);
@@ -130,7 +130,7 @@
for (HReversePostOrderIterator it(*this); !it.Done(); it.Advance()) {
it.Current()->ClearDominanceInformation();
}
- reverse_post_order_.Reset();
+ reverse_post_order_.clear();
}
void HBasicBlock::ClearDominanceInformation() {
@@ -139,17 +139,17 @@
}
void HGraph::ComputeDominanceInformation() {
- DCHECK(reverse_post_order_.IsEmpty());
- GrowableArray<size_t> visits(arena_, blocks_.Size());
- visits.SetSize(blocks_.Size());
- reverse_post_order_.Add(entry_block_);
+ DCHECK(reverse_post_order_.empty());
+ reverse_post_order_.reserve(blocks_.size());
+ ArenaVector<size_t> visits(blocks_.size(), 0u, arena_->Adapter());
+ reverse_post_order_.push_back(entry_block_);
for (HBasicBlock* successor : entry_block_->GetSuccessors()) {
VisitBlockForDominatorTree(successor, entry_block_, &visits);
}
}
HBasicBlock* HGraph::FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const {
- ArenaBitVector visited(arena_, blocks_.Size(), false);
+ ArenaBitVector visited(arena_, blocks_.size(), false);
// Walk the dominator tree of the first block and mark the visited blocks.
while (first != nullptr) {
visited.SetBit(first->GetBlockId());
@@ -168,20 +168,20 @@
void HGraph::VisitBlockForDominatorTree(HBasicBlock* block,
HBasicBlock* predecessor,
- GrowableArray<size_t>* visits) {
+ ArenaVector<size_t>* visits) {
if (block->GetDominator() == nullptr) {
block->SetDominator(predecessor);
} else {
block->SetDominator(FindCommonDominator(block->GetDominator(), predecessor));
}
- visits->Increment(block->GetBlockId());
// Once all the forward edges have been visited, we know the immediate
// dominator of the block. We can then start visiting its successors.
- if (visits->Get(block->GetBlockId()) ==
+ DCHECK_LT(block->GetBlockId(), visits->size());
+ if (++(*visits)[block->GetBlockId()] ==
block->GetPredecessors().size() - block->NumberOfBackEdges()) {
block->GetDominator()->AddDominatedBlock(block);
- reverse_post_order_.Add(block);
+ reverse_post_order_.push_back(block);
for (HBasicBlock* successor : block->GetSuccessors()) {
VisitBlockForDominatorTree(successor, block, visits);
}
@@ -189,7 +189,7 @@
}
void HGraph::TransformToSsa() {
- DCHECK(!reverse_post_order_.IsEmpty());
+ DCHECK(!reverse_post_order_.empty());
SsaBuilder ssa_builder(this);
ssa_builder.BuildSsa();
}
@@ -289,8 +289,7 @@
}
void HGraph::SimplifyCatchBlocks() {
- for (size_t i = 0; i < blocks_.Size(); ++i) {
- HBasicBlock* catch_block = blocks_.Get(i);
+ for (HBasicBlock* catch_block : blocks_) {
if (!catch_block->IsCatchBlock()) {
continue;
}
@@ -350,8 +349,7 @@
// Simplify the CFG for future analysis, and code generation:
// (1): Split critical edges.
// (2): Simplify loops by having only one back edge, and one preheader.
- for (size_t i = 0; i < blocks_.Size(); ++i) {
- HBasicBlock* block = blocks_.Get(i);
+ for (HBasicBlock* block : blocks_) {
if (block == nullptr) continue;
if (block->NumberOfNormalSuccessors() > 1) {
for (size_t j = 0; j < block->GetSuccessors().size(); ++j) {
@@ -483,8 +481,7 @@
bool HLoopInformation::Populate() {
DCHECK_EQ(blocks_.NumSetBits(), 0u) << "Loop information has already been populated";
- for (size_t i = 0, e = GetBackEdges().Size(); i < e; ++i) {
- HBasicBlock* back_edge = GetBackEdges().Get(i);
+ for (HBasicBlock* back_edge : GetBackEdges()) {
DCHECK(back_edge->GetDominator() != nullptr);
if (!header_->Dominates(back_edge)) {
// This loop is not natural. Do not bother going further.
@@ -505,7 +502,7 @@
void HLoopInformation::Update() {
HGraph* graph = header_->GetGraph();
for (uint32_t id : blocks_.Indexes()) {
- HBasicBlock* block = graph->GetBlocks().Get(id);
+ HBasicBlock* block = graph->GetBlock(id);
// Reset loop information of non-header blocks inside the loop, except
// members of inner nested loops because those should already have been
// updated by their own LoopInformation.
@@ -515,7 +512,7 @@
}
blocks_.ClearAllBits();
- if (back_edges_.IsEmpty()) {
+ if (back_edges_.empty()) {
// The loop has been dismantled, delete its suspend check and remove info
// from the header.
DCHECK(HasSuspendCheck());
@@ -525,8 +522,8 @@
suspend_check_ = nullptr;
} else {
if (kIsDebugBuild) {
- for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
- DCHECK(header_->Dominates(back_edges_.Get(i)));
+ for (HBasicBlock* back_edge : back_edges_) {
+ DCHECK(header_->Dominates(back_edge));
}
}
// This loop still has reachable back edges. Repopulate the list of blocks.
@@ -549,8 +546,8 @@
size_t HLoopInformation::GetLifetimeEnd() const {
size_t last_position = 0;
- for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
- last_position = std::max(back_edges_.Get(i)->GetLifetimeEnd(), last_position);
+ for (HBasicBlock* back_edge : GetBackEdges()) {
+ last_position = std::max(back_edge->GetLifetimeEnd(), last_position);
}
return last_position;
}
@@ -714,7 +711,8 @@
}
void HEnvironment::RemoveAsUserOfInput(size_t index) const {
- const HUserRecord<HEnvironment*> user_record = vregs_.Get(index);
+ DCHECK_LT(index, Size());
+ const HUserRecord<HEnvironment*>& user_record = vregs_[index];
user_record.GetInstruction()->RemoveEnvironmentUser(user_record.GetUseNode());
}
@@ -883,14 +881,15 @@
void HPhi::AddInput(HInstruction* input) {
DCHECK(input->GetBlock() != nullptr);
- inputs_.Add(HUserRecord<HInstruction*>(input));
- input->AddUseAt(this, inputs_.Size() - 1);
+ inputs_.push_back(HUserRecord<HInstruction*>(input));
+ input->AddUseAt(this, inputs_.size() - 1);
}
void HPhi::RemoveInputAt(size_t index) {
RemoveAsUserOfInput(index);
- inputs_.DeleteAt(index);
+ inputs_.erase(inputs_.begin() + index);
for (size_t i = index, e = InputCount(); i < e; ++i) {
+ DCHECK_EQ(InputRecordAt(i).GetUseNode()->GetIndex(), i + 1u);
InputRecordAt(i).GetUseNode()->SetIndex(i);
}
}
@@ -905,9 +904,8 @@
#undef DEFINE_ACCEPT
void HGraphVisitor::VisitInsertionOrder() {
- const GrowableArray<HBasicBlock*>& blocks = graph_->GetBlocks();
- for (size_t i = 0 ; i < blocks.Size(); i++) {
- HBasicBlock* block = blocks.Get(i);
+ const ArenaVector<HBasicBlock*>& blocks = graph_->GetBlocks();
+ for (HBasicBlock* block : blocks) {
if (block != nullptr) {
VisitBasicBlock(block);
}
@@ -1441,15 +1439,14 @@
// Create space in `blocks` for adding `number_of_new_blocks` entries
// starting at location `at`. Blocks after `at` are moved accordingly.
-static void MakeRoomFor(GrowableArray<HBasicBlock*>* blocks,
+static void MakeRoomFor(ArenaVector<HBasicBlock*>* blocks,
size_t number_of_new_blocks,
- size_t at) {
- size_t old_size = blocks->Size();
+ size_t after) {
+ DCHECK_LT(after, blocks->size());
+ size_t old_size = blocks->size();
size_t new_size = old_size + number_of_new_blocks;
- blocks->SetSize(new_size);
- for (size_t i = old_size - 1, j = new_size - 1; i > at; --i, --j) {
- blocks->Put(j, blocks->Get(i));
- }
+ blocks->resize(new_size);
+ std::copy_backward(blocks->begin() + after + 1u, blocks->begin() + old_size, blocks->end());
}
void HGraph::DeleteDeadBlock(HBasicBlock* block) {
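The rewritten MakeRoomFor above grows the vector first and then shifts the tail right with std::copy_backward, leaving number_of_new_blocks free slots immediately after position `after`. The same idiom on a plain std::vector:

#include <algorithm>
#include <cassert>
#include <vector>

// Open a gap of new_slots entries right after index `after`, preserving the tail order.
void MakeRoomFor(std::vector<int>* blocks, size_t new_slots, size_t after) {
  size_t old_size = blocks->size();
  blocks->resize(old_size + new_slots);  // value-initialized slots appended at the end
  std::copy_backward(blocks->begin() + after + 1, blocks->begin() + old_size, blocks->end());
}

int main() {
  std::vector<int> v = {10, 20, 30, 40};
  MakeRoomFor(&v, 2, 1);  // open two slots after index 1
  // Elements 30 and 40 moved right; stale copies remain in the gap until overwritten.
  assert(v.size() == 6 && v[0] == 10 && v[1] == 20 && v[4] == 30 && v[5] == 40);
  return 0;
}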
@@ -1470,8 +1467,8 @@
exit_block_ = nullptr;
}
- reverse_post_order_.Delete(block);
- blocks_.Put(block->GetBlockId(), nullptr);
+ RemoveElement(reverse_post_order_, block);
+ blocks_[block->GetBlockId()] = nullptr;
}
HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
@@ -1500,12 +1497,12 @@
}
HInstruction* return_value = nullptr;
- if (GetBlocks().Size() == 3) {
+ if (GetBlocks().size() == 3) {
// Simple case of an entry block, a body block, and an exit block.
// Put the body block's instruction into `invoke`'s block.
- HBasicBlock* body = GetBlocks().Get(1);
- DCHECK(GetBlocks().Get(0)->IsEntryBlock());
- DCHECK(GetBlocks().Get(2)->IsExitBlock());
+ HBasicBlock* body = GetBlock(1);
+ DCHECK(GetBlock(0)->IsEntryBlock());
+ DCHECK(GetBlock(2)->IsExitBlock());
DCHECK(!body->IsExitBlock());
HInstruction* last = body->GetLastInstruction();
@@ -1578,15 +1575,12 @@
// We add the `to` block.
static constexpr int kNumberOfNewBlocksInCaller = 1;
- size_t blocks_added = (reverse_post_order_.Size() - kNumberOfSkippedBlocksInCallee)
+ size_t blocks_added = (reverse_post_order_.size() - kNumberOfSkippedBlocksInCallee)
+ kNumberOfNewBlocksInCaller;
// Find the location of `at` in the outer graph's reverse post order. The new
// blocks will be added after it.
- size_t index_of_at = 0;
- while (outer_graph->reverse_post_order_.Get(index_of_at) != at) {
- index_of_at++;
- }
+ size_t index_of_at = IndexOfElement(outer_graph->reverse_post_order_, at);
MakeRoomFor(&outer_graph->reverse_post_order_, blocks_added, index_of_at);
// Do a reverse post order of the blocks in the callee and do (1), (2),
@@ -1599,7 +1593,7 @@
DCHECK(current->GetGraph() == this);
current->SetGraph(outer_graph);
outer_graph->AddBlock(current);
- outer_graph->reverse_post_order_.Put(++index_of_at, current);
+ outer_graph->reverse_post_order_[++index_of_at] = current;
if (info != nullptr) {
current->SetLoopInformation(info);
for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
@@ -1612,7 +1606,7 @@
// Do (1), (2), and (3) to `to`.
to->SetGraph(outer_graph);
outer_graph->AddBlock(to);
- outer_graph->reverse_post_order_.Put(++index_of_at, to);
+ outer_graph->reverse_post_order_[++index_of_at] = to;
if (info != nullptr) {
to->SetLoopInformation(info);
for (HLoopInformationOutwardIterator loop_it(*at); !loop_it.Done(); loop_it.Advance()) {
@@ -1725,15 +1719,12 @@
new_pre_header->dominated_blocks_.push_back(header);
header->SetDominator(new_pre_header);
- size_t index_of_header = 0;
- while (reverse_post_order_.Get(index_of_header) != header) {
- index_of_header++;
- }
+ size_t index_of_header = IndexOfElement(reverse_post_order_, header);
MakeRoomFor(&reverse_post_order_, 4, index_of_header - 1);
- reverse_post_order_.Put(index_of_header++, if_block);
- reverse_post_order_.Put(index_of_header++, dummy_block);
- reverse_post_order_.Put(index_of_header++, deopt_block);
- reverse_post_order_.Put(index_of_header++, new_pre_header);
+ reverse_post_order_[index_of_header++] = if_block;
+ reverse_post_order_[index_of_header++] = dummy_block;
+ reverse_post_order_[index_of_header++] = deopt_block;
+ reverse_post_order_[index_of_header++] = new_pre_header;
HLoopInformation* info = pre_header->GetLoopInformation();
if (info != nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d52a4f7..993cc37 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -147,9 +147,9 @@
bool debuggable = false,
int start_instruction_id = 0)
: arena_(arena),
- blocks_(arena, kDefaultNumberOfBlocks),
- reverse_post_order_(arena, kDefaultNumberOfBlocks),
- linear_order_(arena, kDefaultNumberOfBlocks),
+ blocks_(arena->Adapter(kArenaAllocBlockList)),
+ reverse_post_order_(arena->Adapter(kArenaAllocReversePostOrder)),
+ linear_order_(arena->Adapter(kArenaAllocLinearOrder)),
entry_block_(nullptr),
exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
@@ -167,15 +167,21 @@
should_generate_constructor_barrier_(should_generate_constructor_barrier),
instruction_set_(instruction_set),
cached_null_constant_(nullptr),
- cached_int_constants_(std::less<int32_t>(), arena->Adapter()),
- cached_float_constants_(std::less<int32_t>(), arena->Adapter()),
- cached_long_constants_(std::less<int64_t>(), arena->Adapter()),
- cached_double_constants_(std::less<int64_t>(), arena->Adapter()),
- cached_current_method_(nullptr) {}
+ cached_int_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
+ cached_float_constants_(std::less<int32_t>(), arena->Adapter(kArenaAllocConstantsMap)),
+ cached_long_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
+ cached_double_constants_(std::less<int64_t>(), arena->Adapter(kArenaAllocConstantsMap)),
+ cached_current_method_(nullptr) {
+ blocks_.reserve(kDefaultNumberOfBlocks);
+ }
ArenaAllocator* GetArena() const { return arena_; }
- const GrowableArray<HBasicBlock*>& GetBlocks() const { return blocks_; }
- HBasicBlock* GetBlock(size_t id) const { return blocks_.Get(id); }
+ const ArenaVector<HBasicBlock*>& GetBlocks() const { return blocks_; }
+
+ HBasicBlock* GetBlock(size_t id) const {
+ DCHECK_LT(id, blocks_.size());
+ return blocks_[id];
+ }
bool IsInSsaForm() const { return in_ssa_form_; }
@@ -295,11 +301,11 @@
return number_of_vregs_ - number_of_in_vregs_;
}
- const GrowableArray<HBasicBlock*>& GetReversePostOrder() const {
+ const ArenaVector<HBasicBlock*>& GetReversePostOrder() const {
return reverse_post_order_;
}
- const GrowableArray<HBasicBlock*>& GetLinearOrder() const {
+ const ArenaVector<HBasicBlock*>& GetLinearOrder() const {
return linear_order_;
}
@@ -366,7 +372,7 @@
private:
void VisitBlockForDominatorTree(HBasicBlock* block,
HBasicBlock* predecessor,
- GrowableArray<size_t>* visits);
+ ArenaVector<size_t>* visits);
void FindBackEdges(ArenaBitVector* visited);
void VisitBlockForBackEdges(HBasicBlock* block,
ArenaBitVector* visited,
@@ -407,13 +413,13 @@
ArenaAllocator* const arena_;
// List of blocks in insertion order.
- GrowableArray<HBasicBlock*> blocks_;
+ ArenaVector<HBasicBlock*> blocks_;
// List of blocks to perform a reverse post order tree traversal.
- GrowableArray<HBasicBlock*> reverse_post_order_;
+ ArenaVector<HBasicBlock*> reverse_post_order_;
// List of blocks to perform a linear order tree traversal.
- GrowableArray<HBasicBlock*> linear_order_;
+ ArenaVector<HBasicBlock*> linear_order_;
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
@@ -483,9 +489,11 @@
HLoopInformation(HBasicBlock* header, HGraph* graph)
: header_(header),
suspend_check_(nullptr),
- back_edges_(graph->GetArena(), kDefaultNumberOfBackEdges),
+ back_edges_(graph->GetArena()->Adapter(kArenaAllocLoopInfoBackEdges)),
// Make bit vector growable, as the number of blocks may change.
- blocks_(graph->GetArena(), graph->GetBlocks().Size(), true) {}
+ blocks_(graph->GetArena(), graph->GetBlocks().size(), true) {
+ back_edges_.reserve(kDefaultNumberOfBackEdges);
+ }
HBasicBlock* GetHeader() const {
return header_;
@@ -500,27 +508,24 @@
bool HasSuspendCheck() const { return suspend_check_ != nullptr; }
void AddBackEdge(HBasicBlock* back_edge) {
- back_edges_.Add(back_edge);
+ back_edges_.push_back(back_edge);
}
void RemoveBackEdge(HBasicBlock* back_edge) {
- back_edges_.Delete(back_edge);
+ RemoveElement(back_edges_, back_edge);
}
bool IsBackEdge(const HBasicBlock& block) const {
- for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
- if (back_edges_.Get(i) == &block) return true;
- }
- return false;
+ return ContainsElement(back_edges_, &block);
}
size_t NumberOfBackEdges() const {
- return back_edges_.Size();
+ return back_edges_.size();
}
HBasicBlock* GetPreHeader() const;
- const GrowableArray<HBasicBlock*>& GetBackEdges() const {
+ const ArenaVector<HBasicBlock*>& GetBackEdges() const {
return back_edges_;
}
@@ -529,13 +534,7 @@
size_t GetLifetimeEnd() const;
void ReplaceBackEdge(HBasicBlock* existing, HBasicBlock* new_back_edge) {
- for (size_t i = 0, e = back_edges_.Size(); i < e; ++i) {
- if (back_edges_.Get(i) == existing) {
- back_edges_.Put(i, new_back_edge);
- return;
- }
- }
- UNREACHABLE();
+ ReplaceElement(back_edges_, existing, new_back_edge);
}
// Finds blocks that are part of this loop. Returns whether the loop is a natural loop,
@@ -567,7 +566,7 @@
HBasicBlock* header_;
HSuspendCheck* suspend_check_;
- GrowableArray<HBasicBlock*> back_edges_;
+ ArenaVector<HBasicBlock*> back_edges_;
ArenaBitVector blocks_;
DISALLOW_COPY_AND_ASSIGN(HLoopInformation);
@@ -627,6 +626,7 @@
};
static constexpr size_t kNoLifetime = -1;
+static constexpr uint32_t kInvalidBlockId = static_cast<uint32_t>(-1);
// A block in a method. Contains the list of instructions represented
// as a double linked list. Each block knows its predecessors and
@@ -641,7 +641,7 @@
loop_information_(nullptr),
dominator_(nullptr),
dominated_blocks_(graph->GetArena()->Adapter(kArenaAllocDominated)),
- block_id_(-1),
+ block_id_(kInvalidBlockId),
dex_pc_(dex_pc),
lifetime_start_(kNoLifetime),
lifetime_end_(kNoLifetime),
@@ -707,7 +707,7 @@
HGraph* GetGraph() const { return graph_; }
void SetGraph(HGraph* graph) { graph_ = graph; }
- int GetBlockId() const { return block_id_; }
+ uint32_t GetBlockId() const { return block_id_; }
void SetBlockId(int id) { block_id_ = id; }
uint32_t GetDexPc() const { return dex_pc_; }
@@ -964,7 +964,7 @@
HLoopInformation* loop_information_;
HBasicBlock* dominator_;
ArenaVector<HBasicBlock*> dominated_blocks_;
- int block_id_;
+ uint32_t block_id_;
// The dex program counter of the first instruction of this block.
const uint32_t dex_pc_;
size_t lifetime_start_;
@@ -1242,7 +1242,7 @@
// instructions they use and pointers to the corresponding HUseListNodes kept
// by the used instructions.
template <typename T>
-class HUserRecord : public ValueObject {
+class HUserRecord {
public:
HUserRecord() : instruction_(nullptr), use_node_(nullptr) {}
explicit HUserRecord(HInstruction* instruction) : instruction_(instruction), use_node_(nullptr) {}
@@ -1513,23 +1513,14 @@
uint32_t dex_pc,
InvokeType invoke_type,
HInstruction* holder)
- : vregs_(arena, number_of_vregs),
- locations_(arena, number_of_vregs),
+ : vregs_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentVRegs)),
+ locations_(number_of_vregs, arena->Adapter(kArenaAllocEnvironmentLocations)),
parent_(nullptr),
dex_file_(dex_file),
method_idx_(method_idx),
dex_pc_(dex_pc),
invoke_type_(invoke_type),
holder_(holder) {
- vregs_.SetSize(number_of_vregs);
- for (size_t i = 0; i < number_of_vregs; i++) {
- vregs_.Put(i, HUserRecord<HEnvironment*>());
- }
-
- locations_.SetSize(number_of_vregs);
- for (size_t i = 0; i < number_of_vregs; ++i) {
- locations_.Put(i, Location());
- }
}
HEnvironment(ArenaAllocator* arena, const HEnvironment& to_copy, HInstruction* holder)
@@ -1562,25 +1553,29 @@
void CopyFromWithLoopPhiAdjustment(HEnvironment* env, HBasicBlock* loop_header);
void SetRawEnvAt(size_t index, HInstruction* instruction) {
- vregs_.Put(index, HUserRecord<HEnvironment*>(instruction));
+ DCHECK_LT(index, Size());
+ vregs_[index] = HUserRecord<HEnvironment*>(instruction);
}
HInstruction* GetInstructionAt(size_t index) const {
- return vregs_.Get(index).GetInstruction();
+ DCHECK_LT(index, Size());
+ return vregs_[index].GetInstruction();
}
void RemoveAsUserOfInput(size_t index) const;
- size_t Size() const { return vregs_.Size(); }
+ size_t Size() const { return vregs_.size(); }
HEnvironment* GetParent() const { return parent_; }
void SetLocationAt(size_t index, Location location) {
- locations_.Put(index, location);
+ DCHECK_LT(index, Size());
+ locations_[index] = location;
}
Location GetLocationAt(size_t index) const {
- return locations_.Get(index);
+ DCHECK_LT(index, Size());
+ return locations_[index];
}
uint32_t GetDexPc() const {
@@ -1609,11 +1604,12 @@
void RecordEnvUse(HUseListNode<HEnvironment*>* env_use) {
DCHECK(env_use->GetUser() == this);
size_t index = env_use->GetIndex();
- vregs_.Put(index, HUserRecord<HEnvironment*>(vregs_.Get(index), env_use));
+ DCHECK_LT(index, Size());
+ vregs_[index] = HUserRecord<HEnvironment*>(vregs_[index], env_use);
}
- GrowableArray<HUserRecord<HEnvironment*> > vregs_;
- GrowableArray<Location> locations_;
+ ArenaVector<HUserRecord<HEnvironment*>> vregs_;
+ ArenaVector<Location> locations_;
HEnvironment* parent_;
const DexFile& dex_file_;
const uint32_t method_idx_;
@@ -2977,7 +2973,7 @@
class HInvoke : public HInstruction {
public:
- size_t InputCount() const OVERRIDE { return inputs_.Size(); }
+ size_t InputCount() const OVERRIDE { return inputs_.size(); }
// Runtime needs to walk the stack, so Dex -> Dex calls need to
// know their environment.
@@ -3031,23 +3027,26 @@
: HInstruction(
SideEffects::AllExceptGCDependency(), dex_pc), // Assume write/read on all fields/arrays.
number_of_arguments_(number_of_arguments),
- inputs_(arena, number_of_arguments),
+ inputs_(number_of_arguments + number_of_other_inputs, arena->Adapter(kArenaAllocInvokeInputs)),
return_type_(return_type),
dex_method_index_(dex_method_index),
original_invoke_type_(original_invoke_type),
intrinsic_(Intrinsics::kNone),
needs_environment_or_cache_(kNeedsEnvironmentOrCache) {
- uint32_t number_of_inputs = number_of_arguments + number_of_other_inputs;
- inputs_.SetSize(number_of_inputs);
}
- const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE { return inputs_.Get(i); }
+ const HUserRecord<HInstruction*> InputRecordAt(size_t index) const OVERRIDE {
+ DCHECK_LT(index, InputCount());
+ return inputs_[index];
+ }
+
void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) OVERRIDE {
- inputs_.Put(index, input);
+ DCHECK_LT(index, InputCount());
+ inputs_[index] = input;
}
uint32_t number_of_arguments_;
- GrowableArray<HUserRecord<HInstruction*> > inputs_;
+ ArenaVector<HUserRecord<HInstruction*>> inputs_;
const Primitive::Type return_type_;
const uint32_t dex_method_index_;
const InvokeType original_invoke_type_;
@@ -3226,7 +3225,7 @@
DCHECK(last_input != nullptr);
DCHECK(last_input->IsLoadClass()) << last_input->DebugName();
RemoveAsUserOfInput(last_input_index);
- inputs_.DeleteAt(last_input_index);
+ inputs_.pop_back();
clinit_check_requirement_ = ClinitCheckRequirement::kImplicit;
DCHECK(IsStaticWithImplicitClinitCheck());
}
@@ -3245,7 +3244,7 @@
DCHECK(last_input != nullptr);
DCHECK(last_input->IsFakeString()) << last_input->DebugName();
RemoveAsUserOfInput(last_input_index);
- inputs_.DeleteAt(last_input_index);
+ inputs_.pop_back();
}
// Is this a call to a static method whose declaring class has an
@@ -3978,12 +3977,11 @@
Primitive::Type type,
uint32_t dex_pc = kNoDexPc)
: HInstruction(SideEffects::None(), dex_pc),
- inputs_(arena, number_of_inputs),
+ inputs_(number_of_inputs, arena->Adapter(kArenaAllocPhiInputs)),
reg_number_(reg_number),
type_(type),
is_live_(false),
can_be_null_(true) {
- inputs_.SetSize(number_of_inputs);
}
// Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
@@ -4001,7 +3999,7 @@
bool IsCatchPhi() const { return GetBlock()->IsCatchBlock(); }
- size_t InputCount() const OVERRIDE { return inputs_.Size(); }
+ size_t InputCount() const OVERRIDE { return inputs_.size(); }
void AddInput(HInstruction* input);
void RemoveInputAt(size_t index);
@@ -4043,14 +4041,18 @@
DECLARE_INSTRUCTION(Phi);
protected:
- const HUserRecord<HInstruction*> InputRecordAt(size_t i) const OVERRIDE { return inputs_.Get(i); }
+ const HUserRecord<HInstruction*> InputRecordAt(size_t index) const OVERRIDE {
+ DCHECK_LE(index, InputCount());
+ return inputs_[index];
+ }
void SetRawInputRecordAt(size_t index, const HUserRecord<HInstruction*>& input) OVERRIDE {
- inputs_.Put(index, input);
+ DCHECK_LE(index, InputCount());
+ inputs_[index] = input;
}
private:
- GrowableArray<HUserRecord<HInstruction*> > inputs_;
+ ArenaVector<HUserRecord<HInstruction*> > inputs_;
const uint32_t reg_number_;
Primitive::Type type_;
bool is_live_;
@@ -5084,8 +5086,8 @@
public:
explicit HInsertionOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {}
- bool Done() const { return index_ == graph_.GetBlocks().Size(); }
- HBasicBlock* Current() const { return graph_.GetBlocks().Get(index_); }
+ bool Done() const { return index_ == graph_.GetBlocks().size(); }
+ HBasicBlock* Current() const { return graph_.GetBlock(index_); }
void Advance() { ++index_; }
private:
@@ -5099,11 +5101,11 @@
public:
explicit HReversePostOrderIterator(const HGraph& graph) : graph_(graph), index_(0) {
// Check that reverse post order of the graph has been built.
- DCHECK(!graph.GetReversePostOrder().IsEmpty());
+ DCHECK(!graph.GetReversePostOrder().empty());
}
- bool Done() const { return index_ == graph_.GetReversePostOrder().Size(); }
- HBasicBlock* Current() const { return graph_.GetReversePostOrder().Get(index_); }
+ bool Done() const { return index_ == graph_.GetReversePostOrder().size(); }
+ HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_]; }
void Advance() { ++index_; }
private:
@@ -5116,13 +5118,13 @@
class HPostOrderIterator : public ValueObject {
public:
explicit HPostOrderIterator(const HGraph& graph)
- : graph_(graph), index_(graph_.GetReversePostOrder().Size()) {
+ : graph_(graph), index_(graph_.GetReversePostOrder().size()) {
// Check that reverse post order of the graph has been built.
- DCHECK(!graph.GetReversePostOrder().IsEmpty());
+ DCHECK(!graph.GetReversePostOrder().empty());
}
bool Done() const { return index_ == 0; }
- HBasicBlock* Current() const { return graph_.GetReversePostOrder().Get(index_ - 1); }
+ HBasicBlock* Current() const { return graph_.GetReversePostOrder()[index_ - 1u]; }
void Advance() { --index_; }
private:
@@ -5135,11 +5137,11 @@
class HLinearPostOrderIterator : public ValueObject {
public:
explicit HLinearPostOrderIterator(const HGraph& graph)
- : order_(graph.GetLinearOrder()), index_(graph.GetLinearOrder().Size()) {}
+ : order_(graph.GetLinearOrder()), index_(graph.GetLinearOrder().size()) {}
bool Done() const { return index_ == 0; }
- HBasicBlock* Current() const { return order_.Get(index_ -1); }
+ HBasicBlock* Current() const { return order_[index_ - 1u]; }
void Advance() {
--index_;
@@ -5147,7 +5149,7 @@
}
private:
- const GrowableArray<HBasicBlock*>& order_;
+ const ArenaVector<HBasicBlock*>& order_;
size_t index_;
DISALLOW_COPY_AND_ASSIGN(HLinearPostOrderIterator);
@@ -5158,12 +5160,12 @@
explicit HLinearOrderIterator(const HGraph& graph)
: order_(graph.GetLinearOrder()), index_(0) {}
- bool Done() const { return index_ == order_.Size(); }
- HBasicBlock* Current() const { return order_.Get(index_); }
+ bool Done() const { return index_ == order_.size(); }
+ HBasicBlock* Current() const { return order_[index_]; }
void Advance() { ++index_; }
private:
- const GrowableArray<HBasicBlock*>& order_;
+ const ArenaVector<HBasicBlock*>& order_;
size_t index_;
DISALLOW_COPY_AND_ASSIGN(HLinearOrderIterator);
@@ -5183,11 +5185,11 @@
}
}
- bool Done() const { return index_ == blocks_.Size(); }
- HBasicBlock* Current() const { return blocks_.Get(index_); }
+ bool Done() const { return index_ == blocks_.size(); }
+ HBasicBlock* Current() const { return blocks_[index_]; }
void Advance() {
++index_;
- for (size_t e = blocks_.Size(); index_ < e; ++index_) {
+ for (size_t e = blocks_.size(); index_ < e; ++index_) {
if (blocks_in_loop_.IsBitSet(index_)) {
break;
}
@@ -5196,7 +5198,7 @@
private:
const BitVector& blocks_in_loop_;
- const GrowableArray<HBasicBlock*>& blocks_;
+ const ArenaVector<HBasicBlock*>& blocks_;
size_t index_;
DISALLOW_COPY_AND_ASSIGN(HBlocksInLoopIterator);
@@ -5211,17 +5213,18 @@
: blocks_in_loop_(info.GetBlocks()),
blocks_(info.GetHeader()->GetGraph()->GetReversePostOrder()),
index_(0) {
- if (!blocks_in_loop_.IsBitSet(blocks_.Get(index_)->GetBlockId())) {
+ DCHECK(!blocks_.empty());
+ if (!blocks_in_loop_.IsBitSet(blocks_[index_]->GetBlockId())) {
Advance();
}
}
- bool Done() const { return index_ == blocks_.Size(); }
- HBasicBlock* Current() const { return blocks_.Get(index_); }
+ bool Done() const { return index_ == blocks_.size(); }
+ HBasicBlock* Current() const { return blocks_[index_]; }
void Advance() {
++index_;
- for (size_t e = blocks_.Size(); index_ < e; ++index_) {
- if (blocks_in_loop_.IsBitSet(blocks_.Get(index_)->GetBlockId())) {
+ for (size_t e = blocks_.size(); index_ < e; ++index_) {
+ if (blocks_in_loop_.IsBitSet(blocks_[index_]->GetBlockId())) {
break;
}
}
@@ -5229,7 +5232,7 @@
private:
const BitVector& blocks_in_loop_;
- const GrowableArray<HBasicBlock*>& blocks_;
+ const ArenaVector<HBasicBlock*>& blocks_;
size_t index_;
DISALLOW_COPY_AND_ASSIGN(HBlocksInLoopReversePostOrderIterator);
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
index f455571..05c6b2c 100644
--- a/compiler/optimizing/optimizing_cfi_test.cc
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -71,7 +71,7 @@
}
}
}
- GrowableArray<HBasicBlock*> blocks(&allocator, 0);
+ ArenaVector<HBasicBlock*> blocks(allocator.Adapter());
code_gen->block_order_ = &blocks;
code_gen->ComputeSpillMask();
code_gen->SetFrameSize(frame_size);
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4421460..092e3c2 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -51,6 +51,7 @@
#include "graph_checker.h"
#include "graph_visualizer.h"
#include "gvn.h"
+#include "induction_var_analysis.h"
#include "inliner.h"
#include "instruction_simplifier.h"
#include "intrinsics.h"
@@ -333,7 +334,9 @@
CHECK_EQ(driver->GetThreadCount(), 1U)
<< "Graph visualizer requires the compiler to run single-threaded. "
<< "Invoke the compiler with '-j1'.";
- visualizer_output_.reset(new std::ofstream(cfg_file_name));
+ std::ios_base::openmode cfg_file_mode =
+ driver->GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
+ visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
}
if (driver->GetDumpStats()) {
compilation_stats_.reset(new OptimizingCompilerStats());
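The open-mode selection above presumably exists so that repeated compiler invocations can accumulate graphs in one .cfg file instead of truncating it. A minimal sketch of the same std::ofstream mode choice; the file name and flag spelling here are only examples:

#include <fstream>
#include <string>

int main(int argc, char** argv) {
  bool dump_cfg_append = (argc > 1 && std::string(argv[1]) == "--append");
  std::ios_base::openmode cfg_file_mode =
      dump_cfg_append ? std::ofstream::app : std::ofstream::out;  // append vs. truncate
  std::ofstream out("graph.cfg", cfg_file_mode);
  out << "// example dump\n";
  return 0;
}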
@@ -462,7 +465,8 @@
SideEffectsAnalysis* side_effects = new (arena) SideEffectsAnalysis(graph);
GVNOptimization* gvn = new (arena) GVNOptimization(graph, *side_effects);
LICM* licm = new (arena) LICM(graph, *side_effects);
- BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph);
+ HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
+ BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, induction);
ReferenceTypePropagation* type_propagation =
new (arena) ReferenceTypePropagation(graph, handles);
InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
@@ -510,6 +514,7 @@
side_effects,
gvn,
licm,
+ induction,
bce,
simplify3,
dce2,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 86c22ed..350f0b1 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -60,10 +60,8 @@
}
void RemoveSuspendChecks(HGraph* graph) {
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- for (HInstructionIterator it(graph->GetBlocks().Get(i)->GetInstructions());
- !it.Done();
- it.Advance()) {
+ for (HBasicBlock* block : graph->GetBlocks()) {
+ for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
if (current->IsSuspendCheck()) {
current->GetBlock()->RemoveInstruction(current);
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 965a8df..b72df86 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -312,7 +312,7 @@
register_allocator.AllocateRegisters();
ASSERT_TRUE(register_allocator.Validate(false));
- HBasicBlock* loop_header = graph->GetBlocks().Get(2);
+ HBasicBlock* loop_header = graph->GetBlock(2);
HPhi* phi = loop_header->GetFirstPhi()->AsPhi();
LiveInterval* phi_interval = phi->GetLiveInterval();
@@ -321,7 +321,7 @@
ASSERT_TRUE(loop_update->HasRegister());
ASSERT_NE(phi_interval->GetRegister(), loop_update->GetRegister());
- HBasicBlock* return_block = graph->GetBlocks().Get(3);
+ HBasicBlock* return_block = graph->GetBlock(3);
HReturn* ret = return_block->GetLastInstruction()->AsReturn();
ASSERT_EQ(phi_interval->GetRegister(), ret->InputAt(0)->GetLiveInterval()->GetRegister());
}
@@ -343,8 +343,8 @@
SsaLivenessAnalysis liveness(graph, &codegen);
liveness.Analyze();
- HXor* first_xor = graph->GetBlocks().Get(1)->GetFirstInstruction()->AsXor();
- HXor* last_xor = graph->GetBlocks().Get(1)->GetLastInstruction()->GetPrevious()->AsXor();
+ HXor* first_xor = graph->GetBlock(1)->GetFirstInstruction()->AsXor();
+ HXor* last_xor = graph->GetBlock(1)->GetLastInstruction()->GetPrevious()->AsXor();
ASSERT_EQ(last_xor->InputAt(0), first_xor);
LiveInterval* interval = first_xor->GetLiveInterval();
ASSERT_EQ(interval->GetEnd(), last_xor->GetLifetimePosition());
diff --git a/compiler/optimizing/side_effects_analysis.cc b/compiler/optimizing/side_effects_analysis.cc
index 1c3e255..1956781 100644
--- a/compiler/optimizing/side_effects_analysis.cc
+++ b/compiler/optimizing/side_effects_analysis.cc
@@ -21,8 +21,8 @@
void SideEffectsAnalysis::Run() {
// Inlining might have created more blocks, so we need to increase the size
// if needed.
- block_effects_.SetSize(graph_->GetBlocks().Size());
- loop_effects_.SetSize(graph_->GetBlocks().Size());
+ block_effects_.SetSize(graph_->GetBlocks().size());
+ loop_effects_.SetSize(graph_->GetBlocks().size());
// In DEBUG mode, ensure side effects are properly initialized to empty.
if (kIsDebugBuild) {
diff --git a/compiler/optimizing/side_effects_analysis.h b/compiler/optimizing/side_effects_analysis.h
index 7d300ad..9888140 100644
--- a/compiler/optimizing/side_effects_analysis.h
+++ b/compiler/optimizing/side_effects_analysis.h
@@ -27,8 +27,8 @@
explicit SideEffectsAnalysis(HGraph* graph)
: HOptimization(graph, kSideEffectsAnalysisPassName),
graph_(graph),
- block_effects_(graph->GetArena(), graph->GetBlocks().Size(), SideEffects::None()),
- loop_effects_(graph->GetArena(), graph->GetBlocks().Size(), SideEffects::None()) {}
+ block_effects_(graph->GetArena(), graph->GetBlocks().size(), SideEffects::None()),
+ loop_effects_(graph->GetArena(), graph->GetBlocks().size(), SideEffects::None()) {}
SideEffects GetLoopEffects(HBasicBlock* block) const;
SideEffects GetBlockEffects(HBasicBlock* block) const;
diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h
index 64600db..dc76177 100644
--- a/compiler/optimizing/ssa_builder.h
+++ b/compiler/optimizing/ssa_builder.h
@@ -52,8 +52,8 @@
: HGraphVisitor(graph),
current_locals_(nullptr),
loop_headers_(graph->GetArena(), kDefaultNumberOfLoops),
- locals_for_(graph->GetArena(), graph->GetBlocks().Size()) {
- locals_for_.SetSize(graph->GetBlocks().Size());
+ locals_for_(graph->GetArena(), graph->GetBlocks().size()) {
+ locals_for_.SetSize(graph->GetBlocks().size());
}
void BuildSsa();
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 63635f3..1e9a813 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -69,8 +69,8 @@
// current reverse post order in the graph, but it would require making
// order queries to a GrowableArray, which is not the best data structure
// for it.
- GrowableArray<uint32_t> forward_predecessors(graph_->GetArena(), graph_->GetBlocks().Size());
- forward_predecessors.SetSize(graph_->GetBlocks().Size());
+ GrowableArray<uint32_t> forward_predecessors(graph_->GetArena(), graph_->GetBlocks().size());
+ forward_predecessors.SetSize(graph_->GetBlocks().size());
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
size_t number_of_forward_predecessors = block->GetPredecessors().size();
@@ -84,11 +84,12 @@
// iterate over the successors. When all non-back edge predecessors of a
// successor block are visited, the successor block is added in the worklist
// following an order that satisfies the requirements to build our linear graph.
+ graph_->linear_order_.reserve(graph_->GetReversePostOrder().size());
GrowableArray<HBasicBlock*> worklist(graph_->GetArena(), 1);
worklist.Add(graph_->GetEntryBlock());
do {
HBasicBlock* current = worklist.Pop();
- graph_->linear_order_.Add(current);
+ graph_->linear_order_.push_back(current);
for (HBasicBlock* successor : current->GetSuccessors()) {
int block_id = successor->GetBlockId();
size_t number_of_remaining_predecessors = forward_predecessors.Get(block_id);
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index ef396cb..3aedaa5 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -1106,11 +1106,11 @@
SsaLivenessAnalysis(HGraph* graph, CodeGenerator* codegen)
: graph_(graph),
codegen_(codegen),
- block_infos_(graph->GetArena(), graph->GetBlocks().Size()),
+ block_infos_(graph->GetArena(), graph->GetBlocks().size()),
instructions_from_ssa_index_(graph->GetArena(), 0),
instructions_from_lifetime_position_(graph->GetArena(), 0),
number_of_ssa_values_(0) {
- block_infos_.SetSize(graph->GetBlocks().Size());
+ block_infos_.SetSize(graph->GetBlocks().size());
}
void Analyze();
diff --git a/compiler/optimizing/ssa_test.cc b/compiler/optimizing/ssa_test.cc
index 0e8c058..024278f 100644
--- a/compiler/optimizing/ssa_test.cc
+++ b/compiler/optimizing/ssa_test.cc
@@ -64,8 +64,7 @@
static void ReNumberInstructions(HGraph* graph) {
int id = 0;
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- HBasicBlock* block = graph->GetBlocks().Get(i);
+ for (HBasicBlock* block : graph->GetBlocks()) {
for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
it.Current()->SetId(id++);
}
@@ -92,8 +91,8 @@
ReNumberInstructions(graph);
// Test that phis had their type set.
- for (size_t i = 0, e = graph->GetBlocks().Size(); i < e; ++i) {
- for (HInstructionIterator it(graph->GetBlocks().Get(i)->GetPhis()); !it.Done(); it.Advance()) {
+ for (HBasicBlock* block : graph->GetBlocks()) {
+ for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
ASSERT_NE(it.Current()->GetType(), Primitive::kPrimVoid);
}
}
diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc
index 24ea9e2..b078f3e 100644
--- a/compiler/utils/mips64/assembler_mips64.cc
+++ b/compiler/utils/mips64/assembler_mips64.cc
@@ -44,6 +44,32 @@
Emit(encoding);
}
+void Mips64Assembler::EmitRsd(int opcode, GpuRegister rs, GpuRegister rd,
+ int shamt, int funct) {
+ CHECK_NE(rs, kNoGpuRegister);
+ CHECK_NE(rd, kNoGpuRegister);
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(rs) << kRsShift |
+ static_cast<uint32_t>(ZERO) << kRtShift |
+ static_cast<uint32_t>(rd) << kRdShift |
+ shamt << kShamtShift |
+ funct;
+ Emit(encoding);
+}
+
+void Mips64Assembler::EmitRtd(int opcode, GpuRegister rt, GpuRegister rd,
+ int shamt, int funct) {
+ CHECK_NE(rt, kNoGpuRegister);
+ CHECK_NE(rd, kNoGpuRegister);
+ uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
+ static_cast<uint32_t>(ZERO) << kRsShift |
+ static_cast<uint32_t>(rt) << kRtShift |
+ static_cast<uint32_t>(rd) << kRdShift |
+ shamt << kShamtShift |
+ funct;
+ Emit(encoding);
+}
+
void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
CHECK_NE(rs, kNoGpuRegister);
CHECK_NE(rt, kNoGpuRegister);
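EmitRsd and EmitRtd pack the standard MIPS R-type fields (opcode[31:26], rs[25:21], rt[20:16], rd[15:11], shamt[10:6], funct[5:0]) with the unused register field forced to ZERO. A small sketch of that packing; the literal shift amounts stand in for kOpcodeShift and friends:

#include <cassert>
#include <cstdint>

// Standard MIPS R-type encoding: opcode, rs, rt, rd, shamt, funct from high to low bits.
constexpr uint32_t EncodeR(uint32_t opcode, uint32_t rs, uint32_t rt,
                           uint32_t rd, uint32_t shamt, uint32_t funct) {
  return (opcode << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (shamt << 6) | funct;
}

int main() {
  // dclz rd=1, rs=2: opcode 0, rt forced to 0, shamt 0x01, funct 0x12, as Dclz emits above.
  assert(EncodeR(0, 2, 0, 1, 0x01, 0x12) == ((2u << 21) | (1u << 11) | (1u << 6) | 0x12));
  return 0;
}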
@@ -235,6 +261,14 @@
EmitR(0, rs, rt, rd, 0, 0x27);
}
+void Mips64Assembler::Bitswap(GpuRegister rd, GpuRegister rt) {
+ EmitRtd(0x1f, rt, rd, 0x0, 0x20);
+}
+
+void Mips64Assembler::Dbitswap(GpuRegister rd, GpuRegister rt) {
+ EmitRtd(0x1f, rt, rd, 0x0, 0x24);
+}
+
void Mips64Assembler::Seb(GpuRegister rd, GpuRegister rt) {
EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x10, 0x20);
}
@@ -243,12 +277,44 @@
EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x18, 0x20);
}
+void Mips64Assembler::Dsbh(GpuRegister rd, GpuRegister rt) {
+ EmitRtd(0x1f, rt, rd, 0x2, 0x24);
+}
+
+void Mips64Assembler::Dshd(GpuRegister rd, GpuRegister rt) {
+ EmitRtd(0x1f, rt, rd, 0x5, 0x24);
+}
+
void Mips64Assembler::Dext(GpuRegister rt, GpuRegister rs, int pos, int size_less_one) {
DCHECK(0 <= pos && pos < 32) << pos;
DCHECK(0 <= size_less_one && size_less_one < 32) << size_less_one;
EmitR(0x1f, rs, rt, static_cast<GpuRegister>(size_less_one), pos, 3);
}
+void Mips64Assembler::Wsbh(GpuRegister rd, GpuRegister rt) {
+ EmitRtd(0x1f, rt, rd, 2, 0x20);
+}
+
+void Mips64Assembler::Sc(GpuRegister rt, GpuRegister base, int16_t imm9) {
+ DCHECK((-256 <= imm9) && (imm9 < 256));
+ EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x26);
+}
+
+void Mips64Assembler::Scd(GpuRegister rt, GpuRegister base, int16_t imm9) {
+ DCHECK((-256 <= imm9) && (imm9 < 256));
+ EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x27);
+}
+
+void Mips64Assembler::Ll(GpuRegister rt, GpuRegister base, int16_t imm9) {
+ DCHECK((-256 <= imm9) && (imm9 < 256));
+ EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x36);
+}
+
+void Mips64Assembler::Lld(GpuRegister rt, GpuRegister base, int16_t imm9) {
+ DCHECK((-256 <= imm9) && (imm9 < 256));
+ EmitI(0x1f, base, rt, ((imm9 & 0x1FF) << 7) | 0x37);
+}
+
void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x00);
}
@@ -257,6 +323,10 @@
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x02);
}
+void Mips64Assembler::Rotr(GpuRegister rd, GpuRegister rt, int shamt) {
+ EmitR(0, static_cast<GpuRegister>(1), rt, rd, shamt, 0x02);
+}
+
void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rt, int shamt) {
EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x03);
}
@@ -414,6 +484,30 @@
Nop();
}
+void Mips64Assembler::Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x35);
+}
+
+void Mips64Assembler::Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
+ EmitR(0, rs, rt, rd, 0, 0x37);
+}
+
+void Mips64Assembler::Clz(GpuRegister rd, GpuRegister rs) {
+ EmitRsd(0, rs, rd, 0x01, 0x10);
+}
+
+void Mips64Assembler::Clo(GpuRegister rd, GpuRegister rs) {
+ EmitRsd(0, rs, rd, 0x01, 0x11);
+}
+
+void Mips64Assembler::Dclz(GpuRegister rd, GpuRegister rs) {
+ EmitRsd(0, rs, rd, 0x01, 0x12);
+}
+
+void Mips64Assembler::Dclo(GpuRegister rd, GpuRegister rs) {
+ EmitRsd(0, rs, rd, 0x01, 0x13);
+}
+
void Mips64Assembler::Jalr(GpuRegister rd, GpuRegister rs) {
EmitR(0, rs, static_cast<GpuRegister>(0), rd, 0, 0x09);
Nop();
@@ -543,6 +637,22 @@
EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
}
+void Mips64Assembler::SqrtS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x4);
+}
+
+void Mips64Assembler::SqrtD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x4);
+}
+
+void Mips64Assembler::AbsS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x5);
+}
+
+void Mips64Assembler::AbsD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x5);
+}
+
void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
}
@@ -559,6 +669,94 @@
EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x7);
}
+void Mips64Assembler::RoundLS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x8);
+}
+
+void Mips64Assembler::RoundLD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x8);
+}
+
+void Mips64Assembler::RoundWS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xc);
+}
+
+void Mips64Assembler::RoundWD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xc);
+}
+
+void Mips64Assembler::CeilLS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xa);
+}
+
+void Mips64Assembler::CeilLD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xa);
+}
+
+void Mips64Assembler::CeilWS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xe);
+}
+
+void Mips64Assembler::CeilWD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xe);
+}
+
+void Mips64Assembler::FloorLS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xb);
+}
+
+void Mips64Assembler::FloorLD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xb);
+}
+
+void Mips64Assembler::FloorWS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0xf);
+}
+
+void Mips64Assembler::FloorWD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0xf);
+}
+
+void Mips64Assembler::SelS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x10);
+}
+
+void Mips64Assembler::SelD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x10);
+}
+
+void Mips64Assembler::RintS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1a);
+}
+
+void Mips64Assembler::RintD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1a);
+}
+
+void Mips64Assembler::ClassS(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x1b);
+}
+
+void Mips64Assembler::ClassD(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x1b);
+}
+
+void Mips64Assembler::MinS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x1c);
+}
+
+void Mips64Assembler::MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x1c);
+}
+
+void Mips64Assembler::MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x10, ft, fs, fd, 0x1e);
+}
+
+void Mips64Assembler::MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
+ EmitFR(0x11, 0x11, ft, fs, fd, 0x1e);
+}
+
void Mips64Assembler::Cvtsw(FpuRegister fd, FpuRegister fs) {
EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x20);
}
@@ -575,6 +773,10 @@
EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);
}
+void Mips64Assembler::Cvtdl(FpuRegister fd, FpuRegister fs) {
+ EmitFR(0x11, 0x15, static_cast<FpuRegister>(0), fs, fd, 0x21);
+}
+
void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}
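
Note on the encoding helpers added above: EmitRsd and EmitRtd pack the standard MIPS R-type fields (opcode in bits 31:26, rs in 25:21, rt in 20:16, rd in 15:11, shamt in 10:6, funct in 5:0) with either rt or rs hard-wired to ZERO. Below is a minimal standalone sketch of the same packing; the field positions are assumed to match the usual MIPS layout and the assembler's own kOpcodeShift/kRsShift/... constants, which are defined elsewhere in the tree.

    #include <cstdint>
    #include <cstdio>

    // Assumed MIPS R-type field positions (the assembler's kOpcodeShift, kRsShift,
    // kRtShift, kRdShift and kShamtShift are expected to carry the same values).
    constexpr int kOp = 26, kRs = 21, kRt = 16, kRd = 11, kSa = 6;

    // Same packing as EmitRtd with the rs field forced to register 0 ($zero):
    // SPECIAL3 opcode 0x1f, shamt selects the sub-operation, funct selects the group.
    uint32_t PackRtd(uint32_t opcode, uint32_t rt, uint32_t rd, uint32_t shamt, uint32_t funct) {
      return opcode << kOp | 0u << kRs | rt << kRt | rd << kRd | shamt << kSa | funct;
    }

    int main() {
      // dsbh with rd = 8 and rt = 9 (arbitrary GPR numbers), as emitted by Dsbh() above:
      // opcode 0x1f, shamt 0x2, funct 0x24.
      printf("dsbh: 0x%08x\n", static_cast<unsigned>(PackRtd(0x1f, /*rt=*/9, /*rd=*/8, 0x2, 0x24)));
      return 0;
    }
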
diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h
index 31130ea..a120abb 100644
--- a/compiler/utils/mips64/assembler_mips64.h
+++ b/compiler/utils/mips64/assembler_mips64.h
@@ -90,12 +90,22 @@
void Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Bitswap(GpuRegister rd, GpuRegister rt); // R6
+ void Dbitswap(GpuRegister rd, GpuRegister rt); // R6
void Seb(GpuRegister rd, GpuRegister rt); // R2+
void Seh(GpuRegister rd, GpuRegister rt); // R2+
+ void Dsbh(GpuRegister rd, GpuRegister rt); // R2+
+ void Dshd(GpuRegister rd, GpuRegister rt); // R2+
void Dext(GpuRegister rs, GpuRegister rt, int pos, int size_less_one); // MIPS64
+ void Wsbh(GpuRegister rd, GpuRegister rt);
+ void Sc(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
+ void Scd(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
+ void Ll(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
+ void Lld(GpuRegister rt, GpuRegister base, int16_t imm9 = 0);
void Sll(GpuRegister rd, GpuRegister rt, int shamt);
void Srl(GpuRegister rd, GpuRegister rt, int shamt);
+ void Rotr(GpuRegister rd, GpuRegister rt, int shamt);
void Sra(GpuRegister rd, GpuRegister rt, int shamt);
void Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
void Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs);
@@ -133,6 +143,12 @@
void Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt);
void Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16);
void Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16);
+ void Seleqz(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Selnez(GpuRegister rd, GpuRegister rs, GpuRegister rt);
+ void Clz(GpuRegister rd, GpuRegister rs);
+ void Clo(GpuRegister rd, GpuRegister rs);
+ void Dclz(GpuRegister rd, GpuRegister rs);
+ void Dclo(GpuRegister rd, GpuRegister rs);
void Beq(GpuRegister rs, GpuRegister rt, uint16_t imm16);
void Bne(GpuRegister rs, GpuRegister rt, uint16_t imm16);
@@ -165,15 +181,42 @@
void SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void SqrtS(FpuRegister fd, FpuRegister fs);
+ void SqrtD(FpuRegister fd, FpuRegister fs);
+ void AbsS(FpuRegister fd, FpuRegister fs);
+ void AbsD(FpuRegister fd, FpuRegister fs);
void MovS(FpuRegister fd, FpuRegister fs);
void MovD(FpuRegister fd, FpuRegister fs);
void NegS(FpuRegister fd, FpuRegister fs);
void NegD(FpuRegister fd, FpuRegister fs);
+ void RoundLS(FpuRegister fd, FpuRegister fs);
+ void RoundLD(FpuRegister fd, FpuRegister fs);
+ void RoundWS(FpuRegister fd, FpuRegister fs);
+ void RoundWD(FpuRegister fd, FpuRegister fs);
+ void CeilLS(FpuRegister fd, FpuRegister fs);
+ void CeilLD(FpuRegister fd, FpuRegister fs);
+ void CeilWS(FpuRegister fd, FpuRegister fs);
+ void CeilWD(FpuRegister fd, FpuRegister fs);
+ void FloorLS(FpuRegister fd, FpuRegister fs);
+ void FloorLD(FpuRegister fd, FpuRegister fs);
+ void FloorWS(FpuRegister fd, FpuRegister fs);
+ void FloorWD(FpuRegister fd, FpuRegister fs);
+ void SelS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void SelD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void RintS(FpuRegister fd, FpuRegister fs);
+ void RintD(FpuRegister fd, FpuRegister fs);
+ void ClassS(FpuRegister fd, FpuRegister fs);
+ void ClassD(FpuRegister fd, FpuRegister fs);
+ void MinS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void MinD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void MaxS(FpuRegister fd, FpuRegister fs, FpuRegister ft);
+ void MaxD(FpuRegister fd, FpuRegister fs, FpuRegister ft);
void Cvtsw(FpuRegister fd, FpuRegister fs);
void Cvtdw(FpuRegister fd, FpuRegister fs);
void Cvtsd(FpuRegister fd, FpuRegister fs);
void Cvtds(FpuRegister fd, FpuRegister fs);
+ void Cvtdl(FpuRegister fd, FpuRegister fs);
void Mfc1(GpuRegister rt, FpuRegister fs);
void Mtc1(GpuRegister rt, FpuRegister fs);
@@ -342,6 +385,8 @@
private:
void EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd, int shamt, int funct);
+ void EmitRsd(int opcode, GpuRegister rs, GpuRegister rd, int shamt, int funct);
+ void EmitRtd(int opcode, GpuRegister rt, GpuRegister rd, int shamt, int funct);
void EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm);
void EmitI21(int opcode, GpuRegister rs, uint32_t imm21);
void EmitJ(int opcode, uint32_t addr26);
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 10ae1ca..8cbca0b 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -523,6 +523,7 @@
dump_passes_(false),
dump_timing_(false),
dump_slow_timing_(kIsDebugBuild),
+ dump_cfg_append_(false),
swap_fd_(-1),
timings_(timings) {}
@@ -1096,6 +1097,8 @@
dump_passes_ = true;
} else if (option.starts_with("--dump-cfg=")) {
dump_cfg_file_name_ = option.substr(strlen("--dump-cfg=")).data();
+ } else if (option.starts_with("--dump-cfg-append")) {
+ dump_cfg_append_ = true;
} else if (option == "--dump-stats") {
dump_stats_ = true;
} else if (option == "--generate-debug-info" || option == "-g") {
@@ -1478,6 +1481,7 @@
dump_stats_,
dump_passes_,
dump_cfg_file_name_,
+ dump_cfg_append_,
compiler_phases_timings_.get(),
swap_fd_,
profile_file_));
@@ -1986,6 +1990,7 @@
bool dump_timing_;
bool dump_slow_timing_;
std::string dump_cfg_file_name_;
+ bool dump_cfg_append_;
std::string swap_file_name_;
int swap_fd_;
std::string profile_file_; // Profile file to use
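
For the new --dump-cfg-append flag threaded through to the CompilerDriver above, the natural effect is to open the --dump-cfg output for appending instead of truncating it, so several dex2oat runs can accumulate into one .cfg file. The sketch below shows a generic append-versus-truncate choice under that assumption, with a hypothetical helper name; it is not the code path dex2oat actually uses.

    #include <fstream>
    #include <string>

    // Hypothetical helper: open a dump file either truncated or in append mode.
    std::ofstream OpenDumpFile(const std::string& file_name, bool append) {
      std::ios_base::openmode mode = std::ios_base::out |
          (append ? std::ios_base::app : std::ios_base::trunc);
      return std::ofstream(file_name, mode);
    }

    int main() {
      std::ofstream out = OpenDumpFile("dump.cfg", /*append=*/true);
      out << "begin_compilation\n";  // Illustrative payload only.
      return 0;
    }
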
diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc
index 70ca88d..c55d285 100644
--- a/disassembler/disassembler_mips.cc
+++ b/disassembler/disassembler_mips.cc
@@ -46,6 +46,7 @@
static const uint32_t kRTypeMask = ((0x3f << kOpcodeShift) | (0x3f));
static const uint32_t kSpecial0Mask = (0x3f << kOpcodeShift);
static const uint32_t kSpecial2Mask = (0x3f << kOpcodeShift);
+static const uint32_t kSpecial3Mask = (0x3f << kOpcodeShift);
static const uint32_t kFpMask = kRTypeMask;
static const MipsInstruction gMipsInstructions[] = {
@@ -98,7 +99,6 @@
{ kRTypeMask, 46, "dsub", "DST", },
{ kRTypeMask, 47, "dsubu", "DST", },
// TODO: tge[u], tlt[u], teg, tne
- // TODO: seleqz, selnez
{ kRTypeMask, 56, "dsll", "DTA", },
{ kRTypeMask, 58, "dsrl", "DTA", },
{ kRTypeMask, 59, "dsra", "DTA", },
@@ -106,9 +106,6 @@
{ kRTypeMask | (0x1f << 21), 62 | (1 << 21), "drotr32", "DTA", },
{ kRTypeMask, 62, "dsrl32", "DTA", },
{ kRTypeMask, 63, "dsra32", "DTA", },
- { kRTypeMask, (31u << kOpcodeShift) | 3, "dext", "TSAZ", },
- { kRTypeMask | (0x1f << 21) | (0x1f << 6), (31u << 26) | (16 << 6) | 32, "seb", "DT", },
- { kRTypeMask | (0x1f << 21) | (0x1f << 6), (31u << 26) | (24 << 6) | 32, "seh", "DT", },
// SPECIAL0
{ kSpecial0Mask | 0x7ff, (2 << 6) | 24, "mul", "DST" },
@@ -127,7 +124,13 @@
{ kSpecial0Mask | 0x7ff, (3 << 6) | 30, "dmod", "DST" },
{ kSpecial0Mask | 0x7ff, (2 << 6) | 31, "ddivu", "DST" },
{ kSpecial0Mask | 0x7ff, (3 << 6) | 31, "dmodu", "DST" },
- // TODO: [d]clz, [d]clo
+ { kSpecial0Mask | 0x7ff, (0 << 6) | 53, "seleqz", "DST" },
+ { kSpecial0Mask | 0x7ff, (0 << 6) | 55, "selnez", "DST" },
+ { kSpecial0Mask | (0x1f << 21) | 0x3f, (1 << 21) | 2, "rotr", "DTA", },
+ { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x10, "clz", "DS" },
+ { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x11, "clo", "DS" },
+ { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x12, "dclz", "DS" },
+ { kSpecial0Mask | (0x1f << 16) | 0x7ff, (0x01 << 6) | 0x13, "dclo", "DS" },
// TODO: sdbbp
// SPECIAL2
@@ -140,6 +143,20 @@
{ kSpecial2Mask | 0xffff, (28 << kOpcodeShift) | 5, "msubu", "ST" },
{ kSpecial2Mask | 0x3f, (28 << kOpcodeShift) | 0x3f, "sdbbp", "" }, // TODO: code
+ // SPECIAL3
+ { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 3, "dext", "TSAZ", },
+ { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | (16 << 6) | 32, "seb", "DT", },
+ { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | (24 << 6) | 32, "seh", "DT", },
+ { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | 32, "bitswap", "DT", },
+ { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | 36, "dbitswap", "DT", },
+ { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | (2 << 6) | 36, "dsbh", "DT", },
+ { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | (5 << 6) | 36, "dshd", "DT", },
+ { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | (2 << 6) | 32, "wsbh", "DT", },
+ { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x26, "sc", "Tl", },
+ { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x27, "scd", "Tl", },
+ { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x36, "ll", "Tl", },
+ { kSpecial3Mask | 0x7f, (31 << kOpcodeShift) | 0x37, "lld", "Tl", },
+
// J-type instructions.
{ kJTypeMask, 2 << kOpcodeShift, "j", "L" },
{ kJTypeMask, 3 << kOpcodeShift, "jal", "L" },
@@ -305,11 +322,16 @@
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 13, "trunc.w", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 14, "ceil.w", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 15, "floor.w", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 26, "rint", "fad" },
+ { kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 27, "class", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 32, "cvt.s", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 33, "cvt.d", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 36, "cvt.w", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 37, "cvt.l", "fad" },
{ kFpMask | (0x21f << 16), kCop1 | (0x200 << 16) | 38, "cvt.ps", "fad" },
+ { kFpMask, kCop1 | 0x10, "sel", "fadt" },
+ { kFpMask, kCop1 | 0x1e, "max", "fadt" },
+ { kFpMask, kCop1 | 0x1c, "min", "fadt" },
};
static uint32_t ReadU32(const uint8_t* ptr) {
@@ -390,6 +412,12 @@
args << reinterpret_cast<void*>(target);
}
break;
+ case 'l': // 9-bit signed offset
+ {
+ int32_t offset = static_cast<int16_t>(instruction) >> 7;
+ args << StringPrintf("%+d(r%d)", offset, rs);
+ }
+ break;
case 'O': // +x(rs)
{
int32_t offset = static_cast<int16_t>(instruction & 0xffff);
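
The new 'l' argument format above recovers the 9-bit signed offset of the R6 ll/lld/sc/scd encodings from bits 15:7 of the instruction word: casting the low half-word to int16_t and arithmetically shifting right by 7 drops the funct bits and sign-extends the offset in one step (this relies on arithmetic right shift of negative values, as the surrounding code does). A small self-contained check of that decode follows; the sample half-words are built by hand here rather than taken from the assembler.

    #include <cassert>
    #include <cstdint>

    // Mirrors the 'l' case: offset9 occupies bits 15:7 of the instruction.
    int32_t DecodeOffset9(uint32_t instruction) {
      return static_cast<int16_t>(instruction) >> 7;
    }

    int main() {
      // Hand-built low half-words with funct = 0x26 (sc): offsets +5 and -4.
      uint32_t plus5 = ((5u & 0x1FF) << 7) | 0x26;
      uint32_t minus4 = ((static_cast<uint32_t>(-4) & 0x1FF) << 7) | 0x26;
      assert(DecodeOffset9(plus5) == 5);
      assert(DecodeOffset9(minus4) == -4);
      return 0;
    }
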
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index a36b0fb..337df38 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -57,14 +57,23 @@
"STL ",
"Graph ",
"BasicBlock ",
+ "BlockList ",
+ "RevPostOrder ",
+ "LinearOrder ",
+ "ConstantsMap ",
"Predecessors ",
"Successors ",
"Dominated ",
"Instruction ",
+ "InvokeInputs ",
+ "PhiInputs ",
"LoopInfo ",
+ "LIBackEdges ",
"TryCatchInf ",
"UseListNode ",
"Environment ",
+ "EnvVRegs ",
+ "EnvLocations ",
"MoveOperands ",
"CodeBuffer ",
"StackMaps ",
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 47defb4..8104978 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -67,14 +67,23 @@
kArenaAllocSTL,
kArenaAllocGraph,
kArenaAllocBasicBlock,
+ kArenaAllocBlockList,
+ kArenaAllocReversePostOrder,
+ kArenaAllocLinearOrder,
+ kArenaAllocConstantsMap,
kArenaAllocPredecessors,
kArenaAllocSuccessors,
kArenaAllocDominated,
kArenaAllocInstruction,
+ kArenaAllocInvokeInputs,
+ kArenaAllocPhiInputs,
kArenaAllocLoopInfo,
+ kArenaAllocLoopInfoBackEdges,
kArenaAllocTryCatchInfo,
kArenaAllocUseListNode,
kArenaAllocEnvironment,
+ kArenaAllocEnvironmentVRegs,
+ kArenaAllocEnvironmentLocations,
kArenaAllocMoveOperands,
kArenaAllocCodeBuffer,
kArenaAllocStackMaps,
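
The strings added to arena_allocator.cc and the kArenaAlloc* enumerators added to arena_allocator.h above are parallel lists, so each new kind must land at the same position in both. A compile-time guard of the kind sketched below, using hypothetical miniature names that are not part of this change, is one common way such pairs are kept in sync.

    // Hypothetical miniature of the enum/name-table pairing; the real
    // kArenaAlloc* list is much longer.
    enum AllocKind { kAllocGraph, kAllocBasicBlock, kAllocBlockList, kNumAllocKinds };

    static const char* const kAllocKindNames[] = {
      "Graph       ",
      "BasicBlock  ",
      "BlockList   ",
    };

    // Fails to compile if an enumerator is added without a matching name string.
    static_assert(sizeof(kAllocKindNames) / sizeof(kAllocKindNames[0]) == kNumAllocKinds,
                  "alloc kind names out of sync with enum");

    int main() { return 0; }
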
diff --git a/runtime/gc/accounting/mod_union_table-inl.h b/runtime/gc/accounting/mod_union_table-inl.h
index c756127..3a09634 100644
--- a/runtime/gc/accounting/mod_union_table-inl.h
+++ b/runtime/gc/accounting/mod_union_table-inl.h
@@ -28,7 +28,8 @@
// A mod-union table to record image references to the Zygote and alloc space.
class ModUnionTableToZygoteAllocspace : public ModUnionTableReferenceCache {
public:
- explicit ModUnionTableToZygoteAllocspace(const std::string& name, Heap* heap,
+ explicit ModUnionTableToZygoteAllocspace(const std::string& name,
+ Heap* heap,
space::ContinuousSpace* space)
: ModUnionTableReferenceCache(name, heap, space) {}
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 5151819..1361f7b 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -27,9 +27,7 @@
#include "gc/space/space.h"
#include "mirror/object-inl.h"
#include "space_bitmap-inl.h"
-#include "thread.h"
-
-using ::art::mirror::Object;
+#include "thread-inl.h"
namespace art {
namespace gc {
@@ -38,10 +36,10 @@
class ModUnionAddToCardSetVisitor {
public:
explicit ModUnionAddToCardSetVisitor(ModUnionTable::CardSet* const cleared_cards)
- : cleared_cards_(cleared_cards) {
- }
+ : cleared_cards_(cleared_cards) {}
- inline void operator()(uint8_t* card, uint8_t expected_value,
+ inline void operator()(uint8_t* card,
+ uint8_t expected_value,
uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
@@ -55,10 +53,10 @@
class ModUnionAddToCardBitmapVisitor {
public:
ModUnionAddToCardBitmapVisitor(ModUnionTable::CardBitmap* bitmap, CardTable* card_table)
- : bitmap_(bitmap), card_table_(card_table) {
- }
+ : bitmap_(bitmap), card_table_(card_table) {}
- inline void operator()(uint8_t* card, uint8_t expected_value,
+ inline void operator()(uint8_t* card,
+ uint8_t expected_value,
uint8_t new_value ATTRIBUTE_UNUSED) const {
if (expected_value == CardTable::kCardDirty) {
// We want the address the card represents, not the address of the card.
@@ -93,12 +91,13 @@
space::ContinuousSpace* from_space,
space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : visitor_(visitor), from_space_(from_space), immune_space_(immune_space),
- contains_reference_to_other_space_(contains_reference_to_other_space) {
- }
+ : visitor_(visitor),
+ from_space_(from_space),
+ immune_space_(immune_space),
+ contains_reference_to_other_space_(contains_reference_to_other_space) {}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
MarkReference(obj->GetFieldObjectReferenceAddr(offset));
}
@@ -144,14 +143,18 @@
space::ContinuousSpace* from_space,
space::ContinuousSpace* immune_space,
bool* contains_reference_to_other_space)
- : visitor_(visitor), from_space_(from_space), immune_space_(immune_space),
+ : visitor_(visitor),
+ from_space_(from_space),
+ immune_space_(immune_space),
contains_reference_to_other_space_(contains_reference_to_other_space) {}
- void operator()(Object* root) const
+ void operator()(mirror::Object* root) const
REQUIRES(Locks::heap_bitmap_lock_)
SHARED_REQUIRES(Locks::mutator_lock_) {
DCHECK(root != nullptr);
- ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, from_space_, immune_space_,
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_,
+ from_space_,
+ immune_space_,
contains_reference_to_other_space_);
root->VisitReferences(ref_visitor, VoidFunctor());
}
@@ -176,7 +179,7 @@
public:
AddToReferenceArrayVisitor(ModUnionTableReferenceCache* mod_union_table,
MarkObjectVisitor* visitor,
- std::vector<mirror::HeapReference<Object>*>* references,
+ std::vector<mirror::HeapReference<mirror::Object>*>* references,
bool* has_target_reference)
: mod_union_table_(mod_union_table),
visitor_(visitor),
@@ -184,9 +187,9 @@
has_target_reference_(has_target_reference) {}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::mutator_lock_) {
- mirror::HeapReference<Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
+ mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
mirror::Object* ref = ref_ptr->AsMirrorPtr();
// Only add the reference if it is non null and fits our criteria.
if (ref != nullptr && mod_union_table_->ShouldAddReference(ref)) {
@@ -214,7 +217,7 @@
private:
ModUnionTableReferenceCache* const mod_union_table_;
MarkObjectVisitor* const visitor_;
- std::vector<mirror::HeapReference<Object>*>* const references_;
+ std::vector<mirror::HeapReference<mirror::Object>*>* const references_;
bool* const has_target_reference_;
};
@@ -222,14 +225,14 @@
public:
ModUnionReferenceVisitor(ModUnionTableReferenceCache* const mod_union_table,
MarkObjectVisitor* visitor,
- std::vector<mirror::HeapReference<Object>*>* references,
+ std::vector<mirror::HeapReference<mirror::Object>*>* references,
bool* has_target_reference)
: mod_union_table_(mod_union_table),
visitor_(visitor),
references_(references),
has_target_reference_(has_target_reference) {}
- void operator()(Object* obj) const
+ void operator()(mirror::Object* obj) const
SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
// We don't have an early exit since we use the visitor pattern, an early
// exit should significantly speed this up.
@@ -243,23 +246,23 @@
private:
ModUnionTableReferenceCache* const mod_union_table_;
MarkObjectVisitor* const visitor_;
- std::vector<mirror::HeapReference<Object>*>* const references_;
+ std::vector<mirror::HeapReference<mirror::Object>*>* const references_;
bool* const has_target_reference_;
};
class CheckReferenceVisitor {
public:
CheckReferenceVisitor(ModUnionTableReferenceCache* mod_union_table,
- const std::set<const Object*>& references)
+ const std::set<mirror::Object*>& references)
: mod_union_table_(mod_union_table),
- references_(references) {
- }
+ references_(references) {}
// Extra parameters are required since we use this same visitor signature for checking objects.
- void operator()(Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool is_static ATTRIBUTE_UNUSED) const
SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
- if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) &&
+ if (ref != nullptr &&
+ mod_union_table_->ShouldAddReference(ref) &&
references_.find(ref) == references_.end()) {
Heap* heap = mod_union_table_->GetHeap();
space::ContinuousSpace* from_space = heap->FindContinuousSpaceFromObject(obj, false);
@@ -290,18 +293,17 @@
private:
ModUnionTableReferenceCache* const mod_union_table_;
- const std::set<const Object*>& references_;
+ const std::set<mirror::Object*>& references_;
};
class ModUnionCheckReferences {
public:
ModUnionCheckReferences(ModUnionTableReferenceCache* mod_union_table,
- const std::set<const Object*>& references)
+ const std::set<mirror::Object*>& references)
REQUIRES(Locks::heap_bitmap_lock_)
- : mod_union_table_(mod_union_table), references_(references) {
- }
+ : mod_union_table_(mod_union_table), references_(references) {}
- void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(mirror::Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
CheckReferenceVisitor visitor(mod_union_table_, references_);
obj->VisitReferences(visitor, VoidFunctor());
@@ -309,13 +311,13 @@
private:
ModUnionTableReferenceCache* const mod_union_table_;
- const std::set<const Object*>& references_;
+ const std::set<mirror::Object*>& references_;
};
void ModUnionTableReferenceCache::Verify() {
// Start by checking that everything in the mod union table is marked.
for (const auto& ref_pair : references_) {
- for (mirror::HeapReference<Object>* ref : ref_pair.second) {
+ for (mirror::HeapReference<mirror::Object>* ref : ref_pair.second) {
CHECK(heap_->IsLiveObjectLocked(ref->AsMirrorPtr()));
}
}
@@ -326,8 +328,8 @@
for (const auto& ref_pair : references_) {
const uint8_t* card = ref_pair.first;
if (*card == CardTable::kCardClean) {
- std::set<const Object*> reference_set;
- for (mirror::HeapReference<Object>* obj_ptr : ref_pair.second) {
+ std::set<mirror::Object*> reference_set;
+ for (mirror::HeapReference<mirror::Object>* obj_ptr : ref_pair.second) {
reference_set.insert(obj_ptr->AsMirrorPtr());
}
ModUnionCheckReferences visitor(this, reference_set);
@@ -351,7 +353,7 @@
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
uintptr_t end = start + CardTable::kCardSize;
os << reinterpret_cast<void*>(start) << "-" << reinterpret_cast<void*>(end) << "->{";
- for (mirror::HeapReference<Object>* ref : ref_pair.second) {
+ for (mirror::HeapReference<mirror::Object>* ref : ref_pair.second) {
os << reinterpret_cast<const void*>(ref->AsMirrorPtr()) << ",";
}
os << "},";
@@ -360,7 +362,7 @@
void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
CardTable* const card_table = heap_->GetCardTable();
- std::vector<mirror::HeapReference<Object>*> cards_references;
+ std::vector<mirror::HeapReference<mirror::Object>*> cards_references;
// If has_target_reference is true then there was a GcRoot compressed reference which wasn't
// added. In this case we need to keep the card dirty.
// We don't know if the GcRoot addresses will remain constant, for example, classloaders have a
@@ -375,7 +377,7 @@
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + CardTable::kCardSize;
space::ContinuousSpace* space =
- heap_->FindContinuousSpaceFromObject(reinterpret_cast<Object*>(start), false);
+ heap_->FindContinuousSpaceFromObject(reinterpret_cast<mirror::Object*>(start), false);
DCHECK(space != nullptr);
ContinuousSpaceBitmap* live_bitmap = space->GetLiveBitmap();
live_bitmap->VisitMarkedRange(start, end, add_visitor);
@@ -402,12 +404,12 @@
cleared_cards_ = std::move(new_cleared_cards);
size_t count = 0;
for (auto it = references_.begin(); it != references_.end();) {
- std::vector<mirror::HeapReference<Object>*>& references = it->second;
+ std::vector<mirror::HeapReference<mirror::Object>*>& references = it->second;
// Since there is no card mark for setting a reference to null, we check each reference.
// If all of the references of a card are null then we can remove that card. This is racy
// with the mutators, but handled by rescanning dirty cards.
bool all_null = true;
- for (mirror::HeapReference<Object>* obj_ptr : references) {
+ for (mirror::HeapReference<mirror::Object>* obj_ptr : references) {
if (obj_ptr->AsMirrorPtr() != nullptr) {
all_null = false;
visitor->MarkHeapReference(obj_ptr);
@@ -426,7 +428,8 @@
}
}
-ModUnionTableCardCache::ModUnionTableCardCache(const std::string& name, Heap* heap,
+ModUnionTableCardCache::ModUnionTableCardCache(const std::string& name,
+ Heap* heap,
space::ContinuousSpace* space)
: ModUnionTable(name, heap, space) {
// Normally here we could use End() instead of Limit(), but for testing we may want to have a
@@ -441,10 +444,15 @@
class CardBitVisitor {
public:
- CardBitVisitor(MarkObjectVisitor* visitor, space::ContinuousSpace* space,
- space::ContinuousSpace* immune_space, ModUnionTable::CardBitmap* card_bitmap)
- : visitor_(visitor), space_(space), immune_space_(immune_space),
- bitmap_(space->GetLiveBitmap()), card_bitmap_(card_bitmap) {
+ CardBitVisitor(MarkObjectVisitor* visitor,
+ space::ContinuousSpace* space,
+ space::ContinuousSpace* immune_space,
+ ModUnionTable::CardBitmap* card_bitmap)
+ : visitor_(visitor),
+ space_(space),
+ immune_space_(immune_space),
+ bitmap_(space->GetLiveBitmap()),
+ card_bitmap_(card_bitmap) {
DCHECK(immune_space_ != nullptr);
}
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 5888193..a7a4246 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -29,26 +29,17 @@
namespace art {
namespace mirror {
- class Object;
+class Object;
} // namespace mirror
namespace gc {
-
-namespace collector {
- class MarkSweep;
-} // namespace collector
namespace space {
class ContinuousSpace;
- class Space;
} // namespace space
-
class Heap;
namespace accounting {
-class Bitmap;
-class HeapBitmap;
-
// The mod-union table is the union of modified cards. It is used to allow the card table to be
// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
class ModUnionTable {
@@ -60,8 +51,7 @@
explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name),
heap_(heap),
- space_(space) {
- }
+ space_(space) {}
virtual ~ModUnionTable() {}
@@ -89,12 +79,15 @@
virtual bool ContainsCardFor(uintptr_t addr) = 0;
virtual void Dump(std::ostream& os) = 0;
+
space::ContinuousSpace* GetSpace() {
return space_;
}
+
Heap* GetHeap() const {
return heap_;
}
+
const std::string& GetName() const {
return name_;
}
@@ -111,6 +104,7 @@
explicit ModUnionTableReferenceCache(const std::string& name, Heap* heap,
space::ContinuousSpace* space)
: ModUnionTable(name, heap, space) {}
+
virtual ~ModUnionTableReferenceCache() {}
// Clear and store cards for a space.
@@ -151,6 +145,7 @@
// Note: There is assumption that the space End() doesn't change.
explicit ModUnionTableCardCache(const std::string& name, Heap* heap,
space::ContinuousSpace* space);
+
virtual ~ModUnionTableCardCache() {}
// Clear and store cards for a space.
diff --git a/test/530-checker-loops/expected.txt b/test/530-checker-loops/expected.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/test/530-checker-loops/expected.txt
diff --git a/test/530-checker-loops/info.txt b/test/530-checker-loops/info.txt
new file mode 100644
index 0000000..f5d334d
--- /dev/null
+++ b/test/530-checker-loops/info.txt
@@ -0,0 +1 @@
+Test on loop optimizations.
diff --git a/test/530-checker-loops/src/Main.java b/test/530-checker-loops/src/Main.java
new file mode 100644
index 0000000..e518a61
--- /dev/null
+++ b/test/530-checker-loops/src/Main.java
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// Test on loop optimizations.
+//
+public class Main {
+
+ static int sResult;
+
+ //
+ // Various sequence variables for which bounds checks can be removed from the loop.
+ //
+
+ /// CHECK-START: int Main.linear(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linear(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int linear(int[] x) {
+ int result = 0;
+ for (int i = 0; i < x.length; i++) {
+ result += x[i];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.linearDown(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearDown(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int linearDown(int[] x) {
+ int result = 0;
+ for (int i = x.length - 1; i >= 0; i--) {
+ result += x[i];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.linearObscure(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearObscure(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int linearObscure(int[] x) {
+ int result = 0;
+ for (int i = x.length - 1; i >= 0; i--) {
+ int k = i + 5;
+ result += x[k - 5];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.linearWhile(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearWhile(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int linearWhile(int[] x) {
+ int i = 0;
+ int result = 0;
+ while (i < x.length) {
+ result += x[i++];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.wrapAroundThenLinear(int[]) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.wrapAroundThenLinear(int[]) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int wrapAroundThenLinear(int[] x) {
+ // Loop with wrap-around (length - 1, 0, 1, 2, ...).
+ int w = x.length - 1;
+ int result = 0;
+ for (int i = 0; i < x.length; i++) {
+ result += x[w];
+ w = i;
+ }
+ return result;
+ }
+
+ /// CHECK-START: int[] Main.linearWithParameter(int) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int[] Main.linearWithParameter(int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int[] linearWithParameter(int n) {
+ int[] x = new int[n];
+ for (int i = 0; i < n; i++) {
+ x[i] = i;
+ }
+ return x;
+ }
+
+ /// CHECK-START: int Main.linearWithCompoundStride() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearWithCompoundStride() BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int linearWithCompoundStride() {
+ int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };
+ int result = 0;
+ for (int i = 0; i <= 12; ) {
+ i++;
+ result += x[i];
+ i++;
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.linearWithLargePositiveStride() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearWithLargePositiveStride() BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int linearWithLargePositiveStride() {
+ int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+ int result = 0;
+ int k = 0;
+ // Range analysis has no problem with a trip-count defined by a
+ // reasonably large positive stride.
+ for (int i = 1; i <= 10 * 10000000 + 1; i += 10000000) {
+ result += x[k++];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.linearWithVeryLargePositiveStride() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearWithVeryLargePositiveStride() BCE (after)
+ /// CHECK-DAG: BoundsCheck
+ private static int linearWithVeryLargePositiveStride() {
+ int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+ int result = 0;
+ int k = 0;
+ // Range analysis conservatively bails out due to the potential for wrap-around
+ // arithmetic when computing the trip-count for this very large stride.
+ for (int i = 1; i < 2147483647; i += 195225786) {
+ result += x[k++];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.linearWithLargeNegativeStride() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearWithLargeNegativeStride() BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int linearWithLargeNegativeStride() {
+ int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+ int result = 0;
+ int k = 0;
+ // Range analysis has no problem with a trip-count defined by a
+ // reasonably large negative stride.
+ for (int i = -1; i >= -10 * 10000000 - 1; i -= 10000000) {
+ result += x[k++];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.linearWithVeryLargeNegativeStride() BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.linearWithVeryLargeNegativeStride() BCE (after)
+ /// CHECK-DAG: BoundsCheck
+ private static int linearWithVeryLargeNegativeStride() {
+ int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
+ int result = 0;
+ int k = 0;
+ // Range analysis conservatively bails out due to the potential for wrap-around
+ // arithmetic when computing the trip-count for this very large stride.
+ for (int i = -2; i > -2147483648; i -= 195225786) {
+ result += x[k++];
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.periodicIdiom(int) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.periodicIdiom(int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int periodicIdiom(int tc) {
+ int[] x = { 1, 3 };
+ // Loop with periodic sequence (0, 1).
+ int k = 0;
+ int result = 0;
+ for (int i = 0; i < tc; i++) {
+ result += x[k];
+ k = 1 - k;
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.periodicSequence2(int) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.periodicSequence2(int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int periodicSequence2(int tc) {
+ int[] x = { 1, 3 };
+ // Loop with periodic sequence (0, 1).
+ int k = 0;
+ int l = 1;
+ int result = 0;
+ for (int i = 0; i < tc; i++) {
+ result += x[k];
+ int t = l;
+ l = k;
+ k = t;
+ }
+ return result;
+ }
+
+ /// CHECK-START: int Main.periodicSequence4(int) BCE (before)
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-DAG: BoundsCheck
+ /// CHECK-START: int Main.periodicSequence4(int) BCE (after)
+ /// CHECK-NOT: BoundsCheck
+ private static int periodicSequence4(int tc) {
+ int[] x = { 1, 3, 5, 7 };
+ // Loop with periodic sequence (0, 1, 2, 3).
+ int k = 0;
+ int l = 1;
+ int m = 2;
+ int n = 3;
+ int result = 0;
+ for (int i = 0; i < tc; i++) {
+ result += x[k] + x[l] + x[m] + x[n]; // all used at once
+ int t = n;
+ n = k;
+ k = l;
+ l = m;
+ m = t;
+ }
+ return result;
+ }
+
+ //
+ // Cases that actually go out of bounds. These test cases
+ // ensure the exceptions are thrown at the right places.
+ //
+
+ private static void lowerOOB(int[] x) {
+ for (int i = -1; i < x.length; i++) {
+ sResult += x[i];
+ }
+ }
+
+ private static void upperOOB(int[] x) {
+ for (int i = 0; i <= x.length; i++) {
+ sResult += x[i];
+ }
+ }
+
+ //
+ // Verifier.
+ //
+
+ public static void main(String[] args) {
+ int[] empty = { };
+ int[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
+
+ // Linear and wrap-around.
+ expectEquals(0, linear(empty));
+ expectEquals(55, linear(x));
+ expectEquals(0, linearDown(empty));
+ expectEquals(55, linearDown(x));
+ expectEquals(0, linearObscure(empty));
+ expectEquals(55, linearObscure(x));
+ expectEquals(0, linearWhile(empty));
+ expectEquals(55, linearWhile(x));
+ expectEquals(0, wrapAroundThenLinear(empty));
+ expectEquals(55, wrapAroundThenLinear(x));
+
+ // Linear with parameter.
+ sResult = 0;
+ try {
+ linearWithParameter(-1);
+ } catch (NegativeArraySizeException e) {
+ sResult = 1;
+ }
+ expectEquals(1, sResult);
+ for (int n = 0; n < 32; n++) {
+ int[] r = linearWithParameter(n);
+ expectEquals(n, r.length);
+ for (int i = 0; i < n; i++) {
+ expectEquals(i, r[i]);
+ }
+ }
+
+ // Linear with non-unit strides.
+ expectEquals(56, linearWithCompoundStride());
+ expectEquals(66, linearWithLargePositiveStride());
+ expectEquals(66, linearWithVeryLargePositiveStride());
+ expectEquals(66, linearWithLargeNegativeStride());
+ expectEquals(66, linearWithVeryLargeNegativeStride());
+
+ // Periodic adds (1, 3), one at a time.
+ expectEquals(0, periodicIdiom(-1));
+ for (int tc = 0; tc < 32; tc++) {
+ int expected = (tc >> 1) << 2;
+ if ((tc & 1) != 0)
+ expected += 1;
+ expectEquals(expected, periodicIdiom(tc));
+ }
+
+ // Periodic adds (1, 3), one at a time.
+ expectEquals(0, periodicSequence2(-1));
+ for (int tc = 0; tc < 32; tc++) {
+ int expected = (tc >> 1) << 2;
+ if ((tc & 1) != 0)
+ expected += 1;
+ expectEquals(expected, periodicSequence2(tc));
+ }
+
+ // Periodic adds (1, 3, 5, 7), all at once.
+ expectEquals(0, periodicSequence4(-1));
+ for (int tc = 0; tc < 32; tc++) {
+ expectEquals(tc * 16, periodicSequence4(tc));
+ }
+
+ // Lower bound goes OOB.
+ sResult = 0;
+ try {
+ lowerOOB(x);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ sResult += 1000;
+ }
+ expectEquals(1000, sResult);
+
+ // Upper bound goes OOB.
+ sResult = 0;
+ try {
+ upperOOB(x);
+ } catch (ArrayIndexOutOfBoundsException e) {
+ sResult += 1000;
+ }
+ expectEquals(1055, sResult);
+
+ }
+
+ private static void expectEquals(int expected, int result) {
+ if (expected != result) {
+ throw new Error("Expected: " + expected + ", found: " + result);
+ }
+ }
+}
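
On the 'very large stride' cases above, which intentionally keep their BoundsCheck: a 32-bit trip-count computation for those bounds can wrap around. The snippet below illustrates that hazard numerically with one plausible ceiling-division trip-count formula (not necessarily the formula the range analysis uses), evaluated in 64-bit arithmetic.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Bounds and stride from linearWithVeryLargePositiveStride().
      const int64_t lower = 1, upper = 2147483647, stride = 195225786;

      // Ceiling division in 64 bits gives the expected 11 iterations.
      const int64_t trip = (upper - lower + stride - 1) / stride;
      printf("trip count: %lld\n", static_cast<long long>(trip));

      // The numerator of that formula no longer fits in a 32-bit int, which is the
      // wrap-around the analysis must conservatively assume can happen.
      const int64_t numerator = upper - lower + stride - 1;
      printf("numerator %lld fits in int32_t? %s\n",
             static_cast<long long>(numerator),
             numerator <= INT32_MAX ? "yes" : "no");
      return 0;
    }
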
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 2ee87e5..c69b819 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -71,6 +71,11 @@
# We need to provide our own linker in case the linker on the device
# is out of date.
env="TARGET_GLOBAL_LDFLAGS=-Wl,-dynamic-linker=$android_root/bin/$linker"
+ # gcc gives a linker error, so compile with clang.
+ # TODO: investigate and fix?
+ if [[ $TARGET_PRODUCT == "mips32r2_fp" ]]; then
+ env="$env USE_CLANG_PLATFORM_BUILD=true"
+ fi
# Use '-e' to force the override of TARGET_GLOBAL_LDFLAGS.
# Also, we build extra tools that will be used by tests, so that
# they are compiled with our own linker.