Final CL to enable register allocation on x86.
This CL implements:
1) Resolution after allocation: connecting the locations allocated
to an interval, both within a block and across block boundaries
(see the first sketch after this list).
2) Handling of fixed registers: some instructions require their
inputs/outputs to be in specific locations, and the allocator must
handle these constraints specially (see the second sketch below).
3) ParallelMoveResolver::EmitNativeCode for x86 (a stack-to-stack
move sketch follows the list).
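
A minimal, self-contained sketch of the resolution step in 1). The
Location, Sibling and ConnectSiblings names below are illustrative
stand-ins, not ART's LiveInterval/Location classes; the point is only
that a move is recorded wherever the location assigned to a value
changes at a split position. The between-blocks case works the same
way, comparing the location at the end of a predecessor with the
location at the start of its successor and inserting the move on that
edge.

  #include <cstddef>
  #include <cstdio>
  #include <utility>
  #include <vector>

  // Toy location: a value lives either in a register or in a stack slot.
  struct Location {
    enum Kind { kRegister, kStackSlot };
    Kind kind;
    int index;
    bool Equals(const Location& other) const {
      return kind == other.kind && index == other.index;
    }
  };

  // One sibling of a split interval: [start, end) plus its assigned location.
  struct Sibling {
    int start;
    int end;
    Location location;
  };

  // Within a block, walk consecutive siblings of the same interval and
  // record a move wherever the allocated location changes at a split point.
  std::vector<std::pair<Location, Location>> ConnectSiblings(
      const std::vector<Sibling>& siblings) {
    std::vector<std::pair<Location, Location>> moves;
    for (size_t i = 1; i < siblings.size(); ++i) {
      const Sibling& previous = siblings[i - 1];
      const Sibling& current = siblings[i];
      if (!previous.location.Equals(current.location)) {
        moves.push_back({previous.location, current.location});
      }
    }
    return moves;
  }

  int main() {
    // A value starts in register 2, is spilled to stack slot 0, then
    // reloaded into register 1: resolution must insert two moves.
    std::vector<Sibling> siblings = {
        {0, 10, {Location::kRegister, 2}},
        {10, 20, {Location::kStackSlot, 0}},
        {20, 30, {Location::kRegister, 1}},
    };
    for (const auto& move : ConnectSiblings(siblings)) {
      std::printf("move (kind %d, index %d) -> (kind %d, index %d)\n",
                  static_cast<int>(move.first.kind), move.first.index,
                  static_cast<int>(move.second.kind), move.second.index);
    }
    return 0;
  }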
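
A sketch of the fixed-register handling in 2), again with made-up
types (PendingMove, HandleFixedInput) rather than the allocator's
real data structures: the fixed register is blocked at the
instruction's position and a move into it is queued when the input
was allocated somewhere else.

  #include <cstdio>
  #include <vector>

  // Illustrative x86 core register numbering (not ART's enums).
  enum Register { EAX = 0, ECX = 1, EDX = 2, EBX = 3 };

  // A pending move to be emitted as part of a parallel move.
  struct PendingMove {
    int source;
    int destination;
  };

  // If an instruction pins an input to a fixed register (x86 idiv wants its
  // dividend in EAX, for instance), block that register at the instruction's
  // position and queue a move into it when the value currently lives
  // somewhere else.
  void HandleFixedInput(int current_reg, int fixed_reg,
                        std::vector<PendingMove>* moves_before_instruction,
                        std::vector<bool>* blocked) {
    (*blocked)[fixed_reg] = true;
    if (current_reg != fixed_reg) {
      moves_before_instruction->push_back({current_reg, fixed_reg});
    }
  }

  int main() {
    std::vector<PendingMove> moves;
    std::vector<bool> blocked(4, false);
    // Pretend the dividend was allocated to EBX but the instruction
    // requires it in EAX.
    HandleFixedInput(EBX, EAX, &moves, &blocked);
    for (const PendingMove& move : moves) {
      std::printf("parallel move r%d -> r%d before the instruction\n",
                  move.source, move.destination);
    }
    return 0;
  }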
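
A sketch of the stack-to-stack case that the x86 EmitNativeCode in 3)
has to handle. The emitter below only prints the would-be assembly
and the names are illustrative, but the spill/adjust/restore pattern
is the one that the ScratchRegisterScope added in this CL supports
through SpillScratch and RestoreScratch.

  #include <cstdio>

  // x86 has no memory-to-memory mov, so a stack-to-stack move goes through
  // a scratch core register. If no register is free at that point, the
  // scratch is spilled around the move.
  void EmitStackToStackMove(int source_offset, int destination_offset,
                            bool scratch_register_is_free,
                            const char* scratch_register) {
    // A push moves ESP down by one word, so ESP-relative offsets must be
    // adjusted while the scratch register is spilled.
    int stack_adjust = scratch_register_is_free ? 0 : 4;
    if (!scratch_register_is_free) {
      std::printf("  push %s\n", scratch_register);  // SpillScratch
    }
    std::printf("  mov %s, [esp + %d]\n", scratch_register,
                source_offset + stack_adjust);
    std::printf("  mov [esp + %d], %s\n", destination_offset + stack_adjust,
                scratch_register);
    if (!scratch_register_is_free) {
      std::printf("  pop %s\n", scratch_register);  // RestoreScratch
    }
  }

  int main() {
    // Move the value at [esp + 8] to [esp + 16] while every register is
    // live, so the scratch (here ECX) has to be spilled.
    EmitStackToStackMove(8, 16, /*scratch_register_is_free=*/false, "ecx");
    return 0;
  }
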
Change-Id: I0da6bd7eb66877987148b87c3be6a983b4e3f858
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index 3d2d136..4a1b6ce 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -147,4 +147,71 @@
}
}
+// A location may be used as a scratch if no unperformed move still reads
+// from it (it blocks no pending move) and a pending move will overwrite it.
+bool ParallelMoveResolver::IsScratchLocation(Location loc) {
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ if (moves_.Get(i)->Blocks(loc)) {
+ return false;
+ }
+ }
+
+ for (size_t i = 0; i < moves_.Size(); ++i) {
+ if (moves_.Get(i)->GetDestination().Equals(loc)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+// Finds a register the resolver can clobber, preferring one whose current
+// value does not need to be preserved. `blocked` is never chosen; `*spilled`
+// tells the caller whether the chosen register must be saved before use.
+int ParallelMoveResolver::AllocateScratchRegister(int blocked, int register_count, bool* spilled) {
+ int scratch = -1;
+ for (int reg = 0; reg < register_count; ++reg) {
+ if ((blocked != reg) &&
+ IsScratchLocation(Location::RegisterLocation(ManagedRegister(reg)))) {
+ scratch = reg;
+ break;
+ }
+ }
+
+  // No free register was found: pick any register that is not blocked; the
+  // caller is responsible for spilling it before use and restoring it after.
+  if (scratch == -1) {
+ *spilled = true;
+ for (int reg = 0; reg < register_count; ++reg) {
+ if (blocked != reg) {
+ scratch = reg;
+ }
+ }
+ } else {
+ *spilled = false;
+ }
+
+ return scratch;
+}
+
+
+ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
+ ParallelMoveResolver* resolver, int blocked, int number_of_registers)
+ : resolver_(resolver),
+ reg_(kNoRegister),
+ spilled_(false) {
+ reg_ = resolver_->AllocateScratchRegister(blocked, number_of_registers, &spilled_);
+
+ if (spilled_) {
+ resolver->SpillScratch(reg_);
+ }
+}
+
+
+ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() {
+ if (spilled_) {
+ resolver_->RestoreScratch(reg_);
+ }
+}
+
} // namespace art