/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocator_graph_color.h"

#include "code_generator.h"
#include "register_allocation_resolver.h"
#include "ssa_liveness_analysis.h"
#include "thread-inl.h"

namespace art {

// Highest number of registers that we support for any platform. This can be used for std::bitset,
// for example, which needs to know its size at compile time.
static constexpr size_t kMaxNumRegs = 32;

// The maximum number of graph coloring attempts before triggering a DCHECK.
// This is meant to catch changes to the graph coloring algorithm that undermine its forward
// progress guarantees. Forward progress for the algorithm means splitting live intervals on
// every graph coloring attempt so that eventually the interference graph will be sparse enough
// to color. The main threat to forward progress is trying to split short intervals which cannot be
// split further; this could cause infinite looping because the interference graph would never
// change. This is avoided by prioritizing short intervals before long ones, so that long
// intervals are split when coloring fails.
static constexpr size_t kMaxGraphColoringAttemptsDebug = 100;

// Interference nodes make up the interference graph, which is the primary data structure in
// graph coloring register allocation. Each node represents a single live interval, and contains
// a set of adjacent nodes corresponding to intervals overlapping with its own. To save memory,
// pre-colored nodes never contain outgoing edges (only incoming ones).
//
// As nodes are pruned from the interference graph, incoming edges of the pruned node are removed,
// but outgoing edges remain in order to later color the node based on the colors of its neighbors.
//
// Note that a pair interval is represented by a single node in the interference graph, which
// essentially requires two colors. One consequence of this is that the degree of a node is not
// necessarily equal to the number of adjacent nodes--instead, the degree reflects the maximum
// number of colors with which a node could interfere. We model this by giving edges different
// weights (1 or 2) to control how much they increase the degree of adjacent nodes.
// For example, the edge between two single nodes will have weight 1. On the other hand,
// the edge between a single node and a pair node will have weight 2. This is because the pair
// node could block up to two colors for the single node, and because the single node could
// block an entire two-register aligned slot for the pair node.
// The degree is defined this way because we use it to decide whether a node is guaranteed a color,
// and thus whether it is safe to prune it from the interference graph early on.
class InterferenceNode : public ArenaObject<kArenaAllocRegisterAllocator> {
 public:
  InterferenceNode(ArenaAllocator* allocator, LiveInterval* interval, size_t id)
        : interval_(interval),
          adjacent_nodes_(CmpPtr, allocator->Adapter(kArenaAllocRegisterAllocator)),
          out_degree_(0),
          id_(id) {}

  // Used to maintain determinism when storing InterferenceNode pointers in sets.
  static bool CmpPtr(const InterferenceNode* lhs, const InterferenceNode* rhs) {
    return lhs->id_ < rhs->id_;
  }

  void AddInterference(InterferenceNode* other) {
    if (adjacent_nodes_.insert(other).second) {
      out_degree_ += EdgeWeightWith(other);
    }
  }

  void RemoveInterference(InterferenceNode* other) {
    if (adjacent_nodes_.erase(other) > 0) {
      out_degree_ -= EdgeWeightWith(other);
    }
  }

  bool ContainsInterference(InterferenceNode* other) const {
    return adjacent_nodes_.count(other) > 0;
  }

  LiveInterval* GetInterval() const {
    return interval_;
  }

  const ArenaSet<InterferenceNode*, decltype(&CmpPtr)>& GetAdjacentNodes() const {
    return adjacent_nodes_;
  }

  size_t GetOutDegree() const {
    return out_degree_;
  }

  size_t GetId() const {
    return id_;
  }

 private:
  // We give extra weight to edges adjacent to pair nodes. See the general comment on the
  // interference graph above.
  size_t EdgeWeightWith(InterferenceNode* other) const {
    return (interval_->HasHighInterval() || other->interval_->HasHighInterval()) ? 2 : 1;
  }

  // The live interval that this node represents.
  LiveInterval* const interval_;

  // All nodes interfering with this one.
  // TODO: There is potential to use a cheaper data structure here, especially since
  //       adjacency sets will usually be small.
  ArenaSet<InterferenceNode*, decltype(&CmpPtr)> adjacent_nodes_;

  // The maximum number of colors with which this node could interfere. This could be more than
  // the number of adjacent nodes if this is a pair node, or if some adjacent nodes are pair nodes.
  // We use "out" degree because incoming edges come from nodes already pruned from the graph,
  // and do not affect the coloring of this node.
  size_t out_degree_;

  // A unique identifier for this node, used to maintain determinism when storing
  // interference nodes in sets.
  const size_t id_;

  // TODO: We could cache the result of interval_->RequiresRegister(), since it
  //       will not change for the lifetime of this node. (Currently, RequiresRegister() requires
  //       iterating through all uses of a live interval.)

  DISALLOW_COPY_AND_ASSIGN(InterferenceNode);
};

static bool IsCoreInterval(LiveInterval* interval) {
  return interval->GetType() != Primitive::kPrimFloat
      && interval->GetType() != Primitive::kPrimDouble;
}

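// Compute the number of vreg-sized slots reserved at the bottom of the stack frame for the
// ArtMethod* of the current method; the method pointer's size depends on the instruction set.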
static size_t ComputeReservedArtMethodSlots(const CodeGenerator& codegen) {
  return static_cast<size_t>(InstructionSetPointerSize(codegen.GetInstructionSet())) / kVRegSize;
}

RegisterAllocatorGraphColor::RegisterAllocatorGraphColor(ArenaAllocator* allocator,
                                                         CodeGenerator* codegen,
                                                         const SsaLivenessAnalysis& liveness)
      : RegisterAllocator(allocator, codegen, liveness),
        core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        temp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        safepoints_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_core_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        physical_fp_intervals_(allocator->Adapter(kArenaAllocRegisterAllocator)),
        int_spill_slot_counter_(0),
        double_spill_slot_counter_(0),
        float_spill_slot_counter_(0),
        long_spill_slot_counter_(0),
        catch_phi_spill_slot_counter_(0),
        reserved_art_method_slots_(ComputeReservedArtMethodSlots(*codegen)),
        reserved_out_slots_(codegen->GetGraph()->GetMaximumNumberOfOutVRegs()),
        number_of_globally_blocked_core_regs_(0),
        number_of_globally_blocked_fp_regs_(0),
        max_safepoint_live_core_regs_(0),
        max_safepoint_live_fp_regs_(0),
        coloring_attempt_allocator_(nullptr) {
  // Before we ask for blocked registers, set them up in the code generator.
  codegen->SetupBlockedRegisters();

  // Initialize physical core register live intervals and blocked registers.
  // This includes globally blocked registers, such as the stack pointer.
  physical_core_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
  for (size_t i = 0; i < codegen->GetNumberOfCoreRegisters(); ++i) {
    LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimInt);
    physical_core_intervals_[i] = interval;
    core_intervals_.push_back(interval);
    if (codegen_->IsBlockedCoreRegister(i)) {
      ++number_of_globally_blocked_core_regs_;
      interval->AddRange(0, liveness.GetMaxLifetimePosition());
    }
  }
  // Initialize physical floating point register live intervals and blocked registers.
  physical_fp_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
  for (size_t i = 0; i < codegen->GetNumberOfFloatingPointRegisters(); ++i) {
    LiveInterval* interval = LiveInterval::MakeFixedInterval(allocator_, i, Primitive::kPrimFloat);
    physical_fp_intervals_[i] = interval;
    fp_intervals_.push_back(interval);
    if (codegen_->IsBlockedFloatingPointRegister(i)) {
      ++number_of_globally_blocked_fp_regs_;
      interval->AddRange(0, liveness.GetMaxLifetimePosition());
    }
  }
}

void RegisterAllocatorGraphColor::AllocateRegisters() {
  // (1) Collect and prepare live intervals.
  ProcessInstructions();

  for (bool processing_core_regs : {true, false}) {
    ArenaVector<LiveInterval*>& intervals = processing_core_regs
        ? core_intervals_
        : fp_intervals_;
    size_t num_registers = processing_core_regs
        ? codegen_->GetNumberOfCoreRegisters()
        : codegen_->GetNumberOfFloatingPointRegisters();

    size_t attempt = 0;
    while (true) {
      ++attempt;
      DCHECK(attempt <= kMaxGraphColoringAttemptsDebug)
          << "Exceeded debug max graph coloring register allocation attempts. "
          << "This could indicate that the register allocator is not making forward progress, "
          << "which could be caused by prioritizing the wrong live intervals. (Short intervals "
          << "should be prioritized over long ones, because they cannot be split further.)";

      // Reset the allocator for the next coloring attempt.
      ArenaAllocator coloring_attempt_allocator(allocator_->GetArenaPool());
      coloring_attempt_allocator_ = &coloring_attempt_allocator;

      // (2) Build the interference graph.
      ArenaVector<InterferenceNode*> prunable_nodes(
          coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
      ArenaVector<InterferenceNode*> safepoints(
          coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
      BuildInterferenceGraph(intervals, &prunable_nodes, &safepoints);

      // (3) Prune all uncolored nodes from the interference graph.
      ArenaStdStack<InterferenceNode*> pruned_nodes(
          coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
      PruneInterferenceGraph(prunable_nodes, num_registers, &pruned_nodes);

      // (4) Color pruned nodes based on interferences.
      bool successful = ColorInterferenceGraph(&pruned_nodes, num_registers);

      if (successful) {
        // Compute the maximum number of live registers across safepoints.
        // Notice that we do not count globally blocked registers, such as the stack pointer.
        if (safepoints.size() > 0) {
          size_t max_safepoint_live_regs = ComputeMaxSafepointLiveRegisters(safepoints);
          if (processing_core_regs) {
            max_safepoint_live_core_regs_ =
                max_safepoint_live_regs - number_of_globally_blocked_core_regs_;
          } else {
            max_safepoint_live_fp_regs_ =
                max_safepoint_live_regs - number_of_globally_blocked_fp_regs_;
          }
        }

        // Tell the code generator which registers were allocated.
        // We only look at prunable_nodes because we already told the code generator about
        // fixed intervals while processing instructions. We also ignore the fixed intervals
        // placed at the top of catch blocks.
        for (InterferenceNode* node : prunable_nodes) {
          LiveInterval* interval = node->GetInterval();
          if (interval->HasRegister()) {
            Location low_reg = processing_core_regs
                ? Location::RegisterLocation(interval->GetRegister())
                : Location::FpuRegisterLocation(interval->GetRegister());
            codegen_->AddAllocatedRegister(low_reg);
            if (interval->HasHighInterval()) {
              LiveInterval* high = interval->GetHighInterval();
              DCHECK(high->HasRegister());
              Location high_reg = processing_core_regs
                  ? Location::RegisterLocation(high->GetRegister())
                  : Location::FpuRegisterLocation(high->GetRegister());
              codegen_->AddAllocatedRegister(high_reg);
            }
          } else {
            DCHECK(!interval->HasHighInterval() || !interval->GetHighInterval()->HasRegister());
          }
        }

        break;
      }
    }  // while unsuccessful
  }  // for processing_core_regs

  // (5) Resolve locations and deconstruct SSA form.
  RegisterAllocationResolver(allocator_, codegen_, liveness_)
      .Resolve(max_safepoint_live_core_regs_,
               max_safepoint_live_fp_regs_,
               reserved_art_method_slots_ + reserved_out_slots_,
               int_spill_slot_counter_,
               long_spill_slot_counter_,
               float_spill_slot_counter_,
               double_spill_slot_counter_,
               catch_phi_spill_slot_counter_,
               temp_intervals_);

  if (kIsDebugBuild) {
    Validate(/*log_fatal_on_failure*/ true);
  }
}

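// Verify the allocation: gather the live intervals for SSA values and temps and check that no
// two of them have conflicting register or spill slot assignments. Fixed physical intervals are
// currently skipped, since two fixed intervals for the same register may legitimately overlap.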
bool RegisterAllocatorGraphColor::Validate(bool log_fatal_on_failure) {
  for (bool processing_core_regs : {true, false}) {
    ArenaVector<LiveInterval*> intervals(
        allocator_->Adapter(kArenaAllocRegisterAllocatorValidate));
    for (size_t i = 0; i < liveness_.GetNumberOfSsaValues(); ++i) {
      HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
      LiveInterval* interval = instruction->GetLiveInterval();
      if (interval != nullptr && IsCoreInterval(interval) == processing_core_regs) {
        intervals.push_back(instruction->GetLiveInterval());
      }
    }

    ArenaVector<LiveInterval*>& physical_intervals = processing_core_regs
        ? physical_core_intervals_
        : physical_fp_intervals_;
    for (LiveInterval* fixed : physical_intervals) {
      if (fixed->GetFirstRange() != nullptr) {
        // Ideally we would check fixed ranges as well, but currently there are times when
        // two fixed intervals for the same register will overlap. For example, a fixed input
        // and a fixed output may sometimes share the same register, in which case there will
        // be two fixed intervals for the same place.
      }
    }

    for (LiveInterval* temp : temp_intervals_) {
      if (IsCoreInterval(temp) == processing_core_regs) {
        intervals.push_back(temp);
      }
    }

    size_t spill_slots = int_spill_slot_counter_
                         + long_spill_slot_counter_
                         + float_spill_slot_counter_
                         + double_spill_slot_counter_
                         + catch_phi_spill_slot_counter_;
    bool ok = ValidateIntervals(intervals,
                                spill_slots,
                                reserved_art_method_slots_ + reserved_out_slots_,
                                *codegen_,
                                allocator_,
                                processing_core_regs,
                                log_fatal_on_failure);
    if (!ok) {
      return false;
    }
  }  // for processing_core_regs

  return true;
}

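// Collect and prepare live intervals for the whole graph, visiting blocks in linear order and
// the instructions within each block in reverse.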
void RegisterAllocatorGraphColor::ProcessInstructions() {
  for (HLinearPostOrderIterator it(*codegen_->GetGraph()); !it.Done(); it.Advance()) {
    HBasicBlock* block = it.Current();

    // Note that we currently depend on this ordering, since some helper
    // code is designed for linear scan register allocation.
    for (HBackwardInstructionIterator instr_it(block->GetInstructions());
         !instr_it.Done();
         instr_it.Advance()) {
      ProcessInstruction(instr_it.Current());
    }

    for (HInstructionIterator phi_it(block->GetPhis()); !phi_it.Done(); phi_it.Advance()) {
      ProcessInstruction(phi_it.Current());
    }

    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // By blocking all registers at the top of each catch block or irreducible loop, we force
      // intervals belonging to the live-in set of the catch/header block to be spilled.
      // TODO(ngeoffray): Phis in this block could be allocated in registers.
      size_t position = block->GetLifetimeStart();
      BlockRegisters(position, position + 1);
    }
  }
}

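// Prepare everything the allocator needs to know about one instruction: temp intervals,
// safepoints, fixed input/output locations, catch phi spill slots, and whether the output's
// live interval needs to compete for a register at all.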
void RegisterAllocatorGraphColor::ProcessInstruction(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  if (locations == nullptr) {
    return;
  }
  if (locations->NeedsSafepoint() && codegen_->IsLeafMethod()) {
    // We do this here because we do not want the suspend check to artificially
    // create live registers.
    DCHECK(instruction->IsSuspendCheckEntry());
    DCHECK_EQ(locations->GetTempCount(), 0u);
    instruction->GetBlock()->RemoveInstruction(instruction);
    return;
  }

  CheckForTempLiveIntervals(instruction);
  CheckForSafepoint(instruction);
  if (locations->WillCall()) {
    // If a call will happen, create fixed intervals for caller-save registers.
    // TODO: Note that it may be beneficial to later split intervals at this point,
    //       so that we allow last-minute moves from a caller-save register
    //       to a callee-save register.
    BlockRegisters(instruction->GetLifetimePosition(),
                   instruction->GetLifetimePosition() + 1,
                   /*caller_save_only*/ true);
  }
  CheckForFixedInputs(instruction);

  LiveInterval* interval = instruction->GetLiveInterval();
  if (interval == nullptr) {
    // Instructions lacking a valid output location do not have a live interval.
    DCHECK(!locations->Out().IsValid());
    return;
  }

  // Low intervals act as representatives for their corresponding high interval.
  DCHECK(!interval->IsHighInterval());
  if (codegen_->NeedsTwoRegisters(interval->GetType())) {
    interval->AddHighInterval();
  }
  AddSafepointsFor(instruction);
  CheckForFixedOutput(instruction);
  AllocateSpillSlotForCatchPhi(instruction);

  ArenaVector<LiveInterval*>& intervals = IsCoreInterval(interval)
      ? core_intervals_
      : fp_intervals_;
  if (interval->HasSpillSlot() || instruction->IsConstant()) {
    // Note that if an interval already has a spill slot, then its value currently resides
    // in the stack (e.g., parameters). Thus we do not have to allocate a register until its
    // first register use. This is also true for constants, which can be materialized at any
    // point.
    size_t first_register_use = interval->FirstRegisterUse();
    if (first_register_use != kNoLifetime) {
      LiveInterval* split = SplitBetween(interval, interval->GetStart(), first_register_use - 1);
      intervals.push_back(split);
    } else {
      // We won't allocate a register for this value.
    }
  } else {
    intervals.push_back(interval);
  }
}

void RegisterAllocatorGraphColor::CheckForFixedInputs(HInstruction* instruction) {
  // We simply block physical registers where necessary.
  // TODO: Ideally we would coalesce the physical register with the register
  //       allocated to the input value, but this can be tricky if, e.g., there
  //       could be multiple physical register uses of the same value at the
  //       same instruction. Need to think about it more.
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();
  for (size_t i = 0; i < locations->GetInputCount(); ++i) {
    Location input = locations->InAt(i);
    if (input.IsRegister() || input.IsFpuRegister()) {
      BlockRegister(input, position, position + 1);
      codegen_->AddAllocatedRegister(input);
    } else if (input.IsPair()) {
      BlockRegister(input.ToLow(), position, position + 1);
      BlockRegister(input.ToHigh(), position, position + 1);
      codegen_->AddAllocatedRegister(input.ToLow());
      codegen_->AddAllocatedRegister(input.ToHigh());
    }
  }
}

void RegisterAllocatorGraphColor::CheckForFixedOutput(HInstruction* instruction) {
  // If an instruction has a fixed output location, we give the live interval a register and then
  // proactively split it just after the definition point to avoid creating too many interferences
  // with a fixed node.
  LiveInterval* interval = instruction->GetLiveInterval();
  Location out = interval->GetDefinedBy()->GetLocations()->Out();
  size_t position = instruction->GetLifetimePosition();
  DCHECK_GE(interval->GetEnd() - position, 2u);

  if (out.IsUnallocated() && out.GetPolicy() == Location::kSameAsFirstInput) {
    out = instruction->GetLocations()->InAt(0);
  }

  if (out.IsRegister() || out.IsFpuRegister()) {
    interval->SetRegister(out.reg());
    codegen_->AddAllocatedRegister(out);
    Split(interval, position + 1);
  } else if (out.IsPair()) {
    interval->SetRegister(out.low());
    interval->GetHighInterval()->SetRegister(out.high());
    codegen_->AddAllocatedRegister(out.ToLow());
    codegen_->AddAllocatedRegister(out.ToHigh());
    Split(interval, position + 1);
  } else if (out.IsStackSlot() || out.IsDoubleStackSlot()) {
    interval->SetSpillSlot(out.GetStackIndex());
  } else {
    DCHECK(out.IsUnallocated() || out.IsConstant());
  }
}

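// Record on the instruction's live interval every safepoint that the interval covers, so that
// live values can later be saved across those safepoints.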
void RegisterAllocatorGraphColor::AddSafepointsFor(HInstruction* instruction) {
  LiveInterval* interval = instruction->GetLiveInterval();
  for (size_t safepoint_index = safepoints_.size(); safepoint_index > 0; --safepoint_index) {
    HInstruction* safepoint = safepoints_[safepoint_index - 1u];
    size_t safepoint_position = safepoint->GetLifetimePosition();

    // Test that safepoints_ are ordered in the optimal way.
    DCHECK(safepoint_index == safepoints_.size() ||
           safepoints_[safepoint_index]->GetLifetimePosition() < safepoint_position);

    if (safepoint_position == interval->GetStart()) {
      // The safepoint is for this instruction, so the location of the instruction
      // does not need to be saved.
      DCHECK_EQ(safepoint_index, safepoints_.size());
      DCHECK_EQ(safepoint, instruction);
      continue;
    } else if (interval->IsDeadAt(safepoint_position)) {
      break;
    } else if (!interval->Covers(safepoint_position)) {
      // Hole in the interval.
      continue;
    }
    interval->AddSafepoint(safepoint);
  }
}

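// Create intervals for this instruction's temporaries: fixed temps simply block their register
// for the instruction, while unallocated temps get fresh intervals that compete for registers
// like any other interval.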
void RegisterAllocatorGraphColor::CheckForTempLiveIntervals(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();
  for (size_t i = 0; i < locations->GetTempCount(); ++i) {
    Location temp = locations->GetTemp(i);
    if (temp.IsRegister() || temp.IsFpuRegister()) {
      BlockRegister(temp, position, position + 1);
      codegen_->AddAllocatedRegister(temp);
    } else {
      DCHECK(temp.IsUnallocated());
      switch (temp.GetPolicy()) {
        case Location::kRequiresRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimInt);
          interval->AddTempUse(instruction, i);
          core_intervals_.push_back(interval);
          temp_intervals_.push_back(interval);
          break;
        }

        case Location::kRequiresFpuRegister: {
          LiveInterval* interval =
              LiveInterval::MakeTempInterval(allocator_, Primitive::kPrimDouble);
          interval->AddTempUse(instruction, i);
          fp_intervals_.push_back(interval);
          temp_intervals_.push_back(interval);
          if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
            interval->AddHighInterval(/*is_temp*/ true);
            temp_intervals_.push_back(interval->GetHighInterval());
          }
          break;
        }

        default:
          LOG(FATAL) << "Unexpected policy for temporary location "
                     << temp.GetPolicy();
      }
    }
  }
}

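// Track safepoints as they are encountered. For calls that happen only on a slow path, also
// synthesize a one-position interval used solely to count the registers live across the
// safepoint.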
void RegisterAllocatorGraphColor::CheckForSafepoint(HInstruction* instruction) {
  LocationSummary* locations = instruction->GetLocations();
  size_t position = instruction->GetLifetimePosition();

  if (locations->NeedsSafepoint()) {
    safepoints_.push_back(instruction);
    if (locations->OnlyCallsOnSlowPath()) {
      // We add a synthesized range at this position to record the live registers
      // at this position. Ideally, we could just update the safepoints when locations
      // are updated, but we currently need to know the full stack size before updating
      // locations (because of parameters and the fact that we don't have a frame pointer).
      // And knowing the full stack size requires knowing the maximum number of live
      // registers at calls in slow paths.
      // By adding the following interval in the algorithm, we can compute this
      // maximum before updating locations.
      LiveInterval* interval = LiveInterval::MakeSlowPathInterval(allocator_, instruction);
      interval->AddRange(position, position + 1);
      core_intervals_.push_back(interval);
      fp_intervals_.push_back(interval);
    }
  }
}

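// Split an interval, but only if `position` falls strictly inside it. Returns the new sibling
// interval if a split happened, and the interval itself otherwise.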
LiveInterval* RegisterAllocatorGraphColor::TrySplit(LiveInterval* interval, size_t position) {
  if (interval->GetStart() < position && position < interval->GetEnd()) {
    return Split(interval, position);
  } else {
    return interval;
  }
}

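// Make an uncolorable interval easier to color by splitting it around each use that requires a
// register, so that the pieces between uses can be spilled.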
void RegisterAllocatorGraphColor::SplitAtRegisterUses(LiveInterval* interval) {
  DCHECK(!interval->IsHighInterval());

  // Split just after a register definition.
  if (interval->IsParent() && interval->DefinitionRequiresRegister()) {
    interval = TrySplit(interval, interval->GetStart() + 1);
  }

  UsePosition* use = interval->GetFirstUse();
  while (use != nullptr && use->GetPosition() < interval->GetStart()) {
    use = use->GetNext();
  }

  // Split around register uses.
  size_t end = interval->GetEnd();
  while (use != nullptr && use->GetPosition() <= end) {
    if (use->RequiresRegister()) {
      size_t position = use->GetPosition();
      interval = TrySplit(interval, position - 1);
      if (liveness_.GetInstructionFromPosition(position / 2)->IsControlFlow()) {
        // If we are at the very end of a basic block, we cannot split right
        // at the use. Split just after instead.
        interval = TrySplit(interval, position + 1);
      } else {
        interval = TrySplit(interval, position);
      }
    }
    use = use->GetNext();
  }
}

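// Catch phis are never assigned registers; give them spill slots eagerly, making sure that
// vreg-equivalent phis share the same slot.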
void RegisterAllocatorGraphColor::AllocateSpillSlotForCatchPhi(HInstruction* instruction) {
  if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
    HPhi* phi = instruction->AsPhi();
    LiveInterval* interval = phi->GetLiveInterval();

    HInstruction* previous_phi = phi->GetPrevious();
    DCHECK(previous_phi == nullptr ||
           previous_phi->AsPhi()->GetRegNumber() <= phi->GetRegNumber())
        << "Phis expected to be sorted by vreg number, "
        << "so that equivalent phis are adjacent.";

    if (phi->IsVRegEquivalentOf(previous_phi)) {
      // Assign the same spill slot.
      DCHECK(previous_phi->GetLiveInterval()->HasSpillSlot());
      interval->SetSpillSlot(previous_phi->GetLiveInterval()->GetSpillSlot());
    } else {
      interval->SetSpillSlot(catch_phi_spill_slot_counter_);
      catch_phi_spill_slot_counter_ += interval->NeedsTwoSpillSlots() ? 2 : 1;
    }
  }
}

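// Make a physical register unavailable in [start, end) by adding a range to its fixed interval.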
void RegisterAllocatorGraphColor::BlockRegister(Location location,
                                                size_t start,
                                                size_t end) {
  DCHECK(location.IsRegister() || location.IsFpuRegister());
  int reg = location.reg();
  LiveInterval* interval = location.IsRegister()
      ? physical_core_intervals_[reg]
      : physical_fp_intervals_[reg];
  DCHECK(interval->GetRegister() == reg);
  bool blocked_by_codegen = location.IsRegister()
      ? codegen_->IsBlockedCoreRegister(reg)
      : codegen_->IsBlockedFloatingPointRegister(reg);
  if (blocked_by_codegen) {
    // We've already blocked this register for the entire method. (And adding a
    // range inside another range violates the preconditions of AddRange.)
  } else {
    interval->AddRange(start, end);
  }
}

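// Block all physical registers, or only the caller-save ones, over the given lifetime range.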
void RegisterAllocatorGraphColor::BlockRegisters(size_t start, size_t end, bool caller_save_only) {
  for (size_t i = 0; i < codegen_->GetNumberOfCoreRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsCoreCalleeSaveRegister(i)) {
      BlockRegister(Location::RegisterLocation(i), start, end);
    }
  }
  for (size_t i = 0; i < codegen_->GetNumberOfFloatingPointRegisters(); ++i) {
    if (!caller_save_only || !codegen_->IsFloatingPointCalleeSaveRegister(i)) {
      BlockRegister(Location::FpuRegisterLocation(i), start, end);
    }
  }
}

// Add an interference edge, but only if necessary.
static void AddPotentialInterference(InterferenceNode* from, InterferenceNode* to) {
  if (from->GetInterval()->HasRegister()) {
    // We save space by ignoring outgoing edges from fixed nodes.
  } else if (to->GetInterval()->IsSlowPathSafepoint()) {
    // Safepoint intervals are only there to count max live registers,
    // so no need to give them incoming interference edges.
    // This is also necessary for correctness, because we don't want nodes
    // to remove themselves from safepoint adjacency sets when they're pruned.
  } else {
    from->AddInterference(to);
  }
}

// TODO: See locations->OutputCanOverlapWithInputs(); we may want to consider
//       this when building the interference graph.
void RegisterAllocatorGraphColor::BuildInterferenceGraph(
    const ArenaVector<LiveInterval*>& intervals,
    ArenaVector<InterferenceNode*>* prunable_nodes,
    ArenaVector<InterferenceNode*>* safepoints) {
  size_t interval_id_counter = 0;

  // Build the interference graph efficiently by ordering range endpoints
  // by position and doing a linear sweep to find interferences. (That is, we
  // jump from endpoint to endpoint, maintaining a set of intervals live at each
  // point. If two nodes are ever in the live set at the same time, then they
  // interfere with each other.)
  //
  // We order by both position and (secondarily) by whether the endpoint
  // begins or ends a range; we want to process range endings before range
  // beginnings at the same position because they should not conflict.
  //
  // For simplicity, we create a tuple for each endpoint, and then sort the tuples.
  // Tuple contents: (position, is_range_beginning, node).
  ArenaVector<std::tuple<size_t, bool, InterferenceNode*>> range_endpoints(
      coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
  for (LiveInterval* parent : intervals) {
    for (LiveInterval* sibling = parent; sibling != nullptr; sibling = sibling->GetNextSibling()) {
      LiveRange* range = sibling->GetFirstRange();
      if (range != nullptr) {
        InterferenceNode* node = new (coloring_attempt_allocator_) InterferenceNode(
            coloring_attempt_allocator_, sibling, interval_id_counter++);
        if (sibling->HasRegister()) {
          // Fixed nodes will never be pruned, so no need to keep track of them.
        } else if (sibling->IsSlowPathSafepoint()) {
          // Safepoint intervals are synthesized to count max live registers.
          // They will be processed separately after coloring.
          safepoints->push_back(node);
        } else {
          prunable_nodes->push_back(node);
        }

        while (range != nullptr) {
          range_endpoints.push_back(std::make_tuple(range->GetStart(), true, node));
          range_endpoints.push_back(std::make_tuple(range->GetEnd(), false, node));
          range = range->GetNext();
        }
      }
    }
  }

  // Sort the endpoints.
  std::sort(range_endpoints.begin(), range_endpoints.end());

  // Nodes live at the current position in the linear sweep.
  ArenaSet<InterferenceNode*, decltype(&InterferenceNode::CmpPtr)> live(
      InterferenceNode::CmpPtr,
      coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));

  // Linear sweep. When we encounter the beginning of a range, we add the corresponding node
  // to the live set. When we encounter the end of a range, we remove the corresponding node
  // from the live set. Nodes interfere if they are in the live set at the same time.
  for (auto it = range_endpoints.begin(); it != range_endpoints.end(); ++it) {
    bool is_range_beginning;
    InterferenceNode* node;
    // Extract information from the tuple, including the node this tuple represents.
    std::tie(std::ignore, is_range_beginning, node) = *it;

    if (is_range_beginning) {
      for (InterferenceNode* conflicting : live) {
        DCHECK_NE(node, conflicting);
        AddPotentialInterference(node, conflicting);
        AddPotentialInterference(conflicting, node);
      }
      DCHECK_EQ(live.count(node), 0u);
      live.insert(node);
    } else {
      // End of range.
      DCHECK_EQ(live.count(node), 1u);
      live.erase(node);
    }
  }
  DCHECK(live.empty());
}

// The order in which we color nodes is vital to both correctness (forward
// progress) and code quality. Specifically, we must prioritize intervals
// that require registers, and after that we must prioritize short intervals.
// That way, if we fail to color a node, it either won't require a register,
// or it will be a long interval that can be split in order to make the
// interference graph sparser.
// TODO: May also want to consider:
//       - Loop depth
//       - Constants (since they can be rematerialized)
//       - Allocated spill slots
static bool GreaterNodePriority(const InterferenceNode* lhs,
                                const InterferenceNode* rhs) {
  LiveInterval* lhs_interval = lhs->GetInterval();
  LiveInterval* rhs_interval = rhs->GetInterval();

  // (1) Choose the interval that requires a register.
  if (lhs_interval->RequiresRegister() != rhs_interval->RequiresRegister()) {
    return lhs_interval->RequiresRegister();
  }

  // (2) Choose the interval that has a shorter life span.
  if (lhs_interval->GetLength() != rhs_interval->GetLength()) {
    return lhs_interval->GetLength() < rhs_interval->GetLength();
  }

  // (3) Just choose the interval based on a deterministic ordering.
  return InterferenceNode::CmpPtr(lhs, rhs);
}

void RegisterAllocatorGraphColor::PruneInterferenceGraph(
    const ArenaVector<InterferenceNode*>& prunable_nodes,
    size_t num_regs,
    ArenaStdStack<InterferenceNode*>* pruned_nodes) {
  // When pruning the graph, we refer to nodes with degree less than num_regs as low degree nodes,
  // and all others as high degree nodes. The distinction is important: low degree nodes are
  // guaranteed a color, while high degree nodes are not.

  // Low-degree nodes are guaranteed a color, so worklist order does not matter.
  ArenaDeque<InterferenceNode*> low_degree_worklist(
      coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));

  // If we have to prune from the high-degree worklist, we cannot guarantee
  // the pruned node a color. So, we order the worklist by priority.
  ArenaSet<InterferenceNode*, decltype(&GreaterNodePriority)> high_degree_worklist(
      GreaterNodePriority, coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));

  // Build worklists.
  for (InterferenceNode* node : prunable_nodes) {
    DCHECK(!node->GetInterval()->HasRegister())
        << "Fixed nodes should never be pruned";
    DCHECK(!node->GetInterval()->IsSlowPathSafepoint())
        << "Safepoint nodes should never be pruned";
    if (node->GetOutDegree() < num_regs) {
      low_degree_worklist.push_back(node);
    } else {
      high_degree_worklist.insert(node);
    }
  }

  // Helper function to prune an interval from the interference graph,
  // which includes updating the worklists.
  auto prune_node = [this,
                     num_regs,
                     &pruned_nodes,
                     &low_degree_worklist,
                     &high_degree_worklist] (InterferenceNode* node) {
    DCHECK(!node->GetInterval()->HasRegister());
    pruned_nodes->push(node);
    for (InterferenceNode* adjacent : node->GetAdjacentNodes()) {
      DCHECK(!adjacent->GetInterval()->IsSlowPathSafepoint())
          << "Nodes should never interfere with synthesized safepoint nodes";
      if (adjacent->GetInterval()->HasRegister()) {
        // No effect on pre-colored nodes; they're never pruned.
      } else {
        bool was_high_degree = adjacent->GetOutDegree() >= num_regs;
        DCHECK(adjacent->ContainsInterference(node))
            << "Missing incoming interference edge from non-fixed node";
        adjacent->RemoveInterference(node);
        if (was_high_degree && adjacent->GetOutDegree() < num_regs) {
          // This is a transition from high degree to low degree.
          DCHECK_EQ(high_degree_worklist.count(adjacent), 1u);
          high_degree_worklist.erase(adjacent);
          low_degree_worklist.push_back(adjacent);
        }
      }
    }
  };

  // Prune the graph.
  while (!low_degree_worklist.empty() || !high_degree_worklist.empty()) {
    while (!low_degree_worklist.empty()) {
      InterferenceNode* node = low_degree_worklist.front();
      // TODO: pop_back() should work as well, but it doesn't; we get a
      //       failed check while pruning. We should look into this.
      low_degree_worklist.pop_front();
      prune_node(node);
    }
    if (!high_degree_worklist.empty()) {
      // We prune the lowest-priority node, because pruning a node earlier
      // gives it a higher chance of being spilled.
      InterferenceNode* node = *high_degree_worklist.rbegin();
      high_degree_worklist.erase(node);
      prune_node(node);
    }
  }
}

// Build a mask with a bit set for each register assigned to some
// interval in `intervals`.
template <typename Container>
static std::bitset<kMaxNumRegs> BuildConflictMask(Container& intervals) {
  std::bitset<kMaxNumRegs> conflict_mask;
  for (InterferenceNode* adjacent : intervals) {
    LiveInterval* conflicting = adjacent->GetInterval();
    if (conflicting->HasRegister()) {
      conflict_mask.set(conflicting->GetRegister());
      if (conflicting->HasHighInterval()) {
        DCHECK(conflicting->GetHighInterval()->HasRegister());
        conflict_mask.set(conflicting->GetHighInterval()->GetRegister());
      }
    } else {
      DCHECK(!conflicting->HasHighInterval()
             || !conflicting->GetHighInterval()->HasRegister());
    }
  }
  return conflict_mask;
}

bool RegisterAllocatorGraphColor::ColorInterferenceGraph(
    ArenaStdStack<InterferenceNode*>* pruned_nodes,
    size_t num_regs) {
  DCHECK_LE(num_regs, kMaxNumRegs) << "kMaxNumRegs is too small";
  ArenaVector<LiveInterval*> colored_intervals(
      coloring_attempt_allocator_->Adapter(kArenaAllocRegisterAllocator));
  bool successful = true;

  while (!pruned_nodes->empty()) {
    InterferenceNode* node = pruned_nodes->top();
    pruned_nodes->pop();
    LiveInterval* interval = node->GetInterval();

    // Search for free register(s).
    // Note that the graph coloring allocator assumes that pair intervals are aligned here,
    // excluding pre-colored pair intervals (which can currently be unaligned on x86).
    std::bitset<kMaxNumRegs> conflict_mask = BuildConflictMask(node->GetAdjacentNodes());
    size_t reg = 0;
    if (interval->HasHighInterval()) {
      while (reg < num_regs - 1 && (conflict_mask[reg] || conflict_mask[reg + 1])) {
        reg += 2;
      }
    } else {
      // We use CTZ (count trailing zeros) to quickly find the lowest available register.
      // Note that CTZ is undefined for 0, so we special-case it.
      reg = conflict_mask.all() ? conflict_mask.size() : CTZ(~conflict_mask.to_ulong());
    }

    if (reg < (interval->HasHighInterval() ? num_regs - 1 : num_regs)) {
      // Assign register.
      DCHECK(!interval->HasRegister());
      interval->SetRegister(reg);
      colored_intervals.push_back(interval);
      if (interval->HasHighInterval()) {
        DCHECK(!interval->GetHighInterval()->HasRegister());
        interval->GetHighInterval()->SetRegister(reg + 1);
        colored_intervals.push_back(interval->GetHighInterval());
      }
    } else if (interval->RequiresRegister()) {
      // The interference graph is too dense to color. Make it sparser by
      // splitting this live interval.
      successful = false;
      SplitAtRegisterUses(interval);
      // We continue coloring, because there may be additional intervals that cannot
      // be colored, and that we should split.
    } else {
      // Spill.
      AllocateSpillSlotFor(interval);
    }
  }

  // If unsuccessful, reset all register assignments.
  if (!successful) {
    for (LiveInterval* interval : colored_intervals) {
      interval->ClearRegister();
    }
  }

  return successful;
}

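// Each synthesized safepoint node interferes exactly with the intervals holding registers at
// that safepoint, so the answer is the largest population count among their conflict masks.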
size_t RegisterAllocatorGraphColor::ComputeMaxSafepointLiveRegisters(
    const ArenaVector<InterferenceNode*>& safepoints) {
  size_t max_safepoint_live_regs = 0;
  for (InterferenceNode* safepoint : safepoints) {
    DCHECK(safepoint->GetInterval()->IsSlowPathSafepoint());
    std::bitset<kMaxNumRegs> conflict_mask = BuildConflictMask(safepoint->GetAdjacentNodes());
    size_t live_regs = conflict_mask.count();
    max_safepoint_live_regs = std::max(max_safepoint_live_regs, live_regs);
  }
  return max_safepoint_live_regs;
}

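// Pick a stack location for a spilled value: reuse the parent's existing slot, use the
// parameter's stack slot, use slot 0 for the current method, skip constants entirely, or
// otherwise take the next slot from the counter for the interval's type.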
void RegisterAllocatorGraphColor::AllocateSpillSlotFor(LiveInterval* interval) {
  LiveInterval* parent = interval->GetParent();
  HInstruction* defined_by = parent->GetDefinedBy();
  if (parent->HasSpillSlot()) {
    // We already have a spill slot for this value that we can reuse.
  } else if (defined_by->IsParameterValue()) {
    // Parameters already have a stack slot.
    parent->SetSpillSlot(codegen_->GetStackSlotOfParameter(defined_by->AsParameterValue()));
  } else if (defined_by->IsCurrentMethod()) {
    // The current method is always at spill slot 0.
    parent->SetSpillSlot(0);
  } else if (defined_by->IsConstant()) {
    // Constants don't need a spill slot.
  } else {
    // Allocate a spill slot based on type.
    size_t* spill_slot_counter;
    switch (interval->GetType()) {
      case Primitive::kPrimDouble:
        spill_slot_counter = &double_spill_slot_counter_;
        break;
      case Primitive::kPrimLong:
        spill_slot_counter = &long_spill_slot_counter_;
        break;
      case Primitive::kPrimFloat:
        spill_slot_counter = &float_spill_slot_counter_;
        break;
      case Primitive::kPrimNot:
      case Primitive::kPrimInt:
      case Primitive::kPrimChar:
      case Primitive::kPrimByte:
      case Primitive::kPrimBoolean:
      case Primitive::kPrimShort:
        spill_slot_counter = &int_spill_slot_counter_;
        break;
      case Primitive::kPrimVoid:
        LOG(FATAL) << "Unexpected type for interval " << interval->GetType();
        UNREACHABLE();
    }

    parent->SetSpillSlot(*spill_slot_counter);
    *spill_slot_counter += parent->NeedsTwoSpillSlots() ? 2 : 1;
    // TODO: Could color stack slots if we wanted to, even if
    //       it's just a trivial coloring. See the linear scan implementation,
    //       which simply reuses spill slots for values whose live intervals
    //       have already ended.
  }
}

}  // namespace art