/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "register_allocation_resolver.h"

#include "code_generator.h"
#include "linear_order.h"
#include "ssa_liveness_analysis.h"

namespace art {

RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
                                                       CodeGenerator* codegen,
                                                       const SsaLivenessAnalysis& liveness)
    : allocator_(allocator),
      codegen_(codegen),
      liveness_(liveness) {}

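// Resolution entry point: once registers and spill slots have been assigned,
// rewrite location summaries with the final locations, connect split
// intervals with moves, and fill in safepoint information.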
void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoints,
                                         size_t reserved_out_slots,
                                         size_t int_spill_slots,
                                         size_t long_spill_slots,
                                         size_t float_spill_slots,
                                         size_t double_spill_slots,
                                         size_t catch_phi_spill_slots,
                                         const ArenaVector<LiveInterval*>& temp_intervals) {
  size_t spill_slots = int_spill_slots
                     + long_spill_slots
                     + float_spill_slots
                     + double_spill_slots
                     + catch_phi_spill_slots;
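  // All spill slot counts are expressed in kVRegSize units; byte offsets are
  // computed below as `slot * kVRegSize`.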

  // Update safepoints and calculate the size of the spills.
  UpdateSafepointLiveRegisters();
  size_t maximum_safepoint_spill_size = CalculateMaximumSafepointSpillSize(safepoints);

  // Computes frame size and spill mask.
  codegen_->InitializeCodeGeneration(spill_slots,
                                     maximum_safepoint_spill_size,
                                     reserved_out_slots,  // Includes slot(s) for the art method.
                                     codegen_->GetGraph()->GetLinearOrder());

  // Resolve outputs, including stack locations.
  // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    LiveInterval* current = instruction->GetLiveInterval();
    LocationSummary* locations = instruction->GetLocations();
    Location location = locations->Out();
    if (instruction->IsParameterValue()) {
      // Now that we know the frame size, adjust the parameter's location.
      if (location.IsStackSlot()) {
        location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (location.IsDoubleStackSlot()) {
        location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (current->HasSpillSlot()) {
        current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
      }
    } else if (instruction->IsCurrentMethod()) {
      // The current method is always at offset 0.
      DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
    } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
      DCHECK(current->HasSpillSlot());
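      // Catch phi slots are counted in `spill_slots` but laid out above all
      // other spill slots (see the stack layout below), hence the re-basing
      // of the slot index here.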
      size_t slot = current->GetSpillSlot()
                    + spill_slots
                    + reserved_out_slots
                    - catch_phi_spill_slots;
      current->SetSpillSlot(slot * kVRegSize);
    } else if (current->HasSpillSlot()) {
      // Adjust the stack slot, now that we know the number of slots for each type.
      // The way this implementation lays out the stack is the following:
      // [parameter slots       ]
      // [art method (caller)   ]
      // [entry spill (core)    ]
      // [entry spill (float)   ]
      // [should_deoptimize flag] (this is optional)
      // [catch phi spill slots ]
      // [double spill slots    ]
      // [long spill slots      ]
      // [float spill slots     ]
      // [int/ref values        ]
      // [maximum out values    ] (number of arguments for calls)
      // [art method            ].
      size_t slot = current->GetSpillSlot();
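      // The fall-through cases below add up the sizes of all areas located
      // between this interval's spill area and the out slots. For example, a
      // kFloat64 interval ends up at
      //   slot + long_spill_slots + float_spill_slots + int_spill_slots
      //        + reserved_out_slots,
      // i.e. within the double spill slot area of the layout above.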
      switch (current->GetType()) {
        case DataType::Type::kFloat64:
          slot += long_spill_slots;
          FALLTHROUGH_INTENDED;
        case DataType::Type::kInt64:
          slot += float_spill_slots;
          FALLTHROUGH_INTENDED;
        case DataType::Type::kFloat32:
          slot += int_spill_slots;
          FALLTHROUGH_INTENDED;
        case DataType::Type::kReference:
        case DataType::Type::kInt32:
        case DataType::Type::kUint16:
        case DataType::Type::kUint8:
        case DataType::Type::kInt8:
        case DataType::Type::kBool:
        case DataType::Type::kInt16:
          slot += reserved_out_slots;
          break;
        case DataType::Type::kVoid:
          LOG(FATAL) << "Unexpected type for interval " << current->GetType();
      }
      current->SetSpillSlot(slot * kVRegSize);
    }

    Location source = current->ToLocation();

    if (location.IsUnallocated()) {
      if (location.GetPolicy() == Location::kSameAsFirstInput) {
        if (locations->InAt(0).IsUnallocated()) {
          locations->SetInAt(0, source);
        } else {
          DCHECK(locations->InAt(0).Equals(source));
        }
      }
      locations->UpdateOut(source);
    } else {
      DCHECK(source.Equals(location));
    }
  }

  // Connect siblings and resolve inputs.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    ConnectSiblings(instruction->GetLiveInterval());
  }

  // Resolve non-linear control flow across branches. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // Instructions live at the top of catch blocks or irreducible loop
      // headers were forced to spill.
      if (kIsDebugBuild) {
        BitVector* live = liveness_.GetLiveInSet(*block);
        for (uint32_t idx : live->Indexes()) {
          LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
          LiveInterval* sibling = interval->GetSiblingAt(block->GetLifetimeStart());
          // `GetSiblingAt` returns the sibling that contains a position, but there could be
          // a lifetime hole in it. `CoversSlow` returns whether the interval is live at that
          // position.
          if ((sibling != nullptr) && sibling->CoversSlow(block->GetLifetimeStart())) {
            DCHECK(!sibling->HasRegister());
          }
        }
      }
    } else {
      BitVector* live = liveness_.GetLiveInSet(*block);
      for (uint32_t idx : live->Indexes()) {
        LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
        for (HBasicBlock* predecessor : block->GetPredecessors()) {
          ConnectSplitSiblings(interval, predecessor, block);
        }
      }
    }
  }

  // Resolve phi inputs. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock()) {
      // Catch phi values are set at runtime by the exception delivery mechanism.
    } else {
      for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
        HInstruction* phi = inst_it.Current();
        for (size_t i = 0, e = block->GetPredecessors().size(); i < e; ++i) {
          HBasicBlock* predecessor = block->GetPredecessors()[i];
          DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
          HInstruction* input = phi->InputAt(i);
          Location source = input->GetLiveInterval()->GetLocationAt(
              predecessor->GetLifetimeEnd() - 1);
          Location destination = phi->GetLiveInterval()->ToLocation();
          InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
        }
      }
    }
  }

  // Resolve temp locations.
  for (LiveInterval* temp : temp_intervals) {
    if (temp->IsHighInterval()) {
      // High intervals can be skipped; they are already handled by the low interval.
      continue;
    }
    HInstruction* at = liveness_.GetTempUser(temp);
    size_t temp_index = liveness_.GetTempIndex(temp);
    LocationSummary* locations = at->GetLocations();
    switch (temp->GetType()) {
      case DataType::Type::kInt32:
        locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
        break;

      case DataType::Type::kFloat64:
        if (codegen_->NeedsTwoRegisters(DataType::Type::kFloat64)) {
          Location location = Location::FpuRegisterPairLocation(
              temp->GetRegister(), temp->GetHighInterval()->GetRegister());
          locations->SetTempAt(temp_index, location);
        } else {
          locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
        }
        break;

      default:
        LOG(FATAL) << "Unexpected type for temporary location "
                   << temp->GetType();
    }
  }
}

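// Record which registers hold live values at each safepoint, so that slow
// paths know which registers to save and restore.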
void RegisterAllocationResolver::UpdateSafepointLiveRegisters() {
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    for (LiveInterval* current = instruction->GetLiveInterval();
         current != nullptr;
         current = current->GetNextSibling()) {
      if (!current->HasRegister()) {
        continue;
      }
      Location source = current->ToLocation();
      for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
           safepoint_position != nullptr;
           safepoint_position = safepoint_position->GetNext()) {
        DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
        LocationSummary* locations = safepoint_position->GetLocations();
        switch (source.GetKind()) {
          case Location::kRegister:
          case Location::kFpuRegister: {
            locations->AddLiveRegister(source);
            break;
          }
          case Location::kRegisterPair:
          case Location::kFpuRegisterPair: {
            locations->AddLiveRegister(source.ToLow());
            locations->AddLiveRegister(source.ToHigh());
            break;
          }
          case Location::kStackSlot:  // Fall-through
          case Location::kDoubleStackSlot:  // Fall-through
          case Location::kConstant: {
            // Nothing to do.
            break;
          }
          default: {
            LOG(FATAL) << "Unexpected location for object";
          }
        }
      }
    }
  }
}

size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
    ArrayRef<HInstruction* const> safepoints) {
  size_t core_register_spill_size = codegen_->GetWordSize();
  size_t fp_register_spill_size = codegen_->GetFloatingPointSpillSlotSize();
  size_t maximum_safepoint_spill_size = 0u;
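  // Take the maximum over all safepoints: the frame reserves a single area
  // that must fit the worst-case slow-path register save.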
  for (HInstruction* instruction : safepoints) {
    LocationSummary* locations = instruction->GetLocations();
    if (locations->OnlyCallsOnSlowPath()) {
      size_t core_spills =
          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
      size_t fp_spills =
          codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
      size_t spill_size =
          core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
      maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
    } else if (locations->CallsOnMainAndSlowPath()) {
      // Nothing to spill on the slow path if the main path already clobbers caller-saves.
      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
      DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
    }
  }
  return maximum_safepoint_spill_size;
}

void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
  LiveInterval* current = interval;
  if (current->HasSpillSlot()
      && current->HasRegister()
      // Currently, we unconditionally spill the current method in the code generators.
      && !interval->GetDefinedBy()->IsCurrentMethod()) {
    // We spill eagerly, so the move must be at the definition.
    Location loc;
    switch (interval->NumberOfSpillSlotsNeeded()) {
      case 1: loc = Location::StackSlot(interval->GetParent()->GetSpillSlot()); break;
      case 2: loc = Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot()); break;
      case 4: loc = Location::SIMDStackSlot(interval->GetParent()->GetSpillSlot()); break;
      default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
    }
    InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), loc);
  }
  UsePositionList::const_iterator use_it = current->GetUses().begin();
  const UsePositionList::const_iterator use_end = current->GetUses().end();
  EnvUsePositionList::const_iterator env_use_it = current->GetEnvironmentUses().begin();
  const EnvUsePositionList::const_iterator env_use_end = current->GetEnvironmentUses().end();

  // Walk over all siblings, updating locations of use positions, and
  // connecting them when they are adjacent.
  do {
    Location source = current->ToLocation();

    // Walk over all uses covered by this interval, and update the location
    // information.

    LiveRange* range = current->GetFirstRange();
    while (range != nullptr) {
      // Process uses in the closed interval [range->GetStart(), range->GetEnd()].
      // FindMatchingUseRange() expects a half-open interval, so pass `range->GetEnd() + 1u`.
      size_t range_begin = range->GetStart();
      size_t range_end = range->GetEnd() + 1u;
      auto matching_use_range =
          FindMatchingUseRange(use_it, use_end, range_begin, range_end);
      DCHECK(std::all_of(use_it,
                         matching_use_range.begin(),
                         [](const UsePosition& pos) { return pos.IsSynthesized(); }));
      for (const UsePosition& use : matching_use_range) {
        DCHECK(current->CoversSlow(use.GetPosition()) || (use.GetPosition() == range->GetEnd()));
        if (!use.IsSynthesized()) {
          LocationSummary* locations = use.GetUser()->GetLocations();
          Location expected_location = locations->InAt(use.GetInputIndex());
          // The expected (actual) location may be invalid in case the input is unused. Currently
          // this only happens for intrinsics.
          if (expected_location.IsValid()) {
            if (expected_location.IsUnallocated()) {
              locations->SetInAt(use.GetInputIndex(), source);
            } else if (!expected_location.IsConstant()) {
              AddInputMoveFor(
                  interval->GetDefinedBy(), use.GetUser(), source, expected_location);
            }
          } else {
            DCHECK(use.GetUser()->IsInvoke());
            DCHECK(use.GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
          }
        }
      }
      use_it = matching_use_range.end();

      // Walk over the environment uses, and update their locations.
      auto matching_env_use_range =
          FindMatchingUseRange(env_use_it, env_use_end, range_begin, range_end);
      for (const EnvUsePosition& env_use : matching_env_use_range) {
        DCHECK(current->CoversSlow(env_use.GetPosition())
               || (env_use.GetPosition() == range->GetEnd()));
        HEnvironment* environment = env_use.GetEnvironment();
        environment->SetLocationAt(env_use.GetInputIndex(), source);
      }
      env_use_it = matching_env_use_range.end();

      range = range->GetNext();
    }

    // If the next interval starts just after this one, and has a register,
    // insert a move.
    LiveInterval* next_sibling = current->GetNextSibling();
    if (next_sibling != nullptr
        && next_sibling->HasRegister()
        && current->GetEnd() == next_sibling->GetStart()) {
      Location destination = next_sibling->ToLocation();
      InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
    }

    for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
         safepoint_position != nullptr;
         safepoint_position = safepoint_position->GetNext()) {
      DCHECK(current->CoversSlow(safepoint_position->GetPosition()));

      if (current->GetType() == DataType::Type::kReference) {
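        // A live reference must be visible to the garbage collector at the
        // safepoint: record its spill slot in the stack mask and/or its
        // register in the register mask.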
        DCHECK(interval->GetDefinedBy()->IsActualObject())
            << interval->GetDefinedBy()->DebugName()
            << '(' << interval->GetDefinedBy()->GetId() << ')'
            << "@" << safepoint_position->GetInstruction()->DebugName()
            << '(' << safepoint_position->GetInstruction()->GetId() << ')';
        LocationSummary* locations = safepoint_position->GetLocations();
        if (current->GetParent()->HasSpillSlot()) {
          locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
        }
        if (source.GetKind() == Location::kRegister) {
          locations->SetRegisterBit(source.reg());
        }
      }
    }
    current = next_sibling;
  } while (current != nullptr);

  // Any following uses can only be synthesized uses.
  DCHECK(std::all_of(use_it, use_end, [](const UsePosition& pos) { return pos.IsSynthesized(); }));
}

static bool IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(
    HInstruction* instruction) {
  return instruction->GetBlock()->GetGraph()->HasIrreducibleLoops() &&
         (instruction->IsConstant() || instruction->IsCurrentMethod());
}

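// Connect the locations of `interval` across the control-flow edge
// `from` -> `to`: if the interval was split and lives in different locations
// at the end of `from` and the start of `to`, insert a parallel move.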
void RegisterAllocationResolver::ConnectSplitSiblings(LiveInterval* interval,
                                                      HBasicBlock* from,
                                                      HBasicBlock* to) const {
  if (interval->GetNextSibling() == nullptr) {
    // Nothing to connect. The whole range was allocated to the same location.
    return;
  }

  // Find the intervals that cover `from` and `to`.
  size_t destination_position = to->GetLifetimeStart();
  size_t source_position = from->GetLifetimeEnd() - 1;
  LiveInterval* destination = interval->GetSiblingAt(destination_position);
  LiveInterval* source = interval->GetSiblingAt(source_position);

  if (destination == source) {
    // Interval was not split.
    return;
  }

  LiveInterval* parent = interval->GetParent();
  HInstruction* defined_by = parent->GetDefinedBy();
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (destination == nullptr || !destination->CoversSlow(destination_position))) {
    // Our live_in fixed point calculation has found that the instruction is live
    // in the `to` block because it will eventually enter an irreducible loop. Our
    // live interval computation however does not compute a fixed point, and
    // therefore will not have a location for that instruction for `to`.
    // Because the instruction is a constant or the ArtMethod, we don't need to
    // do anything: it will be materialized in the irreducible loop.
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
        << defined_by->DebugName() << ":" << defined_by->GetId()
        << " " << from->GetBlockId() << " -> " << to->GetBlockId();
    return;
  }

  if (!destination->HasRegister()) {
    // Values are eagerly spilled. The spill slot already contains the appropriate value.
    return;
  }

  Location location_source;
  // `GetSiblingAt` returns the interval whose start and end cover `position`,
  // but does not check whether the interval is inactive at that position.
  // The only situation where the interval is inactive at that position is in the
  // presence of irreducible loops for constants and ArtMethod.
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (source == nullptr || !source->CoversSlow(source_position))) {
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
    if (defined_by->IsConstant()) {
      location_source = defined_by->GetLocations()->Out();
    } else {
      DCHECK(defined_by->IsCurrentMethod());
      switch (parent->NumberOfSpillSlotsNeeded()) {
        case 1: location_source = Location::StackSlot(parent->GetSpillSlot()); break;
        case 2: location_source = Location::DoubleStackSlot(parent->GetSpillSlot()); break;
        case 4: location_source = Location::SIMDStackSlot(parent->GetSpillSlot()); break;
        default: LOG(FATAL) << "Unexpected number of spill slots"; UNREACHABLE();
      }
    }
  } else {
    DCHECK(source != nullptr);
    DCHECK(source->CoversSlow(source_position));
    DCHECK(destination->CoversSlow(destination_position));
    location_source = source->ToLocation();
  }

  // If `from` has only one successor, we can put the moves at its exit.
  // Otherwise we need to put them at the entry of `to`.
  if (from->GetNormalSuccessors().size() == 1) {
    InsertParallelMoveAtExitOf(from,
                               defined_by,
                               location_source,
                               destination->ToLocation());
  } else {
    DCHECK_EQ(to->GetPredecessors().size(), 1u);
    InsertParallelMoveAtEntryOf(to,
                                defined_by,
                                location_source,
                                destination->ToLocation());
  }
}

static bool IsValidDestination(Location destination) {
  return destination.IsRegister()
      || destination.IsRegisterPair()
      || destination.IsFpuRegister()
      || destination.IsFpuRegisterPair()
      || destination.IsStackSlot()
      || destination.IsDoubleStackSlot()
      || destination.IsSIMDStackSlot();
}

void RegisterAllocationResolver::AddMove(HParallelMove* move,
                                         Location source,
                                         Location destination,
                                         HInstruction* instruction,
                                         DataType::Type type) const {
  if (type == DataType::Type::kInt64
      && codegen_->ShouldSplitLongMoves()
      // The parallel move resolver knows how to deal with long constants.
      && !source.IsConstant()) {
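    // A split 64-bit move becomes two 32-bit moves for the low and high
    // halves. Only the low half carries `instruction`; the high half passes
    // nullptr since a parallel move is expected to hold at most one move per
    // instruction.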
    move->AddMove(source.ToLow(), destination.ToLow(), DataType::Type::kInt32, instruction);
    move->AddMove(source.ToHigh(), destination.ToHigh(), DataType::Type::kInt32, nullptr);
  } else {
    move->AddMove(source, destination, type, instruction);
  }
}

void RegisterAllocationResolver::AddInputMoveFor(HInstruction* input,
                                                 HInstruction* user,
                                                 Location source,
                                                 Location destination) const {
  if (source.Equals(destination)) return;

  DCHECK(!user->IsPhi());

  HInstruction* previous = user->GetPrevious();
  HParallelMove* move = nullptr;
  if (previous == nullptr
      || !previous->IsParallelMove()
      || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(user->GetLifetimePosition());
    user->GetBlock()->InsertInstructionBefore(move, user);
  } else {
    move = previous->AsParallelMove();
  }
  DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
  AddMove(move, source, destination, nullptr, input->GetType());
}

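// Lifetime positions are assigned so that even positions denote the start of
// an instruction and odd positions its end.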
static bool IsInstructionStart(size_t position) {
  return (position & 1) == 0;
}

static bool IsInstructionEnd(size_t position) {
  return (position & 1) == 1;
}

void RegisterAllocationResolver::InsertParallelMoveAt(size_t position,
                                                      HInstruction* instruction,
                                                      Location source,
                                                      Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
  HParallelMove* move;
  if (at == nullptr) {
    if (IsInstructionStart(position)) {
      // Block boundary; don't do anything, the connection of split siblings will handle it.
      return;
    } else {
      // Move must happen before the first instruction of the block.
      at = liveness_.GetInstructionFromPosition((position + 1) / 2);
      // Note that parallel moves may have already been inserted, so we explicitly
      // ask for the first instruction of the block: `GetInstructionFromPosition` does
      // not contain the `HParallelMove` instructions.
      at = at->GetBlock()->GetFirstInstruction();

      if (at->GetLifetimePosition() < position) {
        // We may insert moves for split siblings and phi spills at the beginning of the block.
        // Since this is a different lifetime position, we need to go to the next instruction.
        DCHECK(at->IsParallelMove());
        at = at->GetNext();
      }

      if (at->GetLifetimePosition() != position) {
        DCHECK_GT(at->GetLifetimePosition(), position);
        move = new (allocator_) HParallelMove(allocator_);
        move->SetLifetimePosition(position);
        at->GetBlock()->InsertInstructionBefore(move, at);
      } else {
        DCHECK(at->IsParallelMove());
        move = at->AsParallelMove();
      }
    }
  } else if (IsInstructionEnd(position)) {
    // Move must happen after the instruction.
    DCHECK(!at->IsControlFlow());
    move = at->GetNext()->AsParallelMove();
    // This is a parallel move for connecting siblings in the same block. We need to
    // differentiate it from moves for connecting blocks, and from input moves.
    if (move == nullptr || move->GetLifetimePosition() > position) {
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
    }
  } else {
    // Move must happen before the instruction.
    HInstruction* previous = at->GetPrevious();
    if (previous == nullptr
        || !previous->IsParallelMove()
        || previous->GetLifetimePosition() != position) {
      // If the previous is a parallel move, then its position must be lower
      // than the given `position`: it was added just after the non-parallel
      // move instruction that precedes `instruction`.
      DCHECK(previous == nullptr
             || !previous->IsParallelMove()
             || previous->GetLifetimePosition() < position);
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at);
    } else {
      move = previous->AsParallelMove();
    }
  }
  DCHECK_EQ(move->GetLifetimePosition(), position);
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertParallelMoveAtExitOf(HBasicBlock* block,
                                                            HInstruction* instruction,
                                                            Location source,
                                                            Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
  HInstruction* last = block->GetLastInstruction();
  // We insert moves at exit for phi predecessors and connecting blocks.
  // A block ending with an if or a packed switch cannot branch to a block
  // with phis because we do not allow critical edges. It can also not connect
  // a split interval between two blocks: the move has to happen in the successor.
  DCHECK(!last->IsIf() && !last->IsPackedSwitch());
  HInstruction* previous = last->GetPrevious();
  HParallelMove* move;
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from output moves.
  size_t position = last->GetLifetimePosition();
  if (previous == nullptr || !previous->IsParallelMove()
      || previous->AsParallelMove()->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    block->InsertInstructionBefore(move, last);
  } else {
    move = previous->AsParallelMove();
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertParallelMoveAtEntryOf(HBasicBlock* block,
                                                             HInstruction* instruction,
                                                             Location source,
                                                             Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  HInstruction* first = block->GetFirstInstruction();
  HParallelMove* move = first->AsParallelMove();
  size_t position = block->GetLifetimeStart();
  // This is a parallel move for connecting blocks. We need to differentiate
  // it from moves for connecting siblings in the same block, and from input moves.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    block->InsertInstructionBefore(move, first);
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

void RegisterAllocationResolver::InsertMoveAfter(HInstruction* instruction,
                                                 Location source,
                                                 Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  if (instruction->IsPhi()) {
    InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
    return;
  }

  size_t position = instruction->GetLifetimePosition() + 1;
  HParallelMove* move = instruction->GetNext()->AsParallelMove();
  // This is a parallel move for moving the output of an instruction. We need
  // to differentiate it from input moves, from moves for connecting siblings
  // in the same block, and from moves for connecting blocks.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}

}  // namespace art