/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17#include "register_allocation_resolver.h"
18
19#include "code_generator.h"
Aart Bik96202302016-10-04 17:33:56 -070020#include "linear_order.h"
Matthew Gharrity5d6e27d2016-07-18 13:38:44 -070021#include "ssa_liveness_analysis.h"
22
23namespace art {
24
// Keeps non-owning references to the arena allocator (used for inserting
// parallel-move instructions), the code generator (frame layout queries),
// and the completed liveness analysis.
RegisterAllocationResolver::RegisterAllocationResolver(ArenaAllocator* allocator,
                                                       CodeGenerator* codegen,
                                                       const SsaLivenessAnalysis& liveness)
      : allocator_(allocator),
        codegen_(codegen),
        liveness_(liveness) {}
31
// Entry point of the resolution phase. After register/spill-slot allocation,
// this computes the final frame layout, rewrites every instruction's output,
// input, and temp locations to concrete registers or stack slots, and inserts
// the moves that connect split intervals, block boundaries, and phi inputs.
void RegisterAllocationResolver::Resolve(ArrayRef<HInstruction* const> safepoints,
                                         size_t reserved_out_slots,
                                         size_t int_spill_slots,
                                         size_t long_spill_slots,
                                         size_t float_spill_slots,
                                         size_t double_spill_slots,
                                         size_t catch_phi_spill_slots,
                                         const ArenaVector<LiveInterval*>& temp_intervals) {
  size_t spill_slots = int_spill_slots
                       + long_spill_slots
                       + float_spill_slots
                       + double_spill_slots
                       + catch_phi_spill_slots;

  // Update safepoints and calculate the size of the spills.
  UpdateSafepointLiveRegisters();
  size_t maximum_safepoint_spill_size = CalculateMaximumSafepointSpillSize(safepoints);

  // Computes frame size and spill mask.
  codegen_->InitializeCodeGeneration(spill_slots,
                                     maximum_safepoint_spill_size,
                                     reserved_out_slots,  // Includes slot(s) for the art method.
                                     codegen_->GetGraph()->GetLinearOrder());

  // Resolve outputs, including stack locations.
  // TODO: Use pointers of Location inside LiveInterval to avoid doing another iteration.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    LiveInterval* current = instruction->GetLiveInterval();
    LocationSummary* locations = instruction->GetLocations();
    Location location = locations->Out();
    if (instruction->IsParameterValue()) {
      // Now that we know the frame size, adjust the parameter's location.
      if (location.IsStackSlot()) {
        location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (location.IsDoubleStackSlot()) {
        location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
        current->SetSpillSlot(location.GetStackIndex());
        locations->UpdateOut(location);
      } else if (current->HasSpillSlot()) {
        current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
      }
    } else if (instruction->IsCurrentMethod()) {
      // The current method is always at offset 0.
      DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
    } else if (instruction->IsPhi() && instruction->AsPhi()->IsCatchPhi()) {
      DCHECK(current->HasSpillSlot());
      // Catch phi slots sit between the parameter slots and the regular spill
      // slots; see the stack layout diagram below.
      size_t slot = current->GetSpillSlot()
                    + spill_slots
                    + reserved_out_slots
                    - catch_phi_spill_slots;
      current->SetSpillSlot(slot * kVRegSize);
    } else if (current->HasSpillSlot()) {
      // Adjust the stack slot, now that we know the number of them for each type.
      // The way this implementation lays out the stack is the following:
      // [parameter slots       ]
      // [catch phi spill slots ]
      // [double spill slots    ]
      // [long spill slots      ]
      // [float spill slots     ]
      // [int/ref values        ]
      // [maximum out values    ] (number of arguments for calls)
      // [art method            ].
      size_t slot = current->GetSpillSlot();
      // The fall-through cases accumulate the sizes of all the regions that
      // sit below this interval's type region in the layout above.
      switch (current->GetType()) {
        case Primitive::kPrimDouble:
          slot += long_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimLong:
          slot += float_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimFloat:
          slot += int_spill_slots;
          FALLTHROUGH_INTENDED;
        case Primitive::kPrimNot:
        case Primitive::kPrimInt:
        case Primitive::kPrimChar:
        case Primitive::kPrimByte:
        case Primitive::kPrimBoolean:
        case Primitive::kPrimShort:
          slot += reserved_out_slots;
          break;
        case Primitive::kPrimVoid:
          LOG(FATAL) << "Unexpected type for interval " << current->GetType();
      }
      current->SetSpillSlot(slot * kVRegSize);
    }

    Location source = current->ToLocation();

    if (location.IsUnallocated()) {
      if (location.GetPolicy() == Location::kSameAsFirstInput) {
        if (locations->InAt(0).IsUnallocated()) {
          locations->SetInAt(0, source);
        } else {
          DCHECK(locations->InAt(0).Equals(source));
        }
      }
      locations->UpdateOut(source);
    } else {
      DCHECK(source.Equals(location));
    }
  }

  // Connect siblings and resolve inputs.
  for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
    HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
    ConnectSiblings(instruction->GetLiveInterval());
  }

  // Resolve non-linear control flow across branches. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock() ||
        (block->IsLoopHeader() && block->GetLoopInformation()->IsIrreducible())) {
      // Instructions live at the top of catch blocks or irreducible loop header
      // were forced to spill.
      if (kIsDebugBuild) {
        BitVector* live = liveness_.GetLiveInSet(*block);
        for (uint32_t idx : live->Indexes()) {
          LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
          LiveInterval* sibling = interval->GetSiblingAt(block->GetLifetimeStart());
          // `GetSiblingAt` returns the sibling that contains a position, but there could be
          // a lifetime hole in it. `CoversSlow` returns whether the interval is live at that
          // position.
          if ((sibling != nullptr) && sibling->CoversSlow(block->GetLifetimeStart())) {
            DCHECK(!sibling->HasRegister());
          }
        }
      }
    } else {
      BitVector* live = liveness_.GetLiveInSet(*block);
      for (uint32_t idx : live->Indexes()) {
        LiveInterval* interval = liveness_.GetInstructionFromSsaIndex(idx)->GetLiveInterval();
        for (HBasicBlock* predecessor : block->GetPredecessors()) {
          ConnectSplitSiblings(interval, predecessor, block);
        }
      }
    }
  }

  // Resolve phi inputs. Order does not matter.
  for (HBasicBlock* block : codegen_->GetGraph()->GetLinearOrder()) {
    if (block->IsCatchBlock()) {
      // Catch phi values are set at runtime by the exception delivery mechanism.
    } else {
      for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
        HInstruction* phi = inst_it.Current();
        for (size_t i = 0, e = block->GetPredecessors().size(); i < e; ++i) {
          HBasicBlock* predecessor = block->GetPredecessors()[i];
          DCHECK_EQ(predecessor->GetNormalSuccessors().size(), 1u);
          HInstruction* input = phi->InputAt(i);
          Location source = input->GetLiveInterval()->GetLocationAt(
              predecessor->GetLifetimeEnd() - 1);
          Location destination = phi->GetLiveInterval()->ToLocation();
          InsertParallelMoveAtExitOf(predecessor, phi, source, destination);
        }
      }
    }
  }

  // Resolve temp locations.
  for (LiveInterval* temp : temp_intervals) {
    if (temp->IsHighInterval()) {
      // High intervals can be skipped, they are already handled by the low interval.
      continue;
    }
    HInstruction* at = liveness_.GetTempUser(temp);
    size_t temp_index = liveness_.GetTempIndex(temp);
    LocationSummary* locations = at->GetLocations();
    switch (temp->GetType()) {
      case Primitive::kPrimInt:
        locations->SetTempAt(temp_index, Location::RegisterLocation(temp->GetRegister()));
        break;

      case Primitive::kPrimDouble:
        if (codegen_->NeedsTwoRegisters(Primitive::kPrimDouble)) {
          Location location = Location::FpuRegisterPairLocation(
              temp->GetRegister(), temp->GetHighInterval()->GetRegister());
          locations->SetTempAt(temp_index, location);
        } else {
          locations->SetTempAt(temp_index, Location::FpuRegisterLocation(temp->GetRegister()));
        }
        break;

      default:
        LOG(FATAL) << "Unexpected type for temporary location "
                   << temp->GetType();
    }
  }
}
224
Vladimir Marko70e97462016-08-09 11:04:26 +0100225void RegisterAllocationResolver::UpdateSafepointLiveRegisters() {
226 for (size_t i = 0, e = liveness_.GetNumberOfSsaValues(); i < e; ++i) {
227 HInstruction* instruction = liveness_.GetInstructionFromSsaIndex(i);
228 for (LiveInterval* current = instruction->GetLiveInterval();
229 current != nullptr;
230 current = current->GetNextSibling()) {
231 if (!current->HasRegister()) {
232 continue;
233 }
234 Location source = current->ToLocation();
235 for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
236 safepoint_position != nullptr;
237 safepoint_position = safepoint_position->GetNext()) {
238 DCHECK(current->CoversSlow(safepoint_position->GetPosition()));
239 LocationSummary* locations = safepoint_position->GetLocations();
240 switch (source.GetKind()) {
241 case Location::kRegister:
242 case Location::kFpuRegister: {
243 locations->AddLiveRegister(source);
244 break;
245 }
246 case Location::kRegisterPair:
247 case Location::kFpuRegisterPair: {
248 locations->AddLiveRegister(source.ToLow());
249 locations->AddLiveRegister(source.ToHigh());
250 break;
251 }
252 case Location::kStackSlot: // Fall-through
253 case Location::kDoubleStackSlot: // Fall-through
254 case Location::kConstant: {
255 // Nothing to do.
256 break;
257 }
258 default: {
259 LOG(FATAL) << "Unexpected location for object";
260 }
261 }
262 }
263 }
264 }
265}
266
267size_t RegisterAllocationResolver::CalculateMaximumSafepointSpillSize(
268 ArrayRef<HInstruction* const> safepoints) {
269 size_t core_register_spill_size = codegen_->GetWordSize();
270 size_t fp_register_spill_size = codegen_->GetFloatingPointSpillSlotSize();
271 size_t maximum_safepoint_spill_size = 0u;
272 for (HInstruction* instruction : safepoints) {
273 LocationSummary* locations = instruction->GetLocations();
274 if (locations->OnlyCallsOnSlowPath()) {
275 size_t core_spills =
276 codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true);
277 size_t fp_spills =
278 codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false);
279 size_t spill_size =
280 core_register_spill_size * core_spills + fp_register_spill_size * fp_spills;
281 maximum_safepoint_spill_size = std::max(maximum_safepoint_spill_size, spill_size);
282 } else if (locations->CallsOnMainAndSlowPath()) {
283 // Nothing to spill on the slow path if the main path already clobbers caller-saves.
284 DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ true));
285 DCHECK_EQ(0u, codegen_->GetNumberOfSlowPathSpills(locations, /* core_registers */ false));
286 }
287 }
288 return maximum_safepoint_spill_size;
289}
290
// Walks all split siblings of `interval`: rewrites use and environment
// locations to the sibling's assigned location, inserts moves between
// adjacent siblings, and records GC stack/register bits at safepoints for
// object references.
void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) {
  LiveInterval* current = interval;
  if (current->HasSpillSlot()
      && current->HasRegister()
      // Currently, we spill unconditionally the current method in the code generators.
      && !interval->GetDefinedBy()->IsCurrentMethod()) {
    // We spill eagerly, so move must be at definition.
    InsertMoveAfter(interval->GetDefinedBy(),
                    interval->ToLocation(),
                    interval->NeedsTwoSpillSlots()
                        ? Location::DoubleStackSlot(interval->GetParent()->GetSpillSlot())
                        : Location::StackSlot(interval->GetParent()->GetSpillSlot()));
  }
  UsePosition* use = current->GetFirstUse();
  UsePosition* env_use = current->GetFirstEnvironmentUse();

  // Walk over all siblings, updating locations of use positions, and
  // connecting them when they are adjacent.
  do {
    Location source = current->ToLocation();

    // Walk over all uses covered by this interval, and update the location
    // information.

    LiveRange* range = current->GetFirstRange();
    while (range != nullptr) {
      // Uses strictly before the range start can only be synthesized uses.
      while (use != nullptr && use->GetPosition() < range->GetStart()) {
        DCHECK(use->IsSynthesized());
        use = use->GetNext();
      }
      while (use != nullptr && use->GetPosition() <= range->GetEnd()) {
        DCHECK(!use->GetIsEnvironment());
        DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd()));
        if (!use->IsSynthesized()) {
          LocationSummary* locations = use->GetUser()->GetLocations();
          Location expected_location = locations->InAt(use->GetInputIndex());
          // The expected (actual) location may be invalid in case the input is unused. Currently
          // this only happens for intrinsics.
          if (expected_location.IsValid()) {
            if (expected_location.IsUnallocated()) {
              locations->SetInAt(use->GetInputIndex(), source);
            } else if (!expected_location.IsConstant()) {
              AddInputMoveFor(interval->GetDefinedBy(), use->GetUser(), source, expected_location);
            }
          } else {
            DCHECK(use->GetUser()->IsInvoke());
            DCHECK(use->GetUser()->AsInvoke()->GetIntrinsic() != Intrinsics::kNone);
          }
        }
        use = use->GetNext();
      }

      // Walk over the environment uses, and update their locations.
      while (env_use != nullptr && env_use->GetPosition() < range->GetStart()) {
        env_use = env_use->GetNext();
      }

      while (env_use != nullptr && env_use->GetPosition() <= range->GetEnd()) {
        DCHECK(current->CoversSlow(env_use->GetPosition())
               || (env_use->GetPosition() == range->GetEnd()));
        HEnvironment* environment = env_use->GetEnvironment();
        environment->SetLocationAt(env_use->GetInputIndex(), source);
        env_use = env_use->GetNext();
      }

      range = range->GetNext();
    }

    // If the next interval starts just after this one, and has a register,
    // insert a move.
    LiveInterval* next_sibling = current->GetNextSibling();
    if (next_sibling != nullptr
        && next_sibling->HasRegister()
        && current->GetEnd() == next_sibling->GetStart()) {
      Location destination = next_sibling->ToLocation();
      InsertParallelMoveAt(current->GetEnd(), interval->GetDefinedBy(), source, destination);
    }

    for (SafepointPosition* safepoint_position = current->GetFirstSafepoint();
         safepoint_position != nullptr;
         safepoint_position = safepoint_position->GetNext()) {
      DCHECK(current->CoversSlow(safepoint_position->GetPosition()));

      if (current->GetType() == Primitive::kPrimNot) {
        DCHECK(interval->GetDefinedBy()->IsActualObject())
            << interval->GetDefinedBy()->DebugName()
            << '(' << interval->GetDefinedBy()->GetId() << ')'
            << "@" << safepoint_position->GetInstruction()->DebugName()
            << '(' << safepoint_position->GetInstruction()->GetId() << ')';
        LocationSummary* locations = safepoint_position->GetLocations();
        // Make the object reference visible to the GC at this safepoint:
        // mark its spill slot and/or the register currently holding it.
        if (current->GetParent()->HasSpillSlot()) {
          locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
        }
        if (source.GetKind() == Location::kRegister) {
          locations->SetRegisterBit(source.reg());
        }
      }
    }
    current = next_sibling;
  } while (current != nullptr);

  if (kIsDebugBuild) {
    // Following uses can only be synthesized uses.
    while (use != nullptr) {
      DCHECK(use->IsSynthesized());
      use = use->GetNext();
    }
  }
}
400
401static bool IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(
402 HInstruction* instruction) {
403 return instruction->GetBlock()->GetGraph()->HasIrreducibleLoops() &&
404 (instruction->IsConstant() || instruction->IsCurrentMethod());
405}
406
// Inserts the move (if any) needed when an interval was split between the end
// of block `from` and the start of block `to`, so that the value reaches the
// location expected by the destination sibling.
void RegisterAllocationResolver::ConnectSplitSiblings(LiveInterval* interval,
                                                      HBasicBlock* from,
                                                      HBasicBlock* to) const {
  if (interval->GetNextSibling() == nullptr) {
    // Nothing to connect. The whole range was allocated to the same location.
    return;
  }

  // Find the intervals that cover `from` and `to`.
  size_t destination_position = to->GetLifetimeStart();
  size_t source_position = from->GetLifetimeEnd() - 1;
  LiveInterval* destination = interval->GetSiblingAt(destination_position);
  LiveInterval* source = interval->GetSiblingAt(source_position);

  if (destination == source) {
    // Interval was not split.
    return;
  }

  LiveInterval* parent = interval->GetParent();
  HInstruction* defined_by = parent->GetDefinedBy();
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (destination == nullptr || !destination->CoversSlow(destination_position))) {
    // Our live_in fixed point calculation has found that the instruction is live
    // in the `to` block because it will eventually enter an irreducible loop. Our
    // live interval computation however does not compute a fixed point, and
    // therefore will not have a location for that instruction for `to`.
    // Because the instruction is a constant or the ArtMethod, we don't need to
    // do anything: it will be materialized in the irreducible loop.
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by))
        << defined_by->DebugName() << ":" << defined_by->GetId()
        << " " << from->GetBlockId() << " -> " << to->GetBlockId();
    return;
  }

  if (!destination->HasRegister()) {
    // Values are eagerly spilled. Spill slot already contains appropriate value.
    return;
  }

  Location location_source;
  // `GetSiblingAt` returns the interval whose start and end cover `position`,
  // but does not check whether the interval is inactive at that position.
  // The only situation where the interval is inactive at that position is in the
  // presence of irreducible loops for constants and ArtMethod.
  if (codegen_->GetGraph()->HasIrreducibleLoops() &&
      (source == nullptr || !source->CoversSlow(source_position))) {
    DCHECK(IsMaterializableEntryBlockInstructionOfGraphWithIrreducibleLoop(defined_by));
    if (defined_by->IsConstant()) {
      location_source = defined_by->GetLocations()->Out();
    } else {
      DCHECK(defined_by->IsCurrentMethod());
      location_source = parent->NeedsTwoSpillSlots()
          ? Location::DoubleStackSlot(parent->GetSpillSlot())
          : Location::StackSlot(parent->GetSpillSlot());
    }
  } else {
    DCHECK(source != nullptr);
    DCHECK(source->CoversSlow(source_position));
    DCHECK(destination->CoversSlow(destination_position));
    location_source = source->ToLocation();
  }

  // If `from` has only one successor, we can put the moves at the exit of it. Otherwise
  // we need to put the moves at the entry of `to`.
  if (from->GetNormalSuccessors().size() == 1) {
    InsertParallelMoveAtExitOf(from,
                               defined_by,
                               location_source,
                               destination->ToLocation());
  } else {
    DCHECK_EQ(to->GetPredecessors().size(), 1u);
    InsertParallelMoveAtEntryOf(to,
                                defined_by,
                                location_source,
                                destination->ToLocation());
  }
}
485
486static bool IsValidDestination(Location destination) {
487 return destination.IsRegister()
488 || destination.IsRegisterPair()
489 || destination.IsFpuRegister()
490 || destination.IsFpuRegisterPair()
491 || destination.IsStackSlot()
492 || destination.IsDoubleStackSlot();
493}
494
495void RegisterAllocationResolver::AddMove(HParallelMove* move,
496 Location source,
497 Location destination,
498 HInstruction* instruction,
499 Primitive::Type type) const {
500 if (type == Primitive::kPrimLong
501 && codegen_->ShouldSplitLongMoves()
502 // The parallel move resolver knows how to deal with long constants.
503 && !source.IsConstant()) {
504 move->AddMove(source.ToLow(), destination.ToLow(), Primitive::kPrimInt, instruction);
505 move->AddMove(source.ToHigh(), destination.ToHigh(), Primitive::kPrimInt, nullptr);
506 } else {
507 move->AddMove(source, destination, type, instruction);
508 }
509}
510
511void RegisterAllocationResolver::AddInputMoveFor(HInstruction* input,
512 HInstruction* user,
513 Location source,
514 Location destination) const {
515 if (source.Equals(destination)) return;
516
517 DCHECK(!user->IsPhi());
518
519 HInstruction* previous = user->GetPrevious();
520 HParallelMove* move = nullptr;
521 if (previous == nullptr
522 || !previous->IsParallelMove()
523 || previous->GetLifetimePosition() < user->GetLifetimePosition()) {
524 move = new (allocator_) HParallelMove(allocator_);
525 move->SetLifetimePosition(user->GetLifetimePosition());
526 user->GetBlock()->InsertInstructionBefore(move, user);
527 } else {
528 move = previous->AsParallelMove();
529 }
530 DCHECK_EQ(move->GetLifetimePosition(), user->GetLifetimePosition());
531 AddMove(move, source, destination, nullptr, input->GetType());
532}
533
// Even lifetime positions denote the point just before an instruction executes.
static bool IsInstructionStart(size_t position) {
  return position % 2u == 0u;
}
537
// Odd lifetime positions denote the point just after an instruction executes.
static bool IsInstructionEnd(size_t position) {
  return position % 2u == 1u;
}
541
// Inserts (or reuses) a parallel move at lifetime `position`, handling the
// even/odd encoding of positions (even = instruction start, odd = instruction
// end) and the special cases at block boundaries.
void RegisterAllocationResolver::InsertParallelMoveAt(size_t position,
                                                      HInstruction* instruction,
                                                      Location source,
                                                      Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
  HParallelMove* move;
  if (at == nullptr) {
    if (IsInstructionStart(position)) {
      // Block boundary, don't do anything: the connection of split siblings will handle it.
      return;
    } else {
      // Move must happen before the first instruction of the block.
      at = liveness_.GetInstructionFromPosition((position + 1) / 2);
      // Note that parallel moves may have already been inserted, so we explicitly
      // ask for the first instruction of the block: `GetInstructionFromPosition` does
      // not contain the `HParallelMove` instructions.
      at = at->GetBlock()->GetFirstInstruction();

      if (at->GetLifetimePosition() < position) {
        // We may insert moves for split siblings and phi spills at the beginning of the block.
        // Since this is a different lifetime position, we need to go to the next instruction.
        DCHECK(at->IsParallelMove());
        at = at->GetNext();
      }

      if (at->GetLifetimePosition() != position) {
        DCHECK_GT(at->GetLifetimePosition(), position);
        move = new (allocator_) HParallelMove(allocator_);
        move->SetLifetimePosition(position);
        at->GetBlock()->InsertInstructionBefore(move, at);
      } else {
        DCHECK(at->IsParallelMove());
        move = at->AsParallelMove();
      }
    }
  } else if (IsInstructionEnd(position)) {
    // Move must happen after the instruction.
    DCHECK(!at->IsControlFlow());
    move = at->GetNext()->AsParallelMove();
    // This is a parallel move for connecting siblings in a same block. We need to
    // differentiate it with moves for connecting blocks, and input moves.
    if (move == nullptr || move->GetLifetimePosition() > position) {
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at->GetNext());
    }
  } else {
    // Move must happen before the instruction.
    HInstruction* previous = at->GetPrevious();
    if (previous == nullptr
        || !previous->IsParallelMove()
        || previous->GetLifetimePosition() != position) {
      // If the previous is a parallel move, then its position must be lower
      // than the given `position`: it was added just after the non-parallel
      // move instruction that precedes `instruction`.
      DCHECK(previous == nullptr
             || !previous->IsParallelMove()
             || previous->GetLifetimePosition() < position);
      move = new (allocator_) HParallelMove(allocator_);
      move->SetLifetimePosition(position);
      at->GetBlock()->InsertInstructionBefore(move, at);
    } else {
      move = previous->AsParallelMove();
    }
  }
  DCHECK_EQ(move->GetLifetimePosition(), position);
  AddMove(move, source, destination, instruction, instruction->GetType());
}
613
614void RegisterAllocationResolver::InsertParallelMoveAtExitOf(HBasicBlock* block,
615 HInstruction* instruction,
616 Location source,
617 Location destination) const {
618 DCHECK(IsValidDestination(destination)) << destination;
619 if (source.Equals(destination)) return;
620
621 DCHECK_EQ(block->GetNormalSuccessors().size(), 1u);
622 HInstruction* last = block->GetLastInstruction();
623 // We insert moves at exit for phi predecessors and connecting blocks.
624 // A block ending with an if or a packed switch cannot branch to a block
625 // with phis because we do not allow critical edges. It can also not connect
626 // a split interval between two blocks: the move has to happen in the successor.
627 DCHECK(!last->IsIf() && !last->IsPackedSwitch());
628 HInstruction* previous = last->GetPrevious();
629 HParallelMove* move;
630 // This is a parallel move for connecting blocks. We need to differentiate
631 // it with moves for connecting siblings in a same block, and output moves.
632 size_t position = last->GetLifetimePosition();
633 if (previous == nullptr || !previous->IsParallelMove()
634 || previous->AsParallelMove()->GetLifetimePosition() != position) {
635 move = new (allocator_) HParallelMove(allocator_);
636 move->SetLifetimePosition(position);
637 block->InsertInstructionBefore(move, last);
638 } else {
639 move = previous->AsParallelMove();
640 }
641 AddMove(move, source, destination, instruction, instruction->GetType());
642}
643
644void RegisterAllocationResolver::InsertParallelMoveAtEntryOf(HBasicBlock* block,
645 HInstruction* instruction,
646 Location source,
647 Location destination) const {
648 DCHECK(IsValidDestination(destination)) << destination;
649 if (source.Equals(destination)) return;
650
651 HInstruction* first = block->GetFirstInstruction();
652 HParallelMove* move = first->AsParallelMove();
653 size_t position = block->GetLifetimeStart();
654 // This is a parallel move for connecting blocks. We need to differentiate
655 // it with moves for connecting siblings in a same block, and input moves.
656 if (move == nullptr || move->GetLifetimePosition() != position) {
657 move = new (allocator_) HParallelMove(allocator_);
658 move->SetLifetimePosition(position);
659 block->InsertInstructionBefore(move, first);
660 }
661 AddMove(move, source, destination, instruction, instruction->GetType());
662}
663
// Inserts (or reuses) a parallel move right after `instruction` to move its
// output, e.g. to its spill slot. For phis, which have no execution point of
// their own, the move is placed at the entry of their block instead.
void RegisterAllocationResolver::InsertMoveAfter(HInstruction* instruction,
                                                 Location source,
                                                 Location destination) const {
  DCHECK(IsValidDestination(destination)) << destination;
  if (source.Equals(destination)) return;

  if (instruction->IsPhi()) {
    InsertParallelMoveAtEntryOf(instruction->GetBlock(), instruction, source, destination);
    return;
  }

  size_t position = instruction->GetLifetimePosition() + 1;
  HParallelMove* move = instruction->GetNext()->AsParallelMove();
  // This is a parallel move for moving the output of an instruction. We need
  // to differentiate with input moves, moves for connecting siblings in a
  // same block, and moves for connecting blocks.
  if (move == nullptr || move->GetLifetimePosition() != position) {
    move = new (allocator_) HParallelMove(allocator_);
    move->SetLifetimePosition(position);
    instruction->GetBlock()->InsertInstructionBefore(move, instruction->GetNext());
  }
  AddMove(move, source, destination, instruction, instruction->GetType());
}
687
688} // namespace art