/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>

#include "parallel_move_resolver.h"

#include "base/stl_util.h"
#include "nodes.h"

namespace art {

void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  for (size_t i = 0; i < parallel_move->NumMoves(); ++i) {
    MoveOperands* move = parallel_move->MoveOperandsAt(i);
    if (!move->IsRedundant()) {
      moves_.push_back(move);
    }
  }
}

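// Emit the moves of `parallel_move` in an order that respects their dependencies,
// using swaps (EmitSwap) to break cycles: stack-to-stack moves are emitted first,
// then moves with non-constant sources, and moves with constant sources last.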
void ParallelMoveResolverWithSwap::EmitNativeCode(HParallelMove* parallel_move) {
  DCHECK(moves_.empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  // Perform stack-to-stack moves first, to take advantage of a free register
  // on constrained machines.
  for (size_t i = 0; i < moves_.size(); ++i) {
    const MoveOperands& move = *moves_[i];
    // Ignore constants and moves already eliminated.
    if (move.IsEliminated() || move.GetSource().IsConstant()) {
      continue;
    }

    if ((move.GetSource().IsStackSlot() || move.GetSource().IsDoubleStackSlot()) &&
        (move.GetDestination().IsStackSlot() || move.GetDestination().IsDoubleStackSlot())) {
      PerformMove(i);
    }
  }

  for (size_t i = 0; i < moves_.size(); ++i) {
    const MoveOperands& move = *moves_[i];
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.GetSource().IsConstant()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (size_t i = 0; i < moves_.size(); ++i) {
    MoveOperands* move = moves_[i];
    if (!move->IsEliminated()) {
      DCHECK(move->GetSource().IsConstant());
      EmitMove(i);
      // Eliminate the move, in case following moves need a scratch register.
      move->Eliminate();
    }
  }

  moves_.clear();
}

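// Helpers that split a pair location into its halves: LowOf/HighOf return the
// location holding the low/high part of a register pair, FPU register pair or
// double stack slot, and an invalid location for anything else.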
Location LowOf(Location location) {
  if (location.IsRegisterPair()) {
    return Location::RegisterLocation(location.low());
  } else if (location.IsFpuRegisterPair()) {
    return Location::FpuRegisterLocation(location.low());
  } else if (location.IsDoubleStackSlot()) {
    return Location::StackSlot(location.GetStackIndex());
  } else {
    return Location::NoLocation();
  }
}

Location HighOf(Location location) {
  if (location.IsRegisterPair()) {
    return Location::RegisterLocation(location.high());
  } else if (location.IsFpuRegisterPair()) {
    return Location::FpuRegisterLocation(location.high());
  } else if (location.IsDoubleStackSlot()) {
    return Location::StackSlot(location.GetHighStackIndex(4));
  } else {
    return Location::NoLocation();
  }
}

// Update the source of `move`, knowing that `updated_location` has been swapped
// with `new_source`. Note that `updated_location` can be a pair, therefore if
// `move` is non-pair, we need to extract which register to use.
static void UpdateSourceOf(MoveOperands* move, Location updated_location, Location new_source) {
  Location source = move->GetSource();
  if (LowOf(updated_location).Equals(source)) {
    move->SetSource(LowOf(new_source));
  } else if (HighOf(updated_location).Equals(source)) {
    move->SetSource(HighOf(new_source));
  } else {
    DCHECK(updated_location.Equals(source)) << updated_location << " " << source;
    move->SetSource(new_source);
  }
}

MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  DCHECK(!move->IsPending());
  if (move->IsRedundant()) {
    // Because we swap register pairs first, following non-pending
    // moves may become redundant.
    move->Eliminate();
    return nullptr;
  }

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  DCHECK(!move->GetSource().IsInvalid());
  Location destination = move->MarkPending();

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  MoveOperands* required_swap = nullptr;
  for (size_t i = 0; i < moves_.size(); ++i) {
    const MoveOperands& other_move = *moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // calling `PerformMove` cannot create a blocking move via a swap
      // (this loop does not miss any).
      // For example, assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      required_swap = PerformMove(i);

      if (required_swap == move) {
        // If this move is required to swap, we do so without looking
        // at the next moves. Swapping is not blocked by anything, it just
        // updates other moves' sources.
        break;
      } else if (required_swap == moves_[i]) {
        // If `other_move` was swapped, we iterate again to find a new
        // potential cycle.
        required_swap = nullptr;
        i = 0;
      } else if (required_swap != nullptr) {
        // A move is required to swap. We walk back the cycle to find the
        // move by just returning from this `PerformMove`.
        moves_[index]->ClearPending(destination);
        return required_swap;
      }
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  move->ClearPending(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (move->GetSource().Equals(destination)) {
    move->Eliminate();
    DCHECK(required_swap == nullptr);
    return nullptr;
  }

  // The move may be blocked on a (at most one) pending move, in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  bool do_swap = false;
  if (required_swap != nullptr) {
    DCHECK_EQ(required_swap, move);
    do_swap = true;
  } else {
    for (MoveOperands* other_move : moves_) {
      if (other_move->Blocks(destination)) {
        DCHECK(other_move->IsPending());
        if (!move->Is64BitMove() && other_move->Is64BitMove()) {
          // We swap 64-bit moves before swapping 32-bit moves. Walk back the
          // cycle by returning the move that must be swapped.
          return other_move;
        }
        do_swap = true;
        break;
      }
    }
  }

  if (do_swap) {
    EmitSwap(index);
    // Any unperformed (including pending) move with a source of either
    // this move's source or destination needs to have its source
    // changed to reflect the state of affairs after the swap.
    Location source = move->GetSource();
    Location swap_destination = move->GetDestination();
    move->Eliminate();
    for (MoveOperands* other_move : moves_) {
      if (other_move->Blocks(source)) {
        UpdateSourceOf(other_move, source, swap_destination);
      } else if (other_move->Blocks(swap_destination)) {
        UpdateSourceOf(other_move, swap_destination, source);
      }
    }
    // If the swap was required because of a 64-bit move in the middle of a cycle,
    // we return the swapped move, so that the caller knows it needs to re-iterate
    // its dependency loop.
    return required_swap;
  } else {
    // This move is not blocked.
    EmitMove(index);
    move->Eliminate();
    DCHECK(required_swap == nullptr);
    return nullptr;
  }
}

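// A location can be used as a scratch if no unperformed move reads from it and
// some move is going to overwrite it anyway (so clobbering it is safe).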
bool ParallelMoveResolverWithSwap::IsScratchLocation(Location loc) {
  for (MoveOperands* move : moves_) {
    if (move->Blocks(loc)) {
      return false;
    }
  }

  for (MoveOperands* move : moves_) {
    if (move->GetDestination().Equals(loc)) {
      return true;
    }
  }

  return false;
}

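// Pick a register usable as a scratch: prefer a register other than `blocked`
// that qualifies as a scratch location; otherwise fall back to `if_scratch`
// and report through `spilled` that its current value must be saved/restored.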
int ParallelMoveResolverWithSwap::AllocateScratchRegister(int blocked,
                                                          int register_count,
                                                          int if_scratch,
                                                          bool* spilled) {
  DCHECK_NE(blocked, if_scratch);
  int scratch = -1;
  for (int reg = 0; reg < register_count; ++reg) {
    if ((blocked != reg) && IsScratchLocation(Location::RegisterLocation(reg))) {
      scratch = reg;
      break;
    }
  }

  if (scratch == -1) {
    *spilled = true;
    scratch = if_scratch;
  } else {
    *spilled = false;
  }

  return scratch;
}


ParallelMoveResolverWithSwap::ScratchRegisterScope::ScratchRegisterScope(
    ParallelMoveResolverWithSwap* resolver, int blocked, int if_scratch, int number_of_registers)
    : resolver_(resolver),
      reg_(kNoRegister),
      spilled_(false) {
  reg_ = resolver_->AllocateScratchRegister(blocked, number_of_registers, if_scratch, &spilled_);

  if (spilled_) {
    resolver->SpillScratch(reg_);
  }
}


ParallelMoveResolverWithSwap::ScratchRegisterScope::~ScratchRegisterScope() {
  if (spilled_) {
    resolver_->RestoreScratch(reg_);
  }
}

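// Resolve `parallel_move` without swap support: cycles are broken by moving one
// source into a scratch location and completing the cycle through pending moves
// (see PerformMove below).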
void ParallelMoveResolverNoSwap::EmitNativeCode(HParallelMove* parallel_move) {
  DCHECK_EQ(GetNumberOfPendingMoves(), 0u);
  DCHECK(moves_.empty());
  DCHECK(scratches_.empty());

  // Backend dependent initialization.
  PrepareForEmitNativeCode();

  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (size_t i = 0; i < moves_.size(); ++i) {
    const MoveOperands& move = *moves_[i];
    // Skip constants to perform them last. They don't block other moves and
    // skipping such moves with register destinations keeps those registers
    // free for the whole algorithm.
    if (!move.IsEliminated() && !move.GetSource().IsConstant()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources and register destinations with UpdateMoveSource()
  // to reduce the number of literal loads. Stack destinations are skipped since we won't benefit
  // from changing the constant sources to stack locations.
  for (size_t i = 0; i < moves_.size(); ++i) {
    MoveOperands* move = moves_[i];
    Location destination = move->GetDestination();
    if (!move->IsEliminated() && !destination.IsStackSlot() && !destination.IsDoubleStackSlot()) {
      Location source = move->GetSource();
      EmitMove(i);
      move->Eliminate();
      // This may introduce an additional instruction dependency, but it reduces
      // the number of moves and possible literal loads. For example:
      //   Original moves:
      //     1234.5678 -> D0
      //     1234.5678 -> D1
      //   Updated moves:
      //     1234.5678 -> D0
      //     D0 -> D1
      UpdateMoveSource(source, destination);
    }
  }

  // Perform the rest of the moves.
  for (size_t i = 0; i < moves_.size(); ++i) {
    MoveOperands* move = moves_[i];
    if (!move->IsEliminated()) {
      EmitMove(i);
      move->Eliminate();
    }
  }

  // All pending moves that we have added to resolve cycles should have been performed.
  DCHECK_EQ(GetNumberOfPendingMoves(), 0u);

  // Backend dependent cleanup.
  FinishEmitNativeCode();

  moves_.clear();
  scratches_.clear();
}

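// Return a location of the requested kind that no unperformed move reads,
// looking first at the explicit scratch list and then at the destinations of
// the remaining moves; return an invalid location if none qualifies.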
Location ParallelMoveResolverNoSwap::GetScratchLocation(Location::Kind kind) {
  for (Location loc : scratches_) {
    if (loc.GetKind() == kind && !IsBlockedByMoves(loc)) {
      return loc;
    }
  }
  for (MoveOperands* move : moves_) {
    Location loc = move->GetDestination();
    if (loc.GetKind() == kind && !IsBlockedByMoves(loc)) {
      return loc;
    }
  }
  return Location::NoLocation();
}

void ParallelMoveResolverNoSwap::AddScratchLocation(Location loc) {
  if (kIsDebugBuild) {
    for (Location scratch : scratches_) {
      CHECK(!loc.Equals(scratch));
    }
  }
  scratches_.push_back(loc);
}

void ParallelMoveResolverNoSwap::RemoveScratchLocation(Location loc) {
  DCHECK(!IsBlockedByMoves(loc));
  for (auto it = scratches_.begin(), end = scratches_.end(); it != end; ++it) {
    if (loc.Equals(*it)) {
      scratches_.erase(it);
      break;
    }
  }
}

void ParallelMoveResolverNoSwap::PerformMove(size_t index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We mark
  // a move as "pending" on entry to PerformMove in order to detect cycles
  // in the move graph. We use a scratch location to resolve cycles, which
  // may also add new pending moves. After a move has been performed, we
  // update source operands in the move graph to reduce dependencies in
  // the graph.

  DCHECK_LT(index, moves_.size());
  MoveOperands* move = moves_[index];
  DCHECK(!move->IsPending());
  DCHECK(!move->IsEliminated());
  if (move->IsRedundant()) {
    // Previous operations on the list of moves have caused this particular move
    // to become a no-op, so we can safely eliminate it. Consider for example
    // (0 -> 1) (1 -> 0) (1 -> 2). There is a cycle (0 -> 1) (1 -> 0), that we will
    // resolve as (1 -> scratch) (0 -> 1) (scratch -> 0). If, by chance, '2' is
    // used as the scratch location, the move (1 -> 2) will occur while resolving
    // the cycle. When that move is emitted, the code will update moves with a '1'
    // as their source to use '2' instead (see `UpdateMoveSource()`). In our example
    // the initial move (1 -> 2) would then become the no-op (2 -> 2) that can be
    // eliminated here.
    move->Eliminate();
    return;
  }

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  DCHECK(!move->GetSource().IsInvalid());
  Location destination = move->MarkPending();

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (size_t i = 0; i < moves_.size(); ++i) {
    const MoveOperands& other_move = *moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  move->ClearPending(destination);

  // No one else should write to the move's destination while it is pending.
  DCHECK(!move->IsRedundant());

  Location source = move->GetSource();
  // The move may be blocked on several pending moves, in case we have a cycle.
  if (IsBlockedByMoves(destination)) {
    // For a cycle like: (A -> B) (B -> C) (C -> A), we change it to the following
    // sequence:
    //   (C -> scratch)      # Emit right now.
    //   (A -> B) (B -> C)   # Unblocked.
    //   (scratch -> A)      # Add to pending_moves_, blocked by (A -> B).
    Location::Kind kind = source.GetKind();
    DCHECK_NE(kind, Location::kConstant);
    Location scratch = AllocateScratchLocationFor(kind);
    // We only care about the move size.
    Primitive::Type type = move->Is64BitMove() ? Primitive::kPrimLong : Primitive::kPrimInt;
    // Perform (C -> scratch)
    move->SetDestination(scratch);
    EmitMove(index);
    move->Eliminate();
    UpdateMoveSource(source, scratch);
    // Add (scratch -> A).
    AddPendingMove(scratch, destination, type);
  } else {
    // This move is not blocked.
    EmitMove(index);
    move->Eliminate();
    UpdateMoveSource(source, destination);
  }

  // Moves in the pending list should not block any other moves. But performing
  // unblocked moves in the pending list can free scratch registers, so we do this
  // as early as possible.
  MoveOperands* pending_move;
  while ((pending_move = GetUnblockedPendingMove(source)) != nullptr) {
    Location pending_source = pending_move->GetSource();
    Location pending_destination = pending_move->GetDestination();
    // We do not depend on the pending move index. So just delete the move instead
    // of eliminating it to make the pending list cleaner.
    DeletePendingMove(pending_move);
    move->SetSource(pending_source);
    move->SetDestination(pending_destination);
    EmitMove(index);
    move->Eliminate();
    UpdateMoveSource(pending_source, pending_destination);
    // Free any unblocked locations in the scratch location list.
    // Note: Fetch size() on each iteration because scratches_ can be modified inside the loop.
    // FIXME: If FreeScratchLocation() removes the location from scratches_,
    //        we skip the next location. This happens for arm64.
    for (size_t i = 0; i < scratches_.size(); ++i) {
      Location scratch = scratches_[i];
      // Only a scratch overlapping with the performed move's source can be unblocked.
      if (scratch.OverlapsWith(pending_source) && !IsBlockedByMoves(scratch)) {
        FreeScratchLocation(pending_source);
      }
    }
  }
}

void ParallelMoveResolverNoSwap::UpdateMoveSource(Location from, Location to) {
  // This function is used to reduce the dependencies in the graph after
  // (from -> to) has been performed. Since we ensure there is no move with the same
  // destination, (to -> X) can not be blocked while (from -> X) might still be
  // blocked. Consider for example the moves (0 -> 1) (1 -> 2) (1 -> 3). After
  // (1 -> 2) has been performed, the moves left are (0 -> 1) and (1 -> 3). There is
  // a dependency between the two. If we update the source location from 1 to 2, we
  // will get (0 -> 1) and (2 -> 3). There is no dependency between the two.
  //
  // This is not something we must do, but we can use fewer scratch locations with
  // this trick. For example, we can avoid using additional scratch locations for
  // moves (0 -> 1), (1 -> 2), (1 -> 0).
  for (MoveOperands* move : moves_) {
    if (move->GetSource().Equals(from)) {
      move->SetSource(to);
    }
  }
}

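// Pending moves are the (scratch -> original destination) copies created while
// breaking a cycle; they are emitted once their destination is no longer
// blocked by other moves.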
void ParallelMoveResolverNoSwap::AddPendingMove(Location source,
    Location destination, Primitive::Type type) {
  pending_moves_.push_back(new (allocator_) MoveOperands(source, destination, type, nullptr));
}

void ParallelMoveResolverNoSwap::DeletePendingMove(MoveOperands* move) {
  RemoveElement(pending_moves_, move);
}

MoveOperands* ParallelMoveResolverNoSwap::GetUnblockedPendingMove(Location loc) {
  for (MoveOperands* move : pending_moves_) {
    Location destination = move->GetDestination();
    // Only moves with destination overlapping with input loc can be unblocked.
    if (destination.OverlapsWith(loc) && !IsBlockedByMoves(destination)) {
      return move;
    }
  }
  return nullptr;
}

bool ParallelMoveResolverNoSwap::IsBlockedByMoves(Location loc) {
  for (MoveOperands* move : pending_moves_) {
    if (move->Blocks(loc)) {
      return true;
    }
  }
  for (MoveOperands* move : moves_) {
    if (move->Blocks(loc)) {
      return true;
    }
  }
  return false;
}

// So far this is only used for debugging purposes, to make sure all pending
// moves have been performed.
size_t ParallelMoveResolverNoSwap::GetNumberOfPendingMoves() {
  return pending_moves_.size();
}

}  // namespace art