blob: 42788616908e94565565aacd1a0c2525d9ea56cb [file] [log] [blame]
Nicolas Geoffray4e3d23a2014-05-22 18:32:45 +01001/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
18#define ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_
19
Vladimir Marko225b6462015-09-28 12:17:40 +010020#include "base/arena_containers.h"
Ian Rogers0279ebb2014-10-08 17:27:48 -070021#include "base/value_object.h"
Zheng Xuad4450e2015-04-17 18:48:56 +080022#include "locations.h"
Vladimir Marko80afd022015-05-19 18:08:00 +010023#include "primitive.h"
Nicolas Geoffray4e3d23a2014-05-22 18:32:45 +010024
25namespace art {
26
27class HParallelMove;
28class MoveOperands;
29
Zheng Xuad4450e2015-04-17 18:48:56 +080030// Helper classes to resolve a set of parallel moves. Architecture dependent code generator must
31// have their own subclass that implements corresponding virtual functions.
class ParallelMoveResolver : public ValueObject {
 public:
  // `allocator` provides arena storage for the move work list; it must outlive
  // this resolver.
  explicit ParallelMoveResolver(ArenaAllocator* allocator)
      : moves_(allocator->Adapter(kArenaAllocParallelMoveResolver)) {
    // Pre-reserve capacity up front to avoid repeated growth of the arena
    // vector while moves are accumulated.
    moves_.reserve(32);
  }
  virtual ~ParallelMoveResolver() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  virtual void EmitNativeCode(HParallelMove* parallel_move) = 0;

 protected:
  // Build the initial list of moves.
  void BuildInitialMoveList(HParallelMove* parallel_move);

  // Work list of moves still to be resolved, populated from the
  // HParallelMove passed to BuildInitialMoveList().
  ArenaVector<MoveOperands*> moves_;

 private:
  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolver);
};
52
53// This helper class uses swap to resolve dependencies and may emit swap.
class ParallelMoveResolverWithSwap : public ParallelMoveResolver {
 public:
  explicit ParallelMoveResolverWithSwap(ArenaAllocator* allocator)
      : ParallelMoveResolver(allocator) {}
  virtual ~ParallelMoveResolverWithSwap() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;

 protected:
  // RAII helper that allocates a scratch register on construction and, if the
  // register had to be spilled to obtain it, restores it on destruction.
  class ScratchRegisterScope : public ValueObject {
   public:
    ScratchRegisterScope(ParallelMoveResolverWithSwap* resolver,
                         int blocked,
                         int if_scratch,
                         int number_of_registers);
    ~ScratchRegisterScope();

    // The register obtained for scratch use.
    int GetRegister() const { return reg_; }
    // Whether the register had to be spilled before it could be used.
    bool IsSpilled() const { return spilled_; }

   private:
    ParallelMoveResolverWithSwap* resolver_;
    int reg_;
    bool spilled_;
  };

  // Return true if the location can be scratched.
  bool IsScratchLocation(Location loc);

  // Allocate a scratch register for performing a move. The method will try to use
  // a register that is the destination of a move, but that move has not been emitted yet.
  // On return, `*spilled` indicates whether a spill was needed to free the register.
  int AllocateScratchRegister(int blocked, int if_scratch, int register_count, bool* spilled);

  // Emit a move.
  virtual void EmitMove(size_t index) = 0;

  // Execute a move by emitting a swap of two operands.
  virtual void EmitSwap(size_t index) = 0;

  // Architecture-specific spill/restore of `reg`, used when a scratch register
  // must be freed up (see ScratchRegisterScope).
  virtual void SpillScratch(int reg) = 0;
  virtual void RestoreScratch(int reg) = 0;

  // Sentinel meaning "no register".
  static constexpr int kNoRegister = -1;

 private:
  // Perform the move at the moves_ index in question (possibly requiring
  // other moves to satisfy dependencies).
  //
  // Return whether another move in the dependency cycle needs to swap. This
  // is to handle 64bits swaps:
  // 1) In the case of register pairs, where we want the pair to swap first to avoid
  //    building pairs that are unexpected by the code generator. For example, if
  //    we were to swap R1 with R2, we would need to update all locations using
  //    R2 to R1. So a (R2,R3) pair register could become (R1,R3). We could make
  //    the code generator understand such pairs, but it's easier and cleaner to
  //    just not create such pairs and exchange pairs in priority.
  // 2) Even when the architecture does not have pairs, we must handle 64bits swaps
  //    first. Consider the case: (R0->R1) (R1->S) (S->R0), where 'S' is a single
  //    stack slot. If we end up swapping S and R0, S will only contain the low bits
  //    of R0. If R0->R1 is for a 64bits instruction, R1 will therefore not contain
  //    the right value.
  MoveOperands* PerformMove(size_t index);

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverWithSwap);
};
120
121// This helper class uses additional scratch registers to resolve dependencies. It supports all kind
122// of dependency cycles and does not care about the register layout.
class ParallelMoveResolverNoSwap : public ParallelMoveResolver {
 public:
  explicit ParallelMoveResolverNoSwap(ArenaAllocator* allocator)
      : ParallelMoveResolver(allocator),
        scratches_(allocator->Adapter(kArenaAllocParallelMoveResolver)),
        pending_moves_(allocator->Adapter(kArenaAllocParallelMoveResolver)),
        allocator_(allocator) {
    // Pre-reserve typical capacities to avoid arena-vector growth during
    // resolution.
    scratches_.reserve(32);
    pending_moves_.reserve(8);
  }
  virtual ~ParallelMoveResolverNoSwap() {}

  // Resolve a set of parallel moves, emitting assembler instructions.
  void EmitNativeCode(HParallelMove* parallel_move) OVERRIDE;

 protected:
  // Called at the beginning of EmitNativeCode(). A subclass may put some architecture dependent
  // initialization here.
  virtual void PrepareForEmitNativeCode() = 0;

  // Called at the end of EmitNativeCode(). A subclass may put some architecture dependent cleanup
  // here. All scratch locations will be removed after this call.
  virtual void FinishEmitNativeCode() = 0;

  // Allocate a scratch location to perform a move from input kind of location. A subclass should
  // implement this to get the best fit location. If there is no suitable physical register, it can
  // also return a stack slot.
  virtual Location AllocateScratchLocationFor(Location::Kind kind) = 0;

  // Called after a move which takes a scratch location as source. A subclass can defer the cleanup
  // to FinishEmitNativeCode().
  virtual void FreeScratchLocation(Location loc) = 0;

  // Emit a move.
  virtual void EmitMove(size_t index) = 0;

  // Return a scratch location from the moves which exactly matches the kind.
  // Return Location::NoLocation() if no matching scratch location can be found.
  Location GetScratchLocation(Location::Kind kind);

  // Add a location to the scratch list which can be returned from GetScratchLocation() to resolve
  // dependency cycles.
  void AddScratchLocation(Location loc);

  // Remove a location from the scratch list.
  void RemoveScratchLocation(Location loc);

  // List of scratch locations.
  ArenaVector<Location> scratches_;

 private:
  // Perform the move at the given index in `moves_` (possibly requiring other moves to satisfy
  // dependencies).
  void PerformMove(size_t index);

  // Retarget pending moves whose source is `from` to read from `to` instead.
  void UpdateMoveSource(Location from, Location to);

  // Record a move (source -> destination) of `type` to be performed later,
  // used when breaking a dependency cycle.
  void AddPendingMove(Location source, Location destination, Primitive::Type type);

  // Remove `move` from the pending list once it has been handled.
  void DeletePendingMove(MoveOperands* move);

  // Find a move that may be unblocked after (loc -> XXX) is performed.
  MoveOperands* GetUnblockedPendingMove(Location loc);

  // Return true if the location is blocked by outstanding moves.
  bool IsBlockedByMoves(Location loc);

  // Return the number of pending moves.
  size_t GetNumberOfPendingMoves();

  // Additional pending moves which might be added to resolve dependency cycle.
  ArenaVector<MoveOperands*> pending_moves_;

  // Used to allocate pending MoveOperands.
  ArenaAllocator* const allocator_;

  DISALLOW_COPY_AND_ASSIGN(ParallelMoveResolverNoSwap);
};
201
202} // namespace art
203
204#endif // ART_COMPILER_OPTIMIZING_PARALLEL_MOVE_RESOLVER_H_