// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/move-optimizer.h"

namespace v8 {
namespace internal {
namespace compiler {

namespace {

struct MoveKey {
  InstructionOperand source;
  InstructionOperand destination;
};

struct MoveKeyCompare {
  bool operator()(const MoveKey& a, const MoveKey& b) const {
    if (a.source.EqualsCanonicalized(b.source)) {
      return a.destination.CompareCanonicalized(b.destination);
    }
    return a.source.CompareCanonicalized(b.source);
  }
};

struct OperandCompare {
  bool operator()(const InstructionOperand& a,
                  const InstructionOperand& b) const {
    return a.CompareCanonicalized(b);
  }
};

typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;

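// Returns the first gap position of |instr| that still holds a non-redundant
// move, eliminating redundant moves and clearing fully redundant gaps along
// the way. Returns one past LAST_GAP_POSITION if no live move is found.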
int FindFirstNonEmptySlot(const Instruction* instr) {
  int i = Instruction::FIRST_GAP_POSITION;
  for (; i <= Instruction::LAST_GAP_POSITION; i++) {
    ParallelMove* moves = instr->parallel_moves()[i];
    if (moves == nullptr) continue;
    for (MoveOperands* move : *moves) {
      if (!move->IsRedundant()) return i;
      move->Eliminate();
    }
    moves->clear();  // Clear this redundant move.
  }
  return i;
}

}  // namespace


MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
    : local_zone_(local_zone),
      code_(code),
      local_vector_(local_zone) {}

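// Entry point of the pass: compress the gap moves of every instruction and
// block, push moves shared by all predecessors of a merge block down into
// that block (unless only deferred predecessors would feed a non-deferred
// block), and finally split repeated loads of the same constant or stack
// slot.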
void MoveOptimizer::Run() {
  for (Instruction* instruction : code()->instructions()) {
    CompressGaps(instruction);
  }
  for (InstructionBlock* block : code()->instruction_blocks()) {
    CompressBlock(block);
  }
  for (InstructionBlock* block : code()->instruction_blocks()) {
    if (block->PredecessorCount() <= 1) continue;
    if (!block->IsDeferred()) {
      bool has_only_deferred = true;
      for (RpoNumber& pred_id : block->predecessors()) {
        if (!code()->InstructionBlockAt(pred_id)->IsDeferred()) {
          has_only_deferred = false;
          break;
        }
      }
      // This would pull down common moves. If the moves occur in deferred
      // blocks, and the closest common successor is not deferred, we lose the
      // optimization of just spilling/filling in deferred blocks, when the
      // current block is not deferred.
      if (has_only_deferred) continue;
    }
    OptimizeMerge(block);
  }
  for (Instruction* gap : code()->instructions()) {
    FinalizeMoves(gap);
  }
}

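// Drops moves in the first gap of |instruction| whose destination is
// overwritten by the instruction itself (as an output or temp) without being
// needed as an input. For a return instruction, every assignment that does
// not feed one of its inputs is dropped as well.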
void MoveOptimizer::RemoveClobberedDestinations(Instruction* instruction) {
  if (instruction->IsCall()) return;
  ParallelMove* moves = instruction->parallel_moves()[0];
  if (moves == nullptr) return;

  DCHECK(instruction->parallel_moves()[1] == nullptr ||
         instruction->parallel_moves()[1]->empty());

  OperandSet outputs(local_zone());
  OperandSet inputs(local_zone());

  // Outputs and temps are treated together as potentially clobbering a
  // destination operand.
  for (size_t i = 0; i < instruction->OutputCount(); ++i) {
    outputs.insert(*instruction->OutputAt(i));
  }
  for (size_t i = 0; i < instruction->TempCount(); ++i) {
    outputs.insert(*instruction->TempAt(i));
  }

  // Input operands block elisions.
  for (size_t i = 0; i < instruction->InputCount(); ++i) {
    inputs.insert(*instruction->InputAt(i));
  }

  // Elide moves made redundant by the instruction.
  for (MoveOperands* move : *moves) {
    if (outputs.find(move->destination()) != outputs.end() &&
        inputs.find(move->destination()) == inputs.end()) {
      move->Eliminate();
    }
  }

  // The ret instruction makes any assignment before it unnecessary, except
  // for the one for its input.
  if (instruction->opcode() == ArchOpcode::kArchRet) {
    for (MoveOperands* move : *moves) {
      if (inputs.find(move->destination()) == inputs.end()) {
        move->Eliminate();
      }
    }
  }
}

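// Pushes eligible moves from the first gap of |from| down into the START gap
// of the following instruction |to|. A move stays behind if its destination
// is an input of |from|, or if its source is clobbered by |from| or by
// another move in the same gap.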
void MoveOptimizer::MigrateMoves(Instruction* to, Instruction* from) {
  if (from->IsCall()) return;

  ParallelMove* from_moves = from->parallel_moves()[0];
  if (from_moves == nullptr || from_moves->empty()) return;

  ZoneSet<InstructionOperand, OperandCompare> dst_cant_be(local_zone());
  ZoneSet<InstructionOperand, OperandCompare> src_cant_be(local_zone());

  // If an operand is an input to the instruction, we cannot move assignments
  // where it appears on the LHS.
  for (size_t i = 0; i < from->InputCount(); ++i) {
    dst_cant_be.insert(*from->InputAt(i));
  }
  // If an operand is an output of the instruction, we cannot move
  // assignments where it appears on the RHS, because we would lose its value
  // before the instruction.
  // Same for temp operands.
  // The output can't appear on the LHS because we performed
  // RemoveClobberedDestinations for the "from" instruction.
  for (size_t i = 0; i < from->OutputCount(); ++i) {
    src_cant_be.insert(*from->OutputAt(i));
  }
  for (size_t i = 0; i < from->TempCount(); ++i) {
    src_cant_be.insert(*from->TempAt(i));
  }
  for (MoveOperands* move : *from_moves) {
    if (move->IsRedundant()) continue;
    // Assume dest has a value "V". If we have a "dest = y" move, then we
    // can't move "z = dest", because z would become y rather than "V".
    // We assume CompressMoves has happened before this, which means we don't
    // have more than one assignment to dest.
    src_cant_be.insert(move->destination());
  }

  ZoneSet<MoveKey, MoveKeyCompare> move_candidates(local_zone());
  // Initially, all moves that don't have conflicting source or destination
  // operands are eligible for being moved down.
  for (MoveOperands* move : *from_moves) {
    if (move->IsRedundant()) continue;
    if (dst_cant_be.find(move->destination()) == dst_cant_be.end()) {
      MoveKey key = {move->source(), move->destination()};
      move_candidates.insert(key);
    }
  }
  if (move_candidates.empty()) return;

  // Stabilize the candidate set.
  bool changed = false;
  do {
    changed = false;
    for (auto iter = move_candidates.begin(); iter != move_candidates.end();) {
      auto current = iter;
      ++iter;
      InstructionOperand src = current->source;
      if (src_cant_be.find(src) != src_cant_be.end()) {
        src_cant_be.insert(current->destination);
        move_candidates.erase(current);
        changed = true;
      }
    }
  } while (changed);

  ParallelMove to_move(local_zone());
  for (MoveOperands* move : *from_moves) {
    if (move->IsRedundant()) continue;
    MoveKey key = {move->source(), move->destination()};
    if (move_candidates.find(key) != move_candidates.end()) {
      to_move.AddMove(move->source(), move->destination(), code_zone());
      move->Eliminate();
    }
  }
  if (to_move.empty()) return;

  ParallelMove* dest =
      to->GetOrCreateParallelMove(Instruction::GapPosition::START, code_zone());

  CompressMoves(&to_move, dest);
  DCHECK(dest->empty());
  for (MoveOperands* m : to_move) {
    dest->push_back(m);
  }
}

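// Merges the moves of |right| into |left|. Each move from |right| is prepared
// for insertion after the moves already in |left| (which may eliminate a
// |left| move it supersedes) and then appended to |left|; |right| is cleared.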
void MoveOptimizer::CompressMoves(ParallelMove* left, MoveOpVector* right) {
  if (right == nullptr) return;

  MoveOpVector& eliminated = local_vector();
  DCHECK(eliminated.empty());

  if (!left->empty()) {
    // Modify the right moves in place and collect moves that will be killed
    // by merging the two gaps.
    for (MoveOperands* move : *right) {
      if (move->IsRedundant()) continue;
      MoveOperands* to_eliminate = left->PrepareInsertAfter(move);
      if (to_eliminate != nullptr) eliminated.push_back(to_eliminate);
    }
    // Eliminate dead moves.
    for (MoveOperands* to_eliminate : eliminated) {
      to_eliminate->Eliminate();
    }
    eliminated.clear();
  }
  // Add all possibly modified moves from right side.
  for (MoveOperands* move : *right) {
    if (move->IsRedundant()) continue;
    left->push_back(move);
  }
  // Nuke right.
  right->clear();
  DCHECK(eliminated.empty());
}

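// Normalizes the gaps of |instruction| so that all remaining moves end up in
// the FIRST_GAP_POSITION and the LAST_GAP_POSITION is empty, merging the two
// gaps with CompressMoves when both contain moves.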
void MoveOptimizer::CompressGaps(Instruction* instruction) {
  int i = FindFirstNonEmptySlot(instruction);
  bool has_moves = i <= Instruction::LAST_GAP_POSITION;
  USE(has_moves);

  if (i == Instruction::LAST_GAP_POSITION) {
    std::swap(instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
              instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
  } else if (i == Instruction::FIRST_GAP_POSITION) {
    CompressMoves(
        instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION],
        instruction->parallel_moves()[Instruction::LAST_GAP_POSITION]);
  }
  // We either have no moves, or, after swapping or compressing, we have
  // all the moves in the first gap position, and none in the second/end gap
  // position.
  ParallelMove* first =
      instruction->parallel_moves()[Instruction::FIRST_GAP_POSITION];
  ParallelMove* last =
      instruction->parallel_moves()[Instruction::LAST_GAP_POSITION];
  USE(first);
  USE(last);

  DCHECK(!has_moves ||
         (first != nullptr && (last == nullptr || last->empty())));
}

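// Pushes the gap moves of |block| downwards, instruction by instruction:
// moves that survive RemoveClobberedDestinations are migrated into the gap
// of the next instruction, so the remaining moves sink as far down the block
// as they can.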
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
  int first_instr_index = block->first_instruction_index();
  int last_instr_index = block->last_instruction_index();

  // Start by removing gap assignments where the output of the subsequent
  // instruction appears on LHS, as long as they are not needed by its input.
  Instruction* prev_instr = code()->instructions()[first_instr_index];
  RemoveClobberedDestinations(prev_instr);

  for (int index = first_instr_index + 1; index <= last_instr_index; ++index) {
    Instruction* instr = code()->instructions()[index];
    // Migrate eligible moves from the gap of prev_instr into the gap of instr.
    MigrateMoves(instr, prev_instr);
    // Remove gap assignments clobbered by instr's output.
    RemoveClobberedDestinations(instr);
    prev_instr = instr;
  }
}


const Instruction* MoveOptimizer::LastInstruction(
    const InstructionBlock* block) const {
  return code()->instructions()[block->last_instruction_index()];
}

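// For a block with multiple predecessors, pushes moves that appear in the
// last gap of every predecessor down into the first gap of |block| itself,
// provided the predecessors' final instructions are safe to move across and
// no pushed move would read an operand still written by a move left behind.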
void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
  DCHECK(block->PredecessorCount() > 1);
  // Ensure that the last instructions of all incoming blocks don't contain
  // anything that would prevent moving gap moves across them.
  for (RpoNumber& pred_index : block->predecessors()) {
    const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);

    // If the predecessor has more than one successor, we shouldn't attempt to
    // move down to this block (one of the successors) any of the gap moves,
    // because their effect may be necessary to the other successors.
    if (pred->SuccessorCount() > 1) return;

    const Instruction* last_instr =
        code()->instructions()[pred->last_instruction_index()];
    if (last_instr->IsCall()) return;
    if (last_instr->TempCount() != 0) return;
    if (last_instr->OutputCount() != 0) return;
    for (size_t i = 0; i < last_instr->InputCount(); ++i) {
      const InstructionOperand* op = last_instr->InputAt(i);
      if (!op->IsConstant() && !op->IsImmediate()) return;
    }
  }
  // TODO(dcarney): pass a ZonePool down for this?
  MoveMap move_map(local_zone());
  size_t correct_counts = 0;
  // Accumulate set of shared moves.
  for (RpoNumber& pred_index : block->predecessors()) {
    const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
    const Instruction* instr = LastInstruction(pred);
    if (instr->parallel_moves()[0] == nullptr ||
        instr->parallel_moves()[0]->empty()) {
      return;
    }
    for (const MoveOperands* move : *instr->parallel_moves()[0]) {
      if (move->IsRedundant()) continue;
      InstructionOperand src = move->source();
      InstructionOperand dst = move->destination();
      MoveKey key = {src, dst};
      auto res = move_map.insert(std::make_pair(key, 1));
      if (!res.second) {
        res.first->second++;
        if (res.first->second == block->PredecessorCount()) {
          correct_counts++;
        }
      }
    }
  }
  if (move_map.empty() || correct_counts == 0) return;

  // Find insertion point.
  Instruction* instr =
      code()->instructions()[block->first_instruction_index()];

  if (correct_counts != move_map.size()) {
    // Moves that are unique to each predecessor won't be pushed to the common
    // successor.
    OperandSet conflicting_srcs(local_zone());
    for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
      auto current = iter;
      ++iter;
      if (current->second != block->PredecessorCount()) {
        InstructionOperand dest = current->first.destination;
        // Not all the moves in all the gaps are the same. Maybe some are. If
        // there are such moves, we could move them, but the destination of the
        // moves staying behind can't appear as a source of a common move,
        // because the move staying behind will clobber this destination.
        conflicting_srcs.insert(dest);
        move_map.erase(current);
      }
    }

    bool changed = false;
    do {
      // If a common move can't be pushed to the common successor, then its
      // destination also can't appear as source to any move being pushed.
      changed = false;
      for (auto iter = move_map.begin(), end = move_map.end(); iter != end;) {
        auto current = iter;
        ++iter;
        DCHECK_EQ(block->PredecessorCount(), current->second);
        if (conflicting_srcs.find(current->first.source) !=
            conflicting_srcs.end()) {
          conflicting_srcs.insert(current->first.destination);
          move_map.erase(current);
          changed = true;
        }
      }
    } while (changed);
  }

  if (move_map.empty()) return;

  DCHECK_NOT_NULL(instr);
  bool gap_initialized = true;
  if (instr->parallel_moves()[0] != nullptr &&
      !instr->parallel_moves()[0]->empty()) {
    // Will compress after insertion.
    gap_initialized = false;
    std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
  }
  ParallelMove* moves = instr->GetOrCreateParallelMove(
      static_cast<Instruction::GapPosition>(0), code_zone());
  // Delete relevant entries in predecessors and move everything to block.
  bool first_iteration = true;
  for (RpoNumber& pred_index : block->predecessors()) {
    const InstructionBlock* pred = code()->InstructionBlockAt(pred_index);
    for (MoveOperands* move : *LastInstruction(pred)->parallel_moves()[0]) {
      if (move->IsRedundant()) continue;
      MoveKey key = {move->source(), move->destination()};
      auto it = move_map.find(key);
      if (it != move_map.end()) {
        if (first_iteration) {
          moves->AddMove(move->source(), move->destination());
        }
        move->Eliminate();
      }
    }
    first_iteration = false;
  }
  // Compress.
  if (!gap_initialized) {
    CompressMoves(instr->parallel_moves()[0], instr->parallel_moves()[1]);
  }
  CompressBlock(block);
}

namespace {

bool IsSlot(const InstructionOperand& op) {
  return op.IsStackSlot() || op.IsDoubleStackSlot();
}

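// Orders loads by canonicalized source; among loads of the same source,
// non-slot (register) destinations sort first, so each group starts with the
// destination that is cheapest to reuse as a source for the remaining loads.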
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
  if (!a->source().EqualsCanonicalized(b->source())) {
    return a->source().CompareCanonicalized(b->source());
  }
  if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
  if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
  return a->destination().CompareCanonicalized(b->destination());
}

}  // namespace


// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
  MoveOpVector& loads = local_vector();
  DCHECK(loads.empty());

  ParallelMove* parallel_moves = instr->parallel_moves()[0];
  if (parallel_moves == nullptr) return;
  // Find all the loads.
  for (MoveOperands* move : *parallel_moves) {
    if (move->IsRedundant()) continue;
    if (move->source().IsConstant() || IsSlot(move->source())) {
      loads.push_back(move);
    }
  }
  if (loads.empty()) return;
  // Group the loads by source, moving the preferred destination to the
  // beginning of the group.
  std::sort(loads.begin(), loads.end(), LoadCompare);
  MoveOperands* group_begin = nullptr;
  for (MoveOperands* load : loads) {
    // New group.
    if (group_begin == nullptr ||
        !load->source().EqualsCanonicalized(group_begin->source())) {
      group_begin = load;
      continue;
    }
    // Nothing to be gained from splitting here.
    if (IsSlot(group_begin->destination())) continue;
    // Insert new move into slot 1.
    ParallelMove* slot_1 = instr->GetOrCreateParallelMove(
        static_cast<Instruction::GapPosition>(1), code_zone());
    slot_1->AddMove(group_begin->destination(), load->destination());
    load->Eliminate();
  }
  loads.clear();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8