//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "machine-combiner"

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

STATISTIC(NumInstCombined, "Number of machine instructions combined");

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  const char *getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          bool NewCodeHasLessInsts);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
};
}

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, "machine-combiner",
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, "machine-combiner", "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth, etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}

/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
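///
/// Illustrative example (made-up cycle counts, not from any real schedule
/// model): if a new instruction's only virtual-register use is defined at
/// depth 2 in the trace and the operand latency from that def is 3, the new
/// instruction gets depth max(0, 2 + 3) = 5; the value returned is the depth
/// computed this way for the last ("NewRoot") entry of \p InsInstrs.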
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {

  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map, the depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    DEBUG(dbgs() << "NEW INSTR "; InstrPtr->dump(); dbgs() << "\n";);
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register that is not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        LatencyOp = TSchedModel.computeOperandLatency(
            DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
            InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}

/// Computes instruction latency as max of latency of defined operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
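///
/// Illustrative example (made-up latencies): if NewRoot has two defs, one
/// whose first user is in the trace with an operand latency of 4, and one
/// whose user is not in the trace so NewRoot's plain instruction latency of 2
/// is used instead, the result is max(4, 2) = 4.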
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {

  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(Root, UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// True when the new instruction sequence does not lengthen the critical path
/// and the new sequence has fewer instructions, or the new sequence improves
/// the critical path.
/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. This is decided by the formula:
/// (NewRootDepth + NewRootLatency) <= (RootDepth + RootLatency + RootSlack).
/// If the new sequence has an equal length critical path but does not reduce
/// the number of instructions (NewCodeHasLessInsts is false), then it is not
/// considered an improvement. The slack is the number of cycles Root can be
/// delayed before the critical path becomes longer.
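///
/// A worked example with made-up cycle counts (purely illustrative, not taken
/// from any target's schedule model): NewRootDepth = 3, NewRootLatency = 4,
/// RootDepth = 5, RootLatency = 2, RootSlack = 1 gives 3 + 4 = 7 versus
/// 5 + 2 + 1 = 8, so the new sequence is accepted; a tie (8 versus 8) would
/// only be accepted when the new sequence also has fewer instructions.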
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    bool NewCodeHasLessInsts) {

  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // NewRoot is the last instruction in the \p InsInstrs vector.
  // Get depth and latency of NewRoot.
  unsigned NewRootIdx = InsInstrs.size() - 1;
  MachineInstr *NewRoot = InsInstrs[NewRootIdx];
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned NewRootLatency = getLatency(Root, NewRoot, BlockTrace);

  // Get depth, latency and slack of Root.
  unsigned RootDepth = BlockTrace.getInstrCycles(Root).Depth;
  unsigned RootLatency = TSchedModel.computeInstrLatency(Root);
  unsigned RootSlack = BlockTrace.getInstrSlack(Root);

  DEBUG(dbgs() << "DEPENDENCE DATA FOR " << Root << "\n";
        dbgs() << " NewRootDepth: " << NewRootDepth
               << " NewRootLatency: " << NewRootLatency << "\n";
        dbgs() << " RootDepth: " << RootDepth << " RootLatency: " << RootLatency
               << " RootSlack: " << RootSlack << "\n";
        dbgs() << " NewRootDepth + NewRootLatency "
               << NewRootDepth + NewRootLatency << "\n";
        dbgs() << " RootDepth + RootLatency + RootSlack "
               << RootDepth + RootLatency + RootSlack << "\n";);

  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount = RootDepth + RootLatency + RootSlack;

  if (NewCodeHasLessInsts)
    return NewCycleCount <= OldCycleCount;
  else
    return NewCycleCount < OldCycleCount;
}

/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}

/// True when the new instructions do not increase resource length.
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  DEBUG(dbgs() << "RESOURCE DATA: \n";
        dbgs() << " resource len before: " << ResLenBeforeCombine
               << " after: " << ResLenAfterCombine << "\n";);

  return ResLenAfterCombine <= ResLenBeforeCombine;
}

/// \returns true when the new instruction sequence should be generated
/// regardless of whether it lengthens the critical path or not
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
  if (OptSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}

/// Substitute a slow code sequence with a faster one by
/// evaluating an instruction combining pattern.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size, always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  auto BlockIter = MBB->begin();

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;

    DEBUG(dbgs() << "INSTR "; MI.dump(); dbgs() << "\n";);
    SmallVector<MachineCombinerPattern::MC_PATTERN, 16> Patterns;
    // The motivating example is:
    //
    //      MUL  Other        MUL_op1 MUL_op2  Other
    //       \    /               \      |    /
    //       ADD/SUB      =>        MADD/MSUB
    //       (=Root)                (=NewRoot)

    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size, it unfortunately can hurt performance
    // when the ADD is on the critical path but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met, the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic.

    if (TII->getMachineCombinerPatterns(MI, Patterns)) {
      for (auto P : Patterns) {
        SmallVector<MachineInstr *, 16> InsInstrs;
        SmallVector<MachineInstr *, 16> DelInstrs;
        DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
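        // Lazily construct the trace ensemble that minimizes instruction
        // count; it is cached in MinInstr and reused for later patterns.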
        if (!MinInstr)
          MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                        InstrIdxForVirtReg);
        unsigned NewInstCount = InsInstrs.size();
        unsigned OldInstCount = DelInstrs.size();
        // Found pattern, but did not generate alternative sequence.
        // This can happen e.g. when an immediate could not be materialized
        // in a single instruction.
        if (!NewInstCount)
          continue;
        // Substitute when we optimize for code size and the new sequence has
        // fewer instructions OR
        // the new sequence neither lengthens the critical path nor increases
        // resource pressure.
        if (doSubstitute(NewInstCount, OldInstCount) ||
            (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs,
                                     InstrIdxForVirtReg,
                                     NewInstCount < OldInstCount) &&
             preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs))) {
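          // The substitution is profitable (or forced for code size): splice
          // the new sequence in immediately before Root (MI), then erase the
          // old instructions; eraseFromParentAndMarkDBGValuesForRemoval() also
          // marks their DBG_VALUE users for removal.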
          for (auto *InstrPtr : InsInstrs)
            MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);
          for (auto *InstrPtr : DelInstrs)
            InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();

          Changed = true;
          ++NumInstCombined;

          Traces->invalidate(MBB);
          Traces->verifyAnalysis();
          // Eagerly stop after the first pattern fires.
          break;
        } else {
          // Clean up the instructions of the alternative code sequence. There
          // is no use for them.
          MachineFunction *MF = MBB->getParent();
          for (auto *InstrPtr : InsInstrs)
            MF->DeleteMachineInstr(InstrPtr);
        }
        InstrIdxForVirtReg.clear();
      }
    }
  }

  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  const TargetSubtargetInfo &STI = MF.getSubtarget();
  TII = STI.getInstrInfo();
  TRI = STI.getRegisterInfo();
  SchedModel = STI.getSchedModel();
  TSchedModel.init(SchedModel, &STI, TII);
  MRI = &MF.getRegInfo();
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;
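  // The trace ensemble is created lazily in combineInstructions(), only when
  // a combiner pattern is actually found.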
  OptSize = MF.getFunction()->optForSize();

  DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    DEBUG(dbgs() << "  Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}