blob: 8e4db1732bf92fa2903e52806e4165915439ba76 [file] [log] [blame]
Rong Xu3d2efdf2018-10-09 22:03:40 +00001//===---- X86CondBrFolding.cpp - optimize conditional branches ------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9// This file defines a pass that optimizes condition branches on x86 by taking
10// advantage of the three-way conditional code generated by compare
11// instructions.
// Currently, it tries to hoist EQ and NE conditional branches to a dominant
13// conditional branch condition where the same EQ/NE conditional code is
14// computed. An example:
15// bb_0:
16// cmp %0, 19
17// jg bb_1
18// jmp bb_2
19// bb_1:
20// cmp %0, 40
21// jg bb_3
22// jmp bb_4
23// bb_4:
24// cmp %0, 20
25// je bb_5
26// jmp bb_6
27// Here we could combine the two compares in bb_0 and bb_4 and have the
28// following code:
29// bb_0:
30// cmp %0, 20
31// jg bb_1
32// jl bb_2
33// jmp bb_5
34// bb_1:
35// cmp %0, 40
36// jg bb_3
37// jmp bb_6
38// For the case of %0 == 20 (bb_5), we eliminate two jumps, and the control
39// height for bb_6 is also reduced. bb_4 is gone after the optimization.
40//
// There are plenty of these code patterns, especially from the switch case
// lowering where we generate a compare of "pivot-1" for the inner nodes in the
// binary search tree.
44//===----------------------------------------------------------------------===//
45
46#include "X86.h"
47#include "X86InstrInfo.h"
48#include "X86Subtarget.h"
49#include "llvm/ADT/Statistic.h"
50#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
51#include "llvm/CodeGen/MachineFunctionPass.h"
52#include "llvm/CodeGen/MachineInstrBuilder.h"
53#include "llvm/CodeGen/MachineRegisterInfo.h"
54#include "llvm/Support/BranchProbability.h"
55
using namespace llvm;

// Debug type used by LLVM_DEBUG/STATISTIC output for this pass.
#define DEBUG_TYPE "x86-condbr-folding"

// Counts conditional branches folded away by this pass.
STATISTIC(NumFixedCondBrs, "Number of x86 condbr folded");
61
62namespace {
63class X86CondBrFoldingPass : public MachineFunctionPass {
64public:
Craig Topperba3ab782018-12-07 18:10:34 +000065 X86CondBrFoldingPass() : MachineFunctionPass(ID) {
66 initializeX86CondBrFoldingPassPass(*PassRegistry::getPassRegistry());
67 }
Rong Xu3d2efdf2018-10-09 22:03:40 +000068 StringRef getPassName() const override { return "X86 CondBr Folding"; }
69
70 bool runOnMachineFunction(MachineFunction &MF) override;
71
72 void getAnalysisUsage(AnalysisUsage &AU) const override {
73 MachineFunctionPass::getAnalysisUsage(AU);
74 AU.addRequired<MachineBranchProbabilityInfo>();
75 }
76
Craig Topperba3ab782018-12-07 18:10:34 +000077public:
Rong Xu3d2efdf2018-10-09 22:03:40 +000078 static char ID;
79};
Craig Topperba3ab782018-12-07 18:10:34 +000080} // namespace
Rong Xu3d2efdf2018-10-09 22:03:40 +000081
// Pass identification token; its address is the unique key in the registry.
char X86CondBrFoldingPass::ID = 0;
// Register the pass with command-line name and display name "X86CondBrFolding";
// both trailing flags (cfg-only, is-analysis) are false.
INITIALIZE_PASS(X86CondBrFoldingPass, "X86CondBrFolding", "X86CondBrFolding", false, false)
Rong Xu3d2efdf2018-10-09 22:03:40 +000084
85FunctionPass *llvm::createX86CondBrFolding() {
86 return new X86CondBrFoldingPass();
87}
88
namespace {
// Auxiliary information cached for each candidate MBB: the parsed
// compare-and-branch pattern plus bookkeeping used by the folding logic.
struct TargetMBBInfo {
  MachineBasicBlock *TBB;   // Taken target of the conditional branch.
  MachineBasicBlock *FBB;   // Fall-through target (of the unconditional jump).
  MachineInstr *BrInstr;    // The conditional branch instruction.
  MachineInstr *CmpInstr;   // The CMPri/SUBri that produces the condition.
  X86::CondCode BranchCode; // Condition code, canonicalized to E/L/G.
  unsigned SrcReg;          // Register compared against CmpValue.
  int CmpValue;             // Immediate the register is compared with
                            // (adjusted by +/-1 when LE/GE is canonicalized).
  bool Modified;            // True if BranchCode/targets were canonicalized in
                            // this info only and the MIR still needs fixup.
  bool CmpBrOnly;           // True if the MBB contains nothing but the
                            // compare and its branches.
};

// A class that optimizes the conditional branch by hoisting and merging
// CondCode.
class X86CondBrFolding {
public:
  X86CondBrFolding(const X86InstrInfo *TII,
                   const MachineBranchProbabilityInfo *MBPI,
                   MachineFunction &MF)
      : TII(TII), MBPI(MBPI), MF(MF) {}
  // Run the optimization over MF; returns true if the MIR was changed.
  bool optimize();

private:
  const X86InstrInfo *TII;
  const MachineBranchProbabilityInfo *MBPI;
  MachineFunction &MF;
  // Per-block analysis results, indexed by MBB number; null for blocks that
  // are not candidates.
  std::vector<std::unique_ptr<TargetMBBInfo>> MBBInfos;
  // Blocks made dead by the transformation; erased at the end of optimize().
  SmallVector<MachineBasicBlock *, 4> RemoveList;

  void optimizeCondBr(MachineBasicBlock &MBB,
                      SmallVectorImpl<MachineBasicBlock *> &BranchPath);
  void fixBranchProb(MachineBasicBlock *NextMBB, MachineBasicBlock *RootMBB,
                     SmallVectorImpl<MachineBasicBlock *> &BranchPath);
  void replaceBrDest(MachineBasicBlock *MBB, MachineBasicBlock *OrigDest,
                     MachineBasicBlock *NewDest);
  void fixupModifiedCond(MachineBasicBlock *MBB);
  std::unique_ptr<TargetMBBInfo> analyzeMBB(MachineBasicBlock &MBB);
  static bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                             int &CmpValue);
  bool findPath(MachineBasicBlock *MBB,
                SmallVectorImpl<MachineBasicBlock *> &BranchPath);
  // Look up the cached analysis for MBB (may be null).
  TargetMBBInfo *getMBBInfo(MachineBasicBlock *MBB) const {
    return MBBInfos[MBB->getNumber()].get();
  }
};
} // namespace
Rong Xu3d2efdf2018-10-09 22:03:40 +0000136
137// Find a valid path that we can reuse the CondCode.
138// The resulted path (if return true) is stored in BranchPath.
139// Return value:
// false: no valid path is found.
141// true: a valid path is found and the targetBB can be reached.
bool X86CondBrFolding::findPath(
    MachineBasicBlock *MBB, SmallVectorImpl<MachineBasicBlock *> &BranchPath) {
  TargetMBBInfo *MBBInfo = getMBBInfo(MBB);
  assert(MBBInfo && "Expecting a candidate MBB");
  int CmpValue = MBBInfo->CmpValue;

  // Walk up the single-predecessor chain of compare-and-branch blocks that
  // all test the same register as MBB does.
  MachineBasicBlock *PredMBB = *MBB->pred_begin();
  MachineBasicBlock *SaveMBB = MBB;
  while (PredMBB) {
    TargetMBBInfo *PredMBBInfo = getMBBInfo(PredMBB);
    if (!PredMBBInfo || PredMBBInfo->SrcReg != MBBInfo->SrcReg)
      return false;

    // Determine which edge of the predecessor leads to SaveMBB.
    assert(SaveMBB == PredMBBInfo->TBB || SaveMBB == PredMBBInfo->FBB);
    bool IsFalseBranch = (SaveMBB == PredMBBInfo->FBB);

    X86::CondCode CC = PredMBBInfo->BranchCode;
    assert(CC == X86::COND_L || CC == X86::COND_G || CC == X86::COND_E);
    int PredCmpValue = PredMBBInfo->CmpValue;
    // Evaluate the predecessor's test as if SrcReg were exactly CmpValue.
    bool ValueCmpTrue = ((CmpValue < PredCmpValue && CC == X86::COND_L) ||
                         (CmpValue > PredCmpValue && CC == X86::COND_G) ||
                         (CmpValue == PredCmpValue && CC == X86::COND_E));
    // Check if both the result of value compare and the branch target match.
    // If they disagree, SrcReg == CmpValue is impossible on this path, so the
    // EQ target is dead from here and there is nothing to fold.
    if (!(ValueCmpTrue ^ IsFalseBranch)) {
      LLVM_DEBUG(dbgs() << "Dead BB detected!\n");
      return false;
    }

    BranchPath.push_back(PredMBB);
    // These are the conditions on which we could combine the compares.
    if ((CmpValue == PredCmpValue) ||
        (CmpValue == PredCmpValue - 1 && CC == X86::COND_L) ||
        (CmpValue == PredCmpValue + 1 && CC == X86::COND_G))
      return true;

    // If PredMBB has more than one predecessor, or is not a pure cmp-and-br
    // block, we bail out.
    if (PredMBB->pred_size() != 1 || !PredMBBInfo->CmpBrOnly)
      return false;

    SaveMBB = PredMBB;
    PredMBB = *PredMBB->pred_begin();
  }
  return false;
}
186
// Fix up any PHI node in the successor MBB: rewrite incoming-block operands
// that reference OldMBB to reference NewMBB instead.
static void fixPHIsInSucc(MachineBasicBlock *MBB, MachineBasicBlock *OldMBB,
                          MachineBasicBlock *NewMBB) {
  if (NewMBB == OldMBB)
    return;
  // PHIs are grouped at the top of the block; stop at the first non-PHI.
  for (auto MI = MBB->instr_begin(), ME = MBB->instr_end();
       MI != ME && MI->isPHI(); ++MI)
    // The incoming-MBB operands of a PHI sit at even indices starting at 2
    // (operand 0 is the def, then (value, MBB) pairs follow).
    for (unsigned i = 2, e = MI->getNumOperands() + 1; i != e; i += 2) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.getMBB() == OldMBB)
        MO.setMBB(NewMBB);
    }
}
200
201// Utility function to set branch probability for edge MBB->SuccMBB.
202static inline bool setBranchProb(MachineBasicBlock *MBB,
203 MachineBasicBlock *SuccMBB,
204 BranchProbability Prob) {
205 auto MBBI = std::find(MBB->succ_begin(), MBB->succ_end(), SuccMBB);
206 if (MBBI == MBB->succ_end())
207 return false;
208 MBB->setSuccProbability(MBBI, Prob);
209 return true;
210}
211
212// Utility function to find the unconditional br instruction in MBB.
213static inline MachineBasicBlock::iterator
214findUncondBrI(MachineBasicBlock *MBB) {
215 return std::find_if(MBB->begin(), MBB->end(), [](MachineInstr &MI) -> bool {
216 return MI.getOpcode() == X86::JMP_1;
217 });
218}
219
// Replace MBB's original successor, OrigDest, with NewDest.
// Also update the MBBInfo for MBB.
void X86CondBrFolding::replaceBrDest(MachineBasicBlock *MBB,
                                     MachineBasicBlock *OrigDest,
                                     MachineBasicBlock *NewDest) {
  TargetMBBInfo *MBBInfo = getMBBInfo(MBB);
  MachineInstr *BrMI;
  if (MBBInfo->TBB == OrigDest) {
    // OrigDest is the taken target: rebuild the conditional branch with the
    // same condition code, aimed at NewDest.
    BrMI = MBBInfo->BrInstr;
    unsigned JNCC = GetCondBranchFromCond(MBBInfo->BranchCode);
    MachineInstrBuilder MIB =
        BuildMI(*MBB, BrMI, MBB->findDebugLoc(BrMI), TII->get(JNCC))
            .addMBB(NewDest);
    MBBInfo->TBB = NewDest;
    MBBInfo->BrInstr = MIB.getInstr();
  } else { // Should be the unconditional jump stmt.
    MachineBasicBlock::iterator UncondBrI = findUncondBrI(MBB);
    BuildMI(*MBB, UncondBrI, MBB->findDebugLoc(UncondBrI), TII->get(X86::JMP_1))
        .addMBB(NewDest);
    MBBInfo->FBB = NewDest;
    BrMI = &*UncondBrI;
  }
  // Retarget PHIs in NewDest first; the old branch is erased only after its
  // replacement has been inserted above.
  fixPHIsInSucc(NewDest, OrigDest, MBB);
  BrMI->eraseFromParent();
  // Copy the old edge's probability onto the new edge before removing it.
  MBB->addSuccessor(NewDest);
  setBranchProb(MBB, NewDest, MBPI->getEdgeProbability(MBB, OrigDest));
  MBB->removeSuccessor(OrigDest);
}
248
// Change the CondCode and BrInstr according to MBBInfo: materialize in the
// MIR the canonicalized condition that analyzeMBB recorded only in the info
// (e.g. NE recorded as E with swapped targets).
// NOTE(review): only the branch instructions are rewritten here; the compare
// immediate is left untouched even though CmpValue was adjusted by +/-1 for
// LE/GE in analyzeMBB — presumably optimizeCondBr compensates by moving the
// candidate's compare into the root block. Verify paths where that move does
// not happen.
void X86CondBrFolding::fixupModifiedCond(MachineBasicBlock *MBB) {
  TargetMBBInfo *MBBInfo = getMBBInfo(MBB);
  if (!MBBInfo->Modified)
    return;

  // Rebuild the conditional branch with the recorded CondCode and target.
  MachineInstr *BrMI = MBBInfo->BrInstr;
  X86::CondCode CC = MBBInfo->BranchCode;
  MachineInstrBuilder MIB = BuildMI(*MBB, BrMI, MBB->findDebugLoc(BrMI),
                                    TII->get(GetCondBranchFromCond(CC)))
                                .addMBB(MBBInfo->TBB);
  BrMI->eraseFromParent();
  MBBInfo->BrInstr = MIB.getInstr();

  // Rebuild the unconditional jump to the recorded fall-through target.
  MachineBasicBlock::iterator UncondBrI = findUncondBrI(MBB);
  BuildMI(*MBB, UncondBrI, MBB->findDebugLoc(UncondBrI), TII->get(X86::JMP_1))
      .addMBB(MBBInfo->FBB);
  MBB->erase(UncondBrI);
  MBBInfo->Modified = false;
}
269
270//
271// Apply the transformation:
272// RootMBB -1-> ... PredMBB -3-> MBB -5-> TargetMBB
273// \-2-> \-4-> \-6-> FalseMBB
274// ==>
275// RootMBB -1-> ... PredMBB -7-> FalseMBB
276// TargetMBB <-8-/ \-2-> \-4->
277//
278// Note that PredMBB and RootMBB could be the same.
279// And in the case of dead TargetMBB, we will not have TargetMBB and edge 8.
280//
281// There are some special handling where the RootMBB is COND_E in which case
282// we directly short-cycle the brinstr.
283//
284void X86CondBrFolding::optimizeCondBr(
285 MachineBasicBlock &MBB, SmallVectorImpl<MachineBasicBlock *> &BranchPath) {
286
287 X86::CondCode CC;
288 TargetMBBInfo *MBBInfo = getMBBInfo(&MBB);
289 assert(MBBInfo && "Expecting a candidate MBB");
290 MachineBasicBlock *TargetMBB = MBBInfo->TBB;
291 BranchProbability TargetProb = MBPI->getEdgeProbability(&MBB, MBBInfo->TBB);
292
293 // Forward the jump from MBB's predecessor to MBB's false target.
294 MachineBasicBlock *PredMBB = BranchPath.front();
295 TargetMBBInfo *PredMBBInfo = getMBBInfo(PredMBB);
296 assert(PredMBBInfo && "Expecting a candidate MBB");
297 if (PredMBBInfo->Modified)
298 fixupModifiedCond(PredMBB);
299 CC = PredMBBInfo->BranchCode;
300 // Don't do this if depth of BranchPath is 1 and PredMBB is of COND_E.
301 // We will short-cycle directly for this case.
302 if (!(CC == X86::COND_E && BranchPath.size() == 1))
303 replaceBrDest(PredMBB, &MBB, MBBInfo->FBB);
304
305 MachineBasicBlock *RootMBB = BranchPath.back();
306 TargetMBBInfo *RootMBBInfo = getMBBInfo(RootMBB);
307 assert(RootMBBInfo && "Expecting a candidate MBB");
308 if (RootMBBInfo->Modified)
309 fixupModifiedCond(RootMBB);
310 CC = RootMBBInfo->BranchCode;
311
312 if (CC != X86::COND_E) {
313 MachineBasicBlock::iterator UncondBrI = findUncondBrI(RootMBB);
314 // RootMBB: Cond jump to the original not-taken MBB.
315 X86::CondCode NewCC;
316 switch (CC) {
317 case X86::COND_L:
318 NewCC = X86::COND_G;
319 break;
320 case X86::COND_G:
321 NewCC = X86::COND_L;
322 break;
323 default:
324 llvm_unreachable("unexpected condtional code.");
325 }
326 BuildMI(*RootMBB, UncondBrI, RootMBB->findDebugLoc(UncondBrI),
327 TII->get(GetCondBranchFromCond(NewCC)))
328 .addMBB(RootMBBInfo->FBB);
329
330 // RootMBB: Jump to TargetMBB
331 BuildMI(*RootMBB, UncondBrI, RootMBB->findDebugLoc(UncondBrI),
332 TII->get(X86::JMP_1))
333 .addMBB(TargetMBB);
334 RootMBB->addSuccessor(TargetMBB);
335 fixPHIsInSucc(TargetMBB, &MBB, RootMBB);
336 RootMBB->erase(UncondBrI);
337 } else {
338 replaceBrDest(RootMBB, RootMBBInfo->TBB, TargetMBB);
339 }
340
341 // Fix RootMBB's CmpValue to MBB's CmpValue to TargetMBB. Don't set Imm
342 // directly. Move MBB's stmt to here as the opcode might be different.
343 if (RootMBBInfo->CmpValue != MBBInfo->CmpValue) {
344 MachineInstr *NewCmp = MBBInfo->CmpInstr;
345 NewCmp->removeFromParent();
346 RootMBB->insert(RootMBBInfo->CmpInstr, NewCmp);
347 RootMBBInfo->CmpInstr->eraseFromParent();
348 }
349
350 // Fix branch Probabilities.
351 auto fixBranchProb = [&](MachineBasicBlock *NextMBB) {
352 BranchProbability Prob;
353 for (auto &I : BranchPath) {
354 MachineBasicBlock *ThisMBB = I;
355 if (!ThisMBB->hasSuccessorProbabilities() ||
356 !ThisMBB->isSuccessor(NextMBB))
357 break;
358 Prob = MBPI->getEdgeProbability(ThisMBB, NextMBB);
359 if (Prob.isUnknown())
360 break;
361 TargetProb = Prob * TargetProb;
362 Prob = Prob - TargetProb;
363 setBranchProb(ThisMBB, NextMBB, Prob);
364 if (ThisMBB == RootMBB) {
365 setBranchProb(ThisMBB, TargetMBB, TargetProb);
366 }
367 ThisMBB->normalizeSuccProbs();
368 if (ThisMBB == RootMBB)
369 break;
370 NextMBB = ThisMBB;
371 }
372 return true;
373 };
374 if (CC != X86::COND_E && !TargetProb.isUnknown())
375 fixBranchProb(MBBInfo->FBB);
376
377 if (CC != X86::COND_E)
378 RemoveList.push_back(&MBB);
379
380 // Invalidate MBBInfo just in case.
381 MBBInfos[MBB.getNumber()] = nullptr;
382 MBBInfos[RootMBB->getNumber()] = nullptr;
383
384 LLVM_DEBUG(dbgs() << "After optimization:\nRootMBB is: " << *RootMBB << "\n");
385 if (BranchPath.size() > 1)
386 LLVM_DEBUG(dbgs() << "PredMBB is: " << *(BranchPath[0]) << "\n");
387}
388
// Driver function for optimization: find the valid candidates and apply
// the transformation.
bool X86CondBrFolding::optimize() {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "***** X86CondBr Folding on Function: " << MF.getName()
                    << " *****\n");
  // Setup data structures: analyze every block up front, indexed by number.
  MBBInfos.resize(MF.getNumBlockIDs());
  for (auto &MBB : MF)
    MBBInfos[MBB.getNumber()] = analyzeMBB(MBB);

  for (auto &MBB : MF) {
    TargetMBBInfo *MBBInfo = getMBBInfo(&MBB);
    // Only pure cmp+branch blocks with a single predecessor are candidates.
    if (!MBBInfo || !MBBInfo->CmpBrOnly)
      continue;
    if (MBB.pred_size() != 1)
      continue;
    LLVM_DEBUG(dbgs() << "Work on MBB." << MBB.getNumber()
                      << " CmpValue: " << MBBInfo->CmpValue << "\n");
    SmallVector<MachineBasicBlock *, 4> BranchPath;
    if (!findPath(&MBB, BranchPath))
      continue;

#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Found one path (len=" << BranchPath.size() << "):\n");
    int Index = 1;
    LLVM_DEBUG(dbgs() << "Target MBB is: " << MBB << "\n");
    for (auto I = BranchPath.rbegin(); I != BranchPath.rend(); ++I, ++Index) {
      MachineBasicBlock *PMBB = *I;
      TargetMBBInfo *PMBBInfo = getMBBInfo(PMBB);
      LLVM_DEBUG(dbgs() << "Path MBB (" << Index << " of " << BranchPath.size()
                        << ") is " << *PMBB);
      LLVM_DEBUG(dbgs() << "CC=" << PMBBInfo->BranchCode
                        << " Val=" << PMBBInfo->CmpValue
                        << " CmpBrOnly=" << PMBBInfo->CmpBrOnly << "\n\n");
    }
#endif
    optimizeCondBr(MBB, BranchPath);
    Changed = true;
  }
  NumFixedCondBrs += RemoveList.size();
  // Erase the blocks made dead by the transformation; detach all successor
  // edges first so CFG bookkeeping stays consistent.
  for (auto MBBI : RemoveList) {
    while (!MBBI->succ_empty())
      MBBI->removeSuccessor(MBBI->succ_end() - 1);

    MBBI->eraseFromParent();
  }

  return Changed;
}
439
440// Analyze instructions that generate CondCode and extract information.
441bool X86CondBrFolding::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
442 int &CmpValue) {
443 unsigned SrcRegIndex = 0;
444 unsigned ValueIndex = 0;
445 switch (MI.getOpcode()) {
446 // TODO: handle test instructions.
447 default:
448 return false;
449 case X86::CMP64ri32:
450 case X86::CMP64ri8:
451 case X86::CMP32ri:
452 case X86::CMP32ri8:
453 case X86::CMP16ri:
454 case X86::CMP16ri8:
455 case X86::CMP8ri:
456 SrcRegIndex = 0;
457 ValueIndex = 1;
458 break;
459 case X86::SUB64ri32:
460 case X86::SUB64ri8:
461 case X86::SUB32ri:
462 case X86::SUB32ri8:
463 case X86::SUB16ri:
464 case X86::SUB16ri8:
465 case X86::SUB8ri:
466 SrcRegIndex = 1;
467 ValueIndex = 2;
468 break;
469 }
470 SrcReg = MI.getOperand(SrcRegIndex).getReg();
471 assert(MI.getOperand(ValueIndex).isImm() && "Expecting Imm operand");
472 CmpValue = MI.getOperand(ValueIndex).getImm();
473 return true;
474}
475
// Analyze a candidate MBB and extract all the information needed.
// The valid candidate will have two successors.
// It also should have a sequence of
//   Cmp_instr,
//   CondBr,
//   UnCondBr.
// Return TargetMBBInfo if MBB is a valid candidate and nullptr otherwise.
std::unique_ptr<TargetMBBInfo>
X86CondBrFolding::analyzeMBB(MachineBasicBlock &MBB) {
  MachineBasicBlock *TBB;
  MachineBasicBlock *FBB;
  MachineInstr *BrInstr;
  MachineInstr *CmpInstr;
  X86::CondCode CC;
  unsigned SrcReg;
  int CmpValue;
  bool Modified;
  bool CmpBrOnly;

  if (MBB.succ_size() != 2)
    return nullptr;

  CmpBrOnly = true;
  FBB = TBB = nullptr;
  CmpInstr = nullptr;
  // Scan the block backwards, expecting (at most) one unconditional jump,
  // one supported conditional branch, and one compare, in that order.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (I->getOpcode() == X86::JMP_1) {
      if (FBB)
        return nullptr; // More than one unconditional jump.
      FBB = I->getOperand(0).getMBB();
      continue;
    }
    if (I->isBranch()) {
      if (TBB)
        return nullptr; // More than one conditional branch.
      CC = X86::getCondFromBranchOpc(I->getOpcode());
      // Only signed-order and (in)equality conditions are supported.
      switch (CC) {
      default:
        return nullptr;
      case X86::COND_E:
      case X86::COND_L:
      case X86::COND_G:
      case X86::COND_NE:
      case X86::COND_LE:
      case X86::COND_GE:
        break;
      }
      TBB = I->getOperand(0).getMBB();
      BrInstr = &*I;
      continue;
    }
    if (analyzeCompare(*I, SrcReg, CmpValue)) {
      if (CmpInstr)
        return nullptr; // More than one compare.
      CmpInstr = &*I;
      continue;
    }
    // Any other instruction: the block is not a pure cmp+branch block.
    CmpBrOnly = false;
    break;
  }

  if (!TBB || !FBB || !CmpInstr)
    return nullptr;

  // Simplify CondCode to one of E/L/G. Note this is only to simplify the
  // findPath logic and will not change the instructions here; Modified
  // records that the MIR still reflects the original condition.
  switch (CC) {
  case X86::COND_NE:
    // NE taken == E not-taken: swap the targets.
    CC = X86::COND_E;
    std::swap(TBB, FBB);
    Modified = true;
    break;
  case X86::COND_LE:
    // x <= v  ==  x < v+1; bail out if v+1 would overflow.
    if (CmpValue == INT_MAX)
      return nullptr;
    CC = X86::COND_L;
    CmpValue += 1;
    Modified = true;
    break;
  case X86::COND_GE:
    // x >= v  ==  x > v-1; bail out if v-1 would underflow.
    if (CmpValue == INT_MIN)
      return nullptr;
    CC = X86::COND_G;
    CmpValue -= 1;
    Modified = true;
    break;
  default:
    Modified = false;
    break;
  }
  return llvm::make_unique<TargetMBBInfo>(TargetMBBInfo{
      TBB, FBB, BrInstr, CmpInstr, CC, SrcReg, CmpValue, Modified, CmpBrOnly});
}
573
574bool X86CondBrFoldingPass::runOnMachineFunction(MachineFunction &MF) {
575 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
576 if (!ST.threewayBranchProfitable())
577 return false;
578 const X86InstrInfo *TII = ST.getInstrInfo();
579 const MachineBranchProbabilityInfo *MBPI =
580 &getAnalysis<MachineBranchProbabilityInfo>();
581
582 X86CondBrFolding CondBr(TII, MBPI, MF);
583 return CondBr.optimize();
584}