//===---- X86CondBrFolding.cpp - optimize conditional branches ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file defines a pass that optimizes conditional branches on x86 by
// taking advantage of the three-way conditional code generated by compare
// instructions.
// Currently, it tries to hoist an EQ or NE conditional branch to a dominating
// conditional branch where the same EQ/NE condition can be computed. An
// example:
//   bb_0:
//     cmp %0, 19
//     jg bb_1
//     jmp bb_2
//   bb_1:
//     cmp %0, 40
//     jg bb_3
//     jmp bb_4
//   bb_4:
//     cmp %0, 20
//     je bb_5
//     jmp bb_6
// Here we could combine the two compares in bb_0 and bb_4 and have the
// following code:
//   bb_0:
//     cmp %0, 20
//     jg bb_1
//     jl bb_2
//     jmp bb_5
//   bb_1:
//     cmp %0, 40
//     jg bb_3
//     jmp bb_6
// For the case of %0 == 20 (bb_5), we eliminate two jumps, and the control
// height for bb_6 is also reduced. bb_4 is gone after the optimization.
//
// This code pattern is common, especially in switch lowering, where we
// generate a compare against "pivot - 1" for the inner nodes of the binary
// search tree.
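//
// As a rough source-level sketch (hypothetical case values chosen to match
// the example above), a switch such as
//   switch (x) {
//   case 20: ...; break;
//   case 41: ...; break;
//   default: ...;
//   }
// may be lowered into a binary search tree whose inner node compares x
// against "pivot - 1" (19 here), producing exactly the cmp/jg followed by
// cmp/je shape shown above.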
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/BranchProbability.h"

using namespace llvm;

#define DEBUG_TYPE "x86-condbr-folding"

STATISTIC(NumFixedCondBrs, "Number of x86 condbr folded");

namespace {
class X86CondBrFoldingPass : public MachineFunctionPass {
public:
  X86CondBrFoldingPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 CondBr Folding"; }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<MachineBranchProbabilityInfo>();
  }

private:
  static char ID;
};

char X86CondBrFoldingPass::ID = 0;
} // namespace

FunctionPass *llvm::createX86CondBrFolding() {
  return new X86CondBrFoldingPass();
}

namespace {
// A class that stores the auxiliary information for each MBB.
struct TargetMBBInfo {
  MachineBasicBlock *TBB;
  MachineBasicBlock *FBB;
  MachineInstr *BrInstr;
  MachineInstr *CmpInstr;
  X86::CondCode BranchCode;
  unsigned SrcReg;
  int CmpValue;
  bool Modified;
  bool CmpBrOnly;
};

// A class that optimizes conditional branches by hoisting and merging
// condition codes.
class X86CondBrFolding {
public:
  X86CondBrFolding(const X86InstrInfo *TII,
                   const MachineBranchProbabilityInfo *MBPI,
                   MachineFunction &MF)
      : TII(TII), MBPI(MBPI), MF(MF) {}
  bool optimize();

private:
  const X86InstrInfo *TII;
  const MachineBranchProbabilityInfo *MBPI;
  MachineFunction &MF;
  std::vector<std::unique_ptr<TargetMBBInfo>> MBBInfos;
  SmallVector<MachineBasicBlock *, 4> RemoveList;

  void optimizeCondBr(MachineBasicBlock &MBB,
                      SmallVectorImpl<MachineBasicBlock *> &BranchPath);
  void fixBranchProb(MachineBasicBlock *NextMBB, MachineBasicBlock *RootMBB,
                     SmallVectorImpl<MachineBasicBlock *> &BranchPath);
  void replaceBrDest(MachineBasicBlock *MBB, MachineBasicBlock *OrigDest,
                     MachineBasicBlock *NewDest);
  void fixupModifiedCond(MachineBasicBlock *MBB);
  std::unique_ptr<TargetMBBInfo> analyzeMBB(MachineBasicBlock &MBB);
  static bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                             int &CmpValue);
  bool findPath(MachineBasicBlock *MBB,
                SmallVectorImpl<MachineBasicBlock *> &BranchPath);
  TargetMBBInfo *getMBBInfo(MachineBasicBlock *MBB) const {
    return MBBInfos[MBB->getNumber()].get();
  }
};
} // namespace

// Find a valid path on which we can reuse the CondCode.
// The resulting path (if the return value is true) is stored in BranchPath.
// Return value:
//  false: no valid path is found.
//  true: a valid path is found and the targetBB can be reached.
bool X86CondBrFolding::findPath(
    MachineBasicBlock *MBB, SmallVectorImpl<MachineBasicBlock *> &BranchPath) {
  TargetMBBInfo *MBBInfo = getMBBInfo(MBB);
  assert(MBBInfo && "Expecting a candidate MBB");
  int CmpValue = MBBInfo->CmpValue;

  MachineBasicBlock *PredMBB = *MBB->pred_begin();
  MachineBasicBlock *SaveMBB = MBB;
  while (PredMBB) {
    TargetMBBInfo *PredMBBInfo = getMBBInfo(PredMBB);
    if (!PredMBBInfo || PredMBBInfo->SrcReg != MBBInfo->SrcReg)
      return false;

    assert(SaveMBB == PredMBBInfo->TBB || SaveMBB == PredMBBInfo->FBB);
    bool IsFalseBranch = (SaveMBB == PredMBBInfo->FBB);

    X86::CondCode CC = PredMBBInfo->BranchCode;
    assert(CC == X86::COND_L || CC == X86::COND_G || CC == X86::COND_E);
    int PredCmpValue = PredMBBInfo->CmpValue;
    bool ValueCmpTrue = ((CmpValue < PredCmpValue && CC == X86::COND_L) ||
                         (CmpValue > PredCmpValue && CC == X86::COND_G) ||
                         (CmpValue == PredCmpValue && CC == X86::COND_E));
    // Check that the result of the value comparison matches the branch edge
    // taken to reach this block; otherwise MBB's compare can never be true
    // along this path.
    if (!(ValueCmpTrue ^ IsFalseBranch)) {
      LLVM_DEBUG(dbgs() << "Dead BB detected!\n");
      return false;
    }

    BranchPath.push_back(PredMBB);
    // These are the conditions on which we could combine the compares.
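    // For the example in the file header: MBB compares %0 against 20 while
    // its predecessor bb_0 compares against 19 with COND_G, so
    // CmpValue == PredCmpValue + 1 and the two compares can be merged into a
    // single three-way test in bb_0.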
    if ((CmpValue == PredCmpValue) ||
        (CmpValue == PredCmpValue - 1 && CC == X86::COND_L) ||
        (CmpValue == PredCmpValue + 1 && CC == X86::COND_G))
      return true;

    // If PredMBB has more than one predecessor, or is not a pure cmp and br,
    // we bail out.
    if (PredMBB->pred_size() != 1 || !PredMBBInfo->CmpBrOnly)
      return false;

    SaveMBB = PredMBB;
    PredMBB = *PredMBB->pred_begin();
  }
  return false;
}

// Fix up any PHI nodes in the successor block MBB: incoming entries that
// refer to OldMBB are rewritten to come from NewMBB instead.
static void fixPHIsInSucc(MachineBasicBlock *MBB, MachineBasicBlock *OldMBB,
                          MachineBasicBlock *NewMBB) {
  if (NewMBB == OldMBB)
    return;
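  // Machine-level PHIs are laid out as: operand 0 is the def, followed by
  // (value, predecessor MBB) pairs, so the MBB operands sit at indices
  // 2, 4, 6, ...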
  for (auto MI = MBB->instr_begin(), ME = MBB->instr_end();
       MI != ME && MI->isPHI(); ++MI)
    for (unsigned i = 2, e = MI->getNumOperands() + 1; i != e; i += 2) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.getMBB() == OldMBB)
        MO.setMBB(NewMBB);
    }
}

// Utility function to set branch probability for edge MBB->SuccMBB.
static inline bool setBranchProb(MachineBasicBlock *MBB,
                                 MachineBasicBlock *SuccMBB,
                                 BranchProbability Prob) {
  auto MBBI = std::find(MBB->succ_begin(), MBB->succ_end(), SuccMBB);
  if (MBBI == MBB->succ_end())
    return false;
  MBB->setSuccProbability(MBBI, Prob);
  return true;
}

// Utility function to find the unconditional br instruction in MBB.
static inline MachineBasicBlock::iterator
findUncondBrI(MachineBasicBlock *MBB) {
  return std::find_if(MBB->begin(), MBB->end(), [](MachineInstr &MI) -> bool {
    return MI.getOpcode() == X86::JMP_1;
  });
}

// Replace MBB's original successor, OrigDest, with NewDest.
// Also update the MBBInfo for MBB.
void X86CondBrFolding::replaceBrDest(MachineBasicBlock *MBB,
                                     MachineBasicBlock *OrigDest,
                                     MachineBasicBlock *NewDest) {
  TargetMBBInfo *MBBInfo = getMBBInfo(MBB);
  MachineInstr *BrMI;
  if (MBBInfo->TBB == OrigDest) {
    BrMI = MBBInfo->BrInstr;
    unsigned JNCC = GetCondBranchFromCond(MBBInfo->BranchCode);
    MachineInstrBuilder MIB =
        BuildMI(*MBB, BrMI, MBB->findDebugLoc(BrMI), TII->get(JNCC))
            .addMBB(NewDest);
    MBBInfo->TBB = NewDest;
    MBBInfo->BrInstr = MIB.getInstr();
  } else { // Should be the unconditional jump stmt.
    MachineBasicBlock::iterator UncondBrI = findUncondBrI(MBB);
    BuildMI(*MBB, UncondBrI, MBB->findDebugLoc(UncondBrI), TII->get(X86::JMP_1))
        .addMBB(NewDest);
    MBBInfo->FBB = NewDest;
    BrMI = &*UncondBrI;
  }
  fixPHIsInSucc(NewDest, OrigDest, MBB);
  BrMI->eraseFromParent();
  MBB->addSuccessor(NewDest);
  setBranchProb(MBB, NewDest, MBPI->getEdgeProbability(MBB, OrigDest));
  MBB->removeSuccessor(OrigDest);
}

// Change the CondCode and BrInstr according to MBBInfo.
void X86CondBrFolding::fixupModifiedCond(MachineBasicBlock *MBB) {
  TargetMBBInfo *MBBInfo = getMBBInfo(MBB);
  if (!MBBInfo->Modified)
    return;

  MachineInstr *BrMI = MBBInfo->BrInstr;
  X86::CondCode CC = MBBInfo->BranchCode;
  MachineInstrBuilder MIB = BuildMI(*MBB, BrMI, MBB->findDebugLoc(BrMI),
                                    TII->get(GetCondBranchFromCond(CC)))
                                .addMBB(MBBInfo->TBB);
  BrMI->eraseFromParent();
  MBBInfo->BrInstr = MIB.getInstr();

  MachineBasicBlock::iterator UncondBrI = findUncondBrI(MBB);
  BuildMI(*MBB, UncondBrI, MBB->findDebugLoc(UncondBrI), TII->get(X86::JMP_1))
      .addMBB(MBBInfo->FBB);
  MBB->erase(UncondBrI);
  MBBInfo->Modified = false;
}

//
// Apply the transformation:
//   RootMBB -1-> ... PredMBB -3-> MBB -5-> TargetMBB
//      \-2->            \-4->        \-6-> FalseMBB
// ==>
//   RootMBB -1-> ... PredMBB -7-> FalseMBB
// TargetMBB <-8-/      \-2->         \-4->
//
// Note that PredMBB and RootMBB could be the same.
// And in the case of a dead TargetMBB, we will not have TargetMBB and edge 8.
//
// There is some special handling when RootMBB's condition is COND_E, in which
// case we directly short-cycle the branch instruction.
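// (When RootMBB already tests equality against the same value as MBB, MBB's
// equality test is redundant along that edge, so it is enough to redirect
// RootMBB's taken branch straight to TargetMBB.)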
//
void X86CondBrFolding::optimizeCondBr(
    MachineBasicBlock &MBB, SmallVectorImpl<MachineBasicBlock *> &BranchPath) {

  X86::CondCode CC;
  TargetMBBInfo *MBBInfo = getMBBInfo(&MBB);
  assert(MBBInfo && "Expecting a candidate MBB");
  MachineBasicBlock *TargetMBB = MBBInfo->TBB;
  BranchProbability TargetProb = MBPI->getEdgeProbability(&MBB, MBBInfo->TBB);

  // Forward the jump from MBB's predecessor to MBB's false target.
  MachineBasicBlock *PredMBB = BranchPath.front();
  TargetMBBInfo *PredMBBInfo = getMBBInfo(PredMBB);
  assert(PredMBBInfo && "Expecting a candidate MBB");
  if (PredMBBInfo->Modified)
    fixupModifiedCond(PredMBB);
  CC = PredMBBInfo->BranchCode;
  // Don't do this if the depth of BranchPath is 1 and PredMBB is of COND_E.
  // We will short-cycle directly for this case.
  if (!(CC == X86::COND_E && BranchPath.size() == 1))
    replaceBrDest(PredMBB, &MBB, MBBInfo->FBB);

  MachineBasicBlock *RootMBB = BranchPath.back();
  TargetMBBInfo *RootMBBInfo = getMBBInfo(RootMBB);
  assert(RootMBBInfo && "Expecting a candidate MBB");
  if (RootMBBInfo->Modified)
    fixupModifiedCond(RootMBB);
  CC = RootMBBInfo->BranchCode;

  if (CC != X86::COND_E) {
    MachineBasicBlock::iterator UncondBrI = findUncondBrI(RootMBB);
    // RootMBB: Cond jump to the original not-taken MBB.
    X86::CondCode NewCC;
    switch (CC) {
    case X86::COND_L:
      NewCC = X86::COND_G;
      break;
    case X86::COND_G:
      NewCC = X86::COND_L;
      break;
    default:
      llvm_unreachable("unexpected conditional code.");
    }
    BuildMI(*RootMBB, UncondBrI, RootMBB->findDebugLoc(UncondBrI),
            TII->get(GetCondBranchFromCond(NewCC)))
        .addMBB(RootMBBInfo->FBB);

    // RootMBB: Jump to TargetMBB
    BuildMI(*RootMBB, UncondBrI, RootMBB->findDebugLoc(UncondBrI),
            TII->get(X86::JMP_1))
        .addMBB(TargetMBB);
    RootMBB->addSuccessor(TargetMBB);
    fixPHIsInSucc(TargetMBB, &MBB, RootMBB);
    RootMBB->erase(UncondBrI);
  } else {
    replaceBrDest(RootMBB, RootMBBInfo->TBB, TargetMBB);
  }

  // Change RootMBB's compare to use MBB's CmpValue. Don't just rewrite the
  // immediate; move MBB's compare instruction here instead, as the opcode
  // might be different.
  if (RootMBBInfo->CmpValue != MBBInfo->CmpValue) {
    MachineInstr *NewCmp = MBBInfo->CmpInstr;
    NewCmp->removeFromParent();
    RootMBB->insert(RootMBBInfo->CmpInstr, NewCmp);
    RootMBBInfo->CmpInstr->eraseFromParent();
  }

  // Fix branch probabilities.
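  // Informally: TargetProb starts as the probability of the MBB->TargetMBB
  // edge and, walking the path from PredMBB back to RootMBB, is scaled by
  // each edge's probability, so it ends up as the probability of reaching
  // TargetMBB from RootMBB along the old path; each edge on that path keeps
  // its old probability minus the portion that now flows directly from
  // RootMBB to TargetMBB.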
  auto fixBranchProb = [&](MachineBasicBlock *NextMBB) {
    BranchProbability Prob;
    for (auto &I : BranchPath) {
      MachineBasicBlock *ThisMBB = I;
      if (!ThisMBB->hasSuccessorProbabilities() ||
          !ThisMBB->isSuccessor(NextMBB))
        break;
      Prob = MBPI->getEdgeProbability(ThisMBB, NextMBB);
      if (Prob.isUnknown())
        break;
      TargetProb = Prob * TargetProb;
      Prob = Prob - TargetProb;
      setBranchProb(ThisMBB, NextMBB, Prob);
      if (ThisMBB == RootMBB) {
        setBranchProb(ThisMBB, TargetMBB, TargetProb);
      }
      ThisMBB->normalizeSuccProbs();
      if (ThisMBB == RootMBB)
        break;
      NextMBB = ThisMBB;
    }
    return true;
  };
  if (CC != X86::COND_E && !TargetProb.isUnknown())
    fixBranchProb(MBBInfo->FBB);

  if (CC != X86::COND_E)
    RemoveList.push_back(&MBB);

  // Invalidate MBBInfo just in case.
  MBBInfos[MBB.getNumber()] = nullptr;
  MBBInfos[RootMBB->getNumber()] = nullptr;

  LLVM_DEBUG(dbgs() << "After optimization:\nRootMBB is: " << *RootMBB << "\n");
  if (BranchPath.size() > 1)
    LLVM_DEBUG(dbgs() << "PredMBB is: " << *(BranchPath[0]) << "\n");
}

// Driver function for optimization: find the valid candidate and apply
// the transformation.
bool X86CondBrFolding::optimize() {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "***** X86CondBr Folding on Function: " << MF.getName()
                    << " *****\n");
  // Setup data structures.
  MBBInfos.resize(MF.getNumBlockIDs());
  for (auto &MBB : MF)
    MBBInfos[MBB.getNumber()] = analyzeMBB(MBB);

  for (auto &MBB : MF) {
    TargetMBBInfo *MBBInfo = getMBBInfo(&MBB);
    if (!MBBInfo || !MBBInfo->CmpBrOnly)
      continue;
    if (MBB.pred_size() != 1)
      continue;
    LLVM_DEBUG(dbgs() << "Work on MBB." << MBB.getNumber()
                      << " CmpValue: " << MBBInfo->CmpValue << "\n");
    SmallVector<MachineBasicBlock *, 4> BranchPath;
    if (!findPath(&MBB, BranchPath))
      continue;

#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Found one path (len=" << BranchPath.size() << "):\n");
    int Index = 1;
    LLVM_DEBUG(dbgs() << "Target MBB is: " << MBB << "\n");
    for (auto I = BranchPath.rbegin(); I != BranchPath.rend(); ++I, ++Index) {
      MachineBasicBlock *PMBB = *I;
      TargetMBBInfo *PMBBInfo = getMBBInfo(PMBB);
      LLVM_DEBUG(dbgs() << "Path MBB (" << Index << " of " << BranchPath.size()
                        << ") is " << *PMBB);
      LLVM_DEBUG(dbgs() << "CC=" << PMBBInfo->BranchCode
                        << " Val=" << PMBBInfo->CmpValue
                        << " CmpBrOnly=" << PMBBInfo->CmpBrOnly << "\n\n");
    }
#endif
    optimizeCondBr(MBB, BranchPath);
    Changed = true;
  }
  NumFixedCondBrs += RemoveList.size();
  for (auto MBBI : RemoveList) {
    while (!MBBI->succ_empty())
      MBBI->removeSuccessor(MBBI->succ_end() - 1);

    MBBI->eraseFromParent();
  }

  return Changed;
}

// Analyze instructions that generate CondCode and extract information.
bool X86CondBrFolding::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      int &CmpValue) {
  unsigned SrcRegIndex = 0;
  unsigned ValueIndex = 0;
  switch (MI.getOpcode()) {
  // TODO: handle test instructions.
  default:
    return false;
  case X86::CMP64ri32:
  case X86::CMP64ri8:
  case X86::CMP32ri:
  case X86::CMP32ri8:
  case X86::CMP16ri:
  case X86::CMP16ri8:
  case X86::CMP8ri:
    SrcRegIndex = 0;
    ValueIndex = 1;
    break;
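  // A SUB with an immediate sets EFLAGS exactly as the corresponding CMP
  // does (CMP is a subtract whose result is discarded), so a SUBri feeding
  // the branch is an equally valid source of the condition code.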
  case X86::SUB64ri32:
  case X86::SUB64ri8:
  case X86::SUB32ri:
  case X86::SUB32ri8:
  case X86::SUB16ri:
  case X86::SUB16ri8:
  case X86::SUB8ri:
    SrcRegIndex = 1;
    ValueIndex = 2;
    break;
  }
  SrcReg = MI.getOperand(SrcRegIndex).getReg();
  assert(MI.getOperand(ValueIndex).isImm() && "Expecting Imm operand");
  CmpValue = MI.getOperand(ValueIndex).getImm();
  return true;
}

// Analyze a candidate MBB and extract all the information needed.
// A valid candidate will have two successors.
// It also should end with a sequence of
//    a compare (or sub-with-immediate) instruction,
//    a conditional branch,
//    an unconditional branch.
// Return TargetMBBInfo if MBB is a valid candidate and nullptr otherwise.
std::unique_ptr<TargetMBBInfo>
X86CondBrFolding::analyzeMBB(MachineBasicBlock &MBB) {
  MachineBasicBlock *TBB;
  MachineBasicBlock *FBB;
  MachineInstr *BrInstr;
  MachineInstr *CmpInstr;
  X86::CondCode CC;
  unsigned SrcReg;
  int CmpValue;
  bool Modified;
  bool CmpBrOnly;

  if (MBB.succ_size() != 2)
    return nullptr;

  CmpBrOnly = true;
  FBB = TBB = nullptr;
  CmpInstr = nullptr;
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (I->getOpcode() == X86::JMP_1) {
      if (FBB)
        return nullptr;
      FBB = I->getOperand(0).getMBB();
      continue;
    }
    if (I->isBranch()) {
      if (TBB)
        return nullptr;
      CC = X86::getCondFromBranchOpc(I->getOpcode());
      switch (CC) {
      default:
        return nullptr;
      case X86::COND_E:
      case X86::COND_L:
      case X86::COND_G:
      case X86::COND_NE:
      case X86::COND_LE:
      case X86::COND_GE:
        break;
      }
      TBB = I->getOperand(0).getMBB();
      BrInstr = &*I;
      continue;
    }
    if (analyzeCompare(*I, SrcReg, CmpValue)) {
      if (CmpInstr)
        return nullptr;
      CmpInstr = &*I;
      continue;
    }
    CmpBrOnly = false;
    break;
  }

  if (!TBB || !FBB || !CmpInstr)
    return nullptr;

  // Simplify CondCode. Note this is only to simplify the findPath logic
  // and will not change the instruction here.
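  // For signed values, "x <= C" is equivalent to "x < C + 1" and "x >= C" to
  // "x > C - 1", provided C is not INT_MAX / INT_MIN respectively; the
  // overflowing cases bail out below.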
  switch (CC) {
  case X86::COND_NE:
    CC = X86::COND_E;
    std::swap(TBB, FBB);
    Modified = true;
    break;
  case X86::COND_LE:
    if (CmpValue == INT_MAX)
      return nullptr;
    CC = X86::COND_L;
    CmpValue += 1;
    Modified = true;
    break;
  case X86::COND_GE:
    if (CmpValue == INT_MIN)
      return nullptr;
    CC = X86::COND_G;
    CmpValue -= 1;
    Modified = true;
    break;
  default:
    Modified = false;
    break;
  }
  return llvm::make_unique<TargetMBBInfo>(TargetMBBInfo{
      TBB, FBB, BrInstr, CmpInstr, CC, SrcReg, CmpValue, Modified, CmpBrOnly});
}

bool X86CondBrFoldingPass::runOnMachineFunction(MachineFunction &MF) {
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  if (!ST.threewayBranchProfitable())
    return false;
  const X86InstrInfo *TII = ST.getInstrInfo();
  const MachineBranchProbabilityInfo *MBPI =
      &getAnalysis<MachineBranchProbabilityInfo>();

  X86CondBrFolding CondBr(TII, MBPI, MF);
  return CondBr.optimize();