//===- GuardWidening.cpp - ---- Guard widening ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the guard widening pass. The semantics of the
// @llvm.experimental.guard intrinsic lets LLVM transform it so that it fails
// more often than it did before the transform. This optimization is called
// "widening" and can be used to hoist and common runtime checks in situations
// like these:
//
//    %cmp0 = 7 u< Length
//    call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
//    call @unknown_side_effects()
//    %cmp1 = 9 u< Length
//    call @llvm.experimental.guard(i1 %cmp1) [ "deopt"(...) ]
//    ...
//
// =>
//
//    %cmp0 = 9 u< Length
//    call @llvm.experimental.guard(i1 %cmp0) [ "deopt"(...) ]
//    call @unknown_side_effects()
//    ...
//
// If %cmp0 is false, @llvm.experimental.guard will "deoptimize" back to a
// generic implementation of the same function, which will have the correct
// semantics from that point onward. It is always _legal_ to deoptimize (so
// replacing %cmp0 with false is "correct"), though it may not always be
// profitable to do so.
//
// NB! This pass is a work in progress. It hasn't been tuned to be "production
// ready" yet. It is known to have quadratic running time and will not scale
// to large numbers of guards.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/GuardWidening.h"
#include "llvm/Pass.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Scalar.h"

using namespace llvm;

#define DEBUG_TYPE "guard-widening"

namespace {

class GuardWideningImpl {
  DominatorTree &DT;
  PostDominatorTree &PDT;
  LoopInfo &LI;

  /// The set of guards whose conditions have been widened into dominating
  /// guards.
  SmallVector<IntrinsicInst *, 16> EliminatedGuards;

  /// The set of guards which have been widened to include conditions from
  /// other guards.
  DenseSet<IntrinsicInst *> WidenedGuards;

  /// Try to eliminate guard \p Guard by widening it into an earlier dominating
  /// guard. \p DFSI is the DFS iterator on the dominator tree that is
  /// currently visiting the block containing \p Guard, and \p GuardsPerBlock
  /// maps BasicBlocks to the set of guards seen in that block.
  bool eliminateGuardViaWidening(
      IntrinsicInst *Guard, const df_iterator<DomTreeNode *> &DFSI,
      const DenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 8>> &
          GuardsPerBlock);

  /// Used to keep track of which widening potential is more effective.
  enum WideningScore {
    /// Don't widen.
    WS_IllegalOrNegative,

    /// Widening is performance neutral as far as the cycles spent in check
    /// conditions go (but can still help, e.g., code layout, having less
    /// deopt state).
    WS_Neutral,

    /// Widening is profitable.
    WS_Positive,

    /// Widening is very profitable. Not semantically different from
    /// \c WS_Positive; it simply ranks higher in this ordering.
    WS_VeryPositive
  };

  static StringRef scoreTypeToString(WideningScore WS);

  /// Compute the score for widening the condition in \p DominatedGuard
  /// (contained in \p DominatedGuardLoop) into \p DominatingGuard (contained
  /// in \p DominatingGuardLoop).
  WideningScore computeWideningScore(IntrinsicInst *DominatedGuard,
                                     Loop *DominatedGuardLoop,
                                     IntrinsicInst *DominatingGuard,
                                     Loop *DominatingGuardLoop);

  /// Helper to check if \p V can be hoisted to \p InsertPos.
  bool isAvailableAt(Value *V, Instruction *InsertPos) {
    SmallPtrSet<Instruction *, 8> Visited;
    return isAvailableAt(V, InsertPos, Visited);
  }

  bool isAvailableAt(Value *V, Instruction *InsertPos,
                     SmallPtrSetImpl<Instruction *> &Visited);

  /// Helper to hoist \p V to \p InsertPos. Guaranteed to succeed if \c
  /// isAvailableAt returned true.
  void makeAvailableAt(Value *V, Instruction *InsertPos);

  /// Common helper used by \c widenGuard and \c isWideningCondProfitable. Try
  /// to generate an expression computing the logical AND of \p Cond0 and
  /// \p Cond1. Return true if the expression computing the AND is only as
  /// expensive as computing one of the two. If \p InsertPt is non-null then
  /// actually generate the resulting expression, make it available at
  /// \p InsertPt and return it in \p Result (else no change to the IR is made).
  bool widenCondCommon(Value *Cond0, Value *Cond1, Instruction *InsertPt,
                       Value *&Result);

  /// Represents a range check of the form \c Base + \c Offset u< \c Length,
  /// with the constraint that \c Length is not negative. \c CheckInst is the
  /// pre-existing instruction in the IR that computes the result of this range
  /// check.
  struct RangeCheck {
    Value *Base;
    ConstantInt *Offset;
    Value *Length;
    ICmpInst *CheckInst;

    RangeCheck() {}

    explicit RangeCheck(Value *Base, ConstantInt *Offset, Value *Length,
                        ICmpInst *CheckInst)
        : Base(Base), Offset(Offset), Length(Length), CheckInst(CheckInst) {}

    void print(raw_ostream &OS, bool PrintTypes = false) {
      OS << "Base: ";
      Base->printAsOperand(OS, PrintTypes);
      OS << " Offset: ";
      Offset->printAsOperand(OS, PrintTypes);
      OS << " Length: ";
      Length->printAsOperand(OS, PrintTypes);
    }

    LLVM_DUMP_METHOD void dump() {
      print(dbgs());
      dbgs() << "\n";
    }
  };

  /// Parse \p CheckCond into a conjunction (logical-and) of range checks and
  /// append them to \p Checks. Returns true on success; may clobber \c Checks
  /// on failure.
  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks) {
    SmallPtrSet<Value *, 8> Visited;
    return parseRangeChecks(CheckCond, Checks, Visited);
  }

  bool parseRangeChecks(Value *CheckCond, SmallVectorImpl<RangeCheck> &Checks,
                        SmallPtrSetImpl<Value *> &Visited);

  /// Combine the checks in \p Checks into a smaller set of checks and append
  /// them into \p CombinedChecks. Return true on success (i.e. all of the
  /// checks in \p Checks were combined into \p CombinedChecks). Clobbers
  /// \p Checks and \p CombinedChecks on both success and failure.
  bool combineRangeChecks(SmallVectorImpl<RangeCheck> &Checks,
                          SmallVectorImpl<RangeCheck> &CombinedChecks);

  /// Can we compute the logical AND of \p Cond0 and \p Cond1 for the price of
  /// computing only one of the two expressions?
  bool isWideningCondProfitable(Value *Cond0, Value *Cond1) {
    Value *ResultUnused;
    return widenCondCommon(Cond0, Cond1, /*InsertPt=*/nullptr, ResultUnused);
  }

  /// Widen \p ToWiden to fail if \p NewCondition is false (in addition to
  /// whatever it is already checking).
  void widenGuard(IntrinsicInst *ToWiden, Value *NewCondition) {
    Value *Result;
    widenCondCommon(ToWiden->getArgOperand(0), NewCondition, ToWiden, Result);
    ToWiden->setArgOperand(0, Result);
  }

public:
  explicit GuardWideningImpl(DominatorTree &DT, PostDominatorTree &PDT,
                             LoopInfo &LI)
      : DT(DT), PDT(PDT), LI(LI) {}

  /// The entry point for this pass.
  bool run();
};

struct GuardWideningLegacyPass : public FunctionPass {
  static char ID;
  GuardWideningPass Impl;

  GuardWideningLegacyPass() : FunctionPass(ID) {
    initializeGuardWideningLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    return GuardWideningImpl(
               getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
               getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree(),
               getAnalysis<LoopInfoWrapperPass>().getLoopInfo()).run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<PostDominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
  }
};

}

bool GuardWideningImpl::run() {
  using namespace llvm::PatternMatch;

  DenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 8>> GuardsInBlock;
  bool Changed = false;

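  // Because the dominator tree is walked depth-first, every block on the
  // current DFS path dominates the block being visited, so GuardsInBlock is
  // already populated for each of those blocks by the time
  // eliminateGuardViaWidening inspects the path below.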
  for (auto DFI = df_begin(DT.getRootNode()), DFE = df_end(DT.getRootNode());
       DFI != DFE; ++DFI) {
    auto *BB = (*DFI)->getBlock();
    auto &CurrentList = GuardsInBlock[BB];

    for (auto &I : *BB)
      if (match(&I, m_Intrinsic<Intrinsic::experimental_guard>()))
        CurrentList.push_back(cast<IntrinsicInst>(&I));

    for (auto *II : CurrentList)
      Changed |= eliminateGuardViaWidening(II, DFI, GuardsInBlock);
  }

  for (auto *II : EliminatedGuards)
    if (!WidenedGuards.count(II))
      II->eraseFromParent();

  return Changed;
}

bool GuardWideningImpl::eliminateGuardViaWidening(
    IntrinsicInst *GuardInst, const df_iterator<DomTreeNode *> &DFSI,
    const DenseMap<BasicBlock *, SmallVector<IntrinsicInst *, 8>> &
        GuardsInBlock) {
  IntrinsicInst *BestSoFar = nullptr;
  auto BestScoreSoFar = WS_IllegalOrNegative;
  auto *GuardInstLoop = LI.getLoopFor(GuardInst->getParent());

  // In the set of dominating guards, find the one we can merge GuardInst with
  // for the most profit.
  for (unsigned i = 0, e = DFSI.getPathLength(); i != e; ++i) {
    auto *CurBB = DFSI.getPath(i)->getBlock();
    auto *CurLoop = LI.getLoopFor(CurBB);
    assert(GuardsInBlock.count(CurBB) && "Must have been populated by now!");
    const auto &GuardsInCurBB = GuardsInBlock.find(CurBB)->second;

    auto I = GuardsInCurBB.begin();
    auto E = GuardsInCurBB.end();

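    // Sanity check (debug builds only): the guards recorded for CurBB must
    // appear in GuardsInCurBB in the same order as they occur in the block.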
#ifndef NDEBUG
    {
      unsigned Index = 0;
      for (auto &I : *CurBB) {
        if (Index == GuardsInCurBB.size())
          break;
        if (GuardsInCurBB[Index] == &I)
          Index++;
      }
      assert(Index == GuardsInCurBB.size() &&
             "Guards expected to be in order!");
    }
#endif

    assert((i == (e - 1)) == (GuardInst->getParent() == CurBB) && "Bad DFS?");

    if (i == (e - 1)) {
      // Corner case: make sure we're only looking at guards strictly dominating
      // GuardInst when visiting GuardInst->getParent().
      auto NewEnd = std::find(I, E, GuardInst);
      assert(NewEnd != E && "GuardInst not in its own block?");
      E = NewEnd;
    }

    for (auto *Candidate : make_range(I, E)) {
      auto Score =
          computeWideningScore(GuardInst, GuardInstLoop, Candidate, CurLoop);
      DEBUG(dbgs() << "Score between " << *GuardInst->getArgOperand(0)
                   << " and " << *Candidate->getArgOperand(0) << " is "
                   << scoreTypeToString(Score) << "\n");
      if (Score > BestScoreSoFar) {
        BestScoreSoFar = Score;
        BestSoFar = Candidate;
      }
    }
  }

  if (BestScoreSoFar == WS_IllegalOrNegative) {
    DEBUG(dbgs() << "Did not eliminate guard " << *GuardInst << "\n");
    return false;
  }

  assert(BestSoFar != GuardInst && "Should have never visited same guard!");
  assert(DT.dominates(BestSoFar, GuardInst) && "Should be!");

  DEBUG(dbgs() << "Widening " << *GuardInst << " into " << *BestSoFar
               << " with score " << scoreTypeToString(BestScoreSoFar) << "\n");
  widenGuard(BestSoFar, GuardInst->getArgOperand(0));
  GuardInst->setArgOperand(0, ConstantInt::getTrue(GuardInst->getContext()));
  EliminatedGuards.push_back(GuardInst);
  WidenedGuards.insert(BestSoFar);
  return true;
}

GuardWideningImpl::WideningScore GuardWideningImpl::computeWideningScore(
    IntrinsicInst *DominatedGuard, Loop *DominatedGuardLoop,
    IntrinsicInst *DominatingGuard, Loop *DominatingGuardLoop) {
  bool HoistingOutOfLoop = false;

  if (DominatingGuardLoop != DominatedGuardLoop) {
    if (DominatingGuardLoop &&
        !DominatingGuardLoop->contains(DominatedGuardLoop))
      return WS_IllegalOrNegative;

    HoistingOutOfLoop = true;
  }

  if (!isAvailableAt(DominatedGuard->getArgOperand(0), DominatingGuard))
    return WS_IllegalOrNegative;

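  // If the dominated guard's block does not post-dominate the dominating
  // guard's block, widening moves the check onto paths that previously never
  // executed it. Below, that cost is accepted only when widening is otherwise
  // a win: the combined check is as cheap as one of the originals, or the
  // check is hoisted out of a loop.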
  bool HoistingOutOfIf =
      !PDT.dominates(DominatedGuard->getParent(), DominatingGuard->getParent());

  if (isWideningCondProfitable(DominatedGuard->getArgOperand(0),
                               DominatingGuard->getArgOperand(0)))
    return HoistingOutOfLoop ? WS_VeryPositive : WS_Positive;

  if (HoistingOutOfLoop)
    return WS_Positive;

  return HoistingOutOfIf ? WS_IllegalOrNegative : WS_Neutral;
}

bool GuardWideningImpl::isAvailableAt(Value *V, Instruction *Loc,
                                      SmallPtrSetImpl<Instruction *> &Visited) {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc) || Visited.count(Inst))
    return true;

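  // Loads are rejected here (in addition to anything unsafe to speculate)
  // because hoisting a load above intervening instructions, e.g. a call that
  // writes memory, could change the value it observes.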
  if (!isSafeToSpeculativelyExecute(Inst, Loc, &DT) ||
      Inst->mayReadFromMemory())
    return false;

  Visited.insert(Inst);

  // We only want to go _up_ the dominance chain when recursing.
  assert(!isa<PHINode>(Loc) &&
         "PHIs should return false for isSafeToSpeculativelyExecute");
  assert(DT.isReachableFromEntry(Inst->getParent()) &&
         "We did a DFS from the block entry!");
  return all_of(Inst->operands(),
                [&](Value *Op) { return isAvailableAt(Op, Loc, Visited); });
}

void GuardWideningImpl::makeAvailableAt(Value *V, Instruction *Loc) {
  auto *Inst = dyn_cast<Instruction>(V);
  if (!Inst || DT.dominates(Inst, Loc))
    return;

  assert(isSafeToSpeculativelyExecute(Inst, Loc, &DT) &&
         !Inst->mayReadFromMemory() && "Should've checked with isAvailableAt!");

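  // Hoist the operands into place first so that, once Inst itself is moved,
  // all of its operands still dominate it at the new location.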
  for (Value *Op : Inst->operands())
    makeAvailableAt(Op, Loc);

  Inst->moveBefore(Loc);
}

bool GuardWideningImpl::widenCondCommon(Value *Cond0, Value *Cond1,
                                        Instruction *InsertPt, Value *&Result) {
  using namespace llvm::PatternMatch;

  {
    // L >u C0 && L >u C1 -> L >u max(C0, C1)
    ConstantInt *RHS0, *RHS1;
    Value *LHS;
    ICmpInst::Predicate Pred0, Pred1;
    if (match(Cond0, m_ICmp(Pred0, m_Value(LHS), m_ConstantInt(RHS0))) &&
        match(Cond1, m_ICmp(Pred1, m_Specific(LHS), m_ConstantInt(RHS1)))) {

      ConstantRange CR0 =
          ConstantRange::makeExactICmpRegion(Pred0, RHS0->getValue());
      ConstantRange CR1 =
          ConstantRange::makeExactICmpRegion(Pred1, RHS1->getValue());

      // SubsetIntersect is a subset of the actual mathematical intersection of
      // CR0 and CR1, while SupersetIntersect is a superset of the actual
      // mathematical intersection. If these two ConstantRanges are equal, then
      // we know we were able to represent the actual mathematical intersection
      // of CR0 and CR1, and can use the same to generate an icmp instruction.
      //
      // Given what we're doing here and the semantics of guards, it would
      // actually be correct to just use SubsetIntersect, but that may be too
      // aggressive in cases we care about.
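      //
      // For example, combining (X u< 10) with (X u< 20): the exact regions are
      // [0, 10) and [0, 20), their intersection [0, 10) is representable
      // exactly, and getEquivalentICmp turns it back into the single check
      // X u< 10.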
      auto SubsetIntersect = CR0.inverse().unionWith(CR1.inverse()).inverse();
      auto SupersetIntersect = CR0.intersectWith(CR1);

      APInt NewRHSAP;
      CmpInst::Predicate Pred;
      if (SubsetIntersect == SupersetIntersect &&
          SubsetIntersect.getEquivalentICmp(Pred, NewRHSAP)) {
        if (InsertPt) {
          ConstantInt *NewRHS = ConstantInt::get(Cond0->getContext(), NewRHSAP);
          Result = new ICmpInst(InsertPt, Pred, LHS, NewRHS, "wide.chk");
        }
        return true;
      }
    }
  }

  {
    SmallVector<GuardWideningImpl::RangeCheck, 4> Checks, CombinedChecks;
    if (parseRangeChecks(Cond0, Checks) && parseRangeChecks(Cond1, Checks) &&
        combineRangeChecks(Checks, CombinedChecks)) {
      if (InsertPt) {
        Result = nullptr;
        for (auto &RC : CombinedChecks) {
          makeAvailableAt(RC.CheckInst, InsertPt);
          if (Result)
            Result =
                BinaryOperator::CreateAnd(RC.CheckInst, Result, "", InsertPt);
          else
            Result = RC.CheckInst;
        }

        Result->setName("wide.chk");
      }
      return true;
    }
  }

  // Base case -- just logical-and the two conditions together.

  if (InsertPt) {
    makeAvailableAt(Cond0, InsertPt);
    makeAvailableAt(Cond1, InsertPt);

    Result = BinaryOperator::CreateAnd(Cond0, Cond1, "wide.chk", InsertPt);
  }

  // We were not able to compute Cond0 AND Cond1 for the price of one.
  return false;
}

bool GuardWideningImpl::parseRangeChecks(
    Value *CheckCond, SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  if (!Visited.insert(CheckCond).second)
    return true;

  using namespace llvm::PatternMatch;

  {
    Value *AndLHS, *AndRHS;
    if (match(CheckCond, m_And(m_Value(AndLHS), m_Value(AndRHS))))
      return parseRangeChecks(AndLHS, Checks) &&
             parseRangeChecks(AndRHS, Checks);
  }

  auto *IC = dyn_cast<ICmpInst>(CheckCond);
  if (!IC || !IC->getOperand(0)->getType()->isIntegerTy() ||
      (IC->getPredicate() != ICmpInst::ICMP_ULT &&
       IC->getPredicate() != ICmpInst::ICMP_UGT))
    return false;

  Value *CmpLHS = IC->getOperand(0), *CmpRHS = IC->getOperand(1);
  if (IC->getPredicate() == ICmpInst::ICMP_UGT)
    std::swap(CmpLHS, CmpRHS);

  auto &DL = IC->getModule()->getDataLayout();

  GuardWideningImpl::RangeCheck Check;
  Check.Base = CmpLHS;
  Check.Offset =
      cast<ConstantInt>(ConstantInt::getNullValue(CmpRHS->getType()));
  Check.Length = CmpRHS;
  Check.CheckInst = IC;

  if (!isKnownNonNegative(Check.Length, DL))
    return false;

  // What we have in \c Check now is a correct interpretation of \p CheckCond.
  // Try to see if we can move some constant offsets into the \c Offset field.

  bool Changed;

  do {
    Value *OpLHS;
    ConstantInt *OpRHS;
    Changed = false;

#ifndef NDEBUG
    auto *BaseInst = dyn_cast<Instruction>(Check.Base);
    assert((!BaseInst || DT.isReachableFromEntry(BaseInst->getParent())) &&
           "Unreachable instruction?");
#endif

    if (match(Check.Base, m_Add(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
      Check.Base = OpLHS;
      Check.Offset =
          ConstantInt::get(Check.Offset->getContext(),
                           Check.Offset->getValue() + OpRHS->getValue());
      Changed = true;
    } else if (match(Check.Base, m_Or(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
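      // An 'or' whose constant operand only sets bits known to be zero in the
      // other operand acts like an 'add', so the constant can be folded into
      // Offset exactly as in the 'add' case above; e.g. if the low two bits of
      // OpLHS are known zero, then (OpLHS | 3) == OpLHS + 3.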
      unsigned BitWidth = OpLHS->getType()->getScalarSizeInBits();
      APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
      computeKnownBits(OpLHS, KnownZero, KnownOne, DL);
      if ((OpRHS->getValue() & KnownZero) == OpRHS->getValue()) {
        Check.Base = OpLHS;
        Check.Offset =
            ConstantInt::get(Check.Offset->getContext(),
                             Check.Offset->getValue() + OpRHS->getValue());
        Changed = true;
      }
    }
  } while (Changed);

  Checks.push_back(Check);
  return true;
}

bool GuardWideningImpl::combineRangeChecks(
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &Checks,
    SmallVectorImpl<GuardWideningImpl::RangeCheck> &RangeChecksOut) {
  unsigned OldCount = Checks.size();
  while (!Checks.empty()) {
    Value *Base = Checks[0].Base;
    Value *Length = Checks[0].Length;
    auto ChecksStart =
        remove_if(Checks, [&](GuardWideningImpl::RangeCheck &RC) {
          return RC.Base == Base && RC.Length == Length;
        });

    unsigned CheckCount = std::distance(ChecksStart, Checks.end());
    assert(CheckCount != 0 && "We know we have at least one!");

    if (CheckCount < 3) {
      RangeChecksOut.insert(RangeChecksOut.end(), ChecksStart, Checks.end());
      Checks.erase(ChecksStart, Checks.end());
      continue;
    }

    // CheckCount will typically be 3 here, but so far there has been no need
    // to hard-code that fact.

    std::sort(ChecksStart, Checks.end(),
              [&](GuardWideningImpl::RangeCheck &LHS,
                  GuardWideningImpl::RangeCheck &RHS) {
      return LHS.Offset->getValue().slt(RHS.Offset->getValue());
    });

    // Note: std::sort should not invalidate the ChecksStart iterator.

    ConstantInt *MinOffset = ChecksStart->Offset,
                *MaxOffset = Checks.back().Offset;

    unsigned BitWidth = MaxOffset->getValue().getBitWidth();
    if ((MaxOffset->getValue() - MinOffset->getValue())
            .ugt(APInt::getSignedMinValue(BitWidth)))
      return false;

    APInt MaxDiff = MaxOffset->getValue() - MinOffset->getValue();
    APInt HighOffset = MaxOffset->getValue();
    auto OffsetOK = [&](GuardWideningImpl::RangeCheck &RC) {
      return (HighOffset - RC.Offset->getValue()).ult(MaxDiff);
    };

    if (MaxDiff.isMinValue() ||
        !std::all_of(std::next(ChecksStart), Checks.end(), OffsetOK))
      return false;

    // We have a series of f+1 checks as:
    //
    //   I+k_0 u< L   ... Chk_0
    //   I+k_1 u< L   ... Chk_1
    //   ...
    //   I+k_f u< L   ... Chk_f
    //
    //     with forall i in (0,f]: k_f-k_i u< k_f-k_0   ... Precond_0
    //                             k_f-k_0 u< INT_MIN+k_f ... Precond_1
    //                             k_f != k_0             ... Precond_2
    //
    // Claim:
    //   Chk_0 AND Chk_f implies all the other checks
    //
    // Informal proof sketch:
    //
    // We will show that the integer range [I+k_0,I+k_f] does not unsigned-wrap
    // (i.e. going from I+k_0 to I+k_f does not cross the -1,0 boundary) and
    // thus I+k_f is the greatest unsigned value in that range.
    //
    // This combined with Chk_f shows that everything in that range is u< L.
    // Via Precond_0 we know that all of the indices in Chk_0 through Chk_f
    // lie in [I+k_0,I+k_f], thus proving our claim.
    //
    // To see that [I+k_0,I+k_f] is not a wrapping range, note that there are
    // two possibilities: I+k_0 u< I+k_f or I+k_0 >u I+k_f (they can't be equal
    // since k_0 != k_f). In the former case, [I+k_0,I+k_f] is not a wrapping
    // range by definition, and the latter case is impossible:
    //
    //   0-----I+k_f---I+k_0----L---INT_MAX,INT_MIN------------------(-1)
    //   xxxxxxxxxxx         xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    //
    // For Chk_0 to succeed, we'd need k_f-k_0 (the range highlighted with 'x'
    // above) to be at least >u INT_MIN.

    RangeChecksOut.emplace_back(Base, MinOffset, Length,
                                ChecksStart->CheckInst);
    RangeChecksOut.emplace_back(Base, MaxOffset, Length,
                                Checks.back().CheckInst);
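    // Only the checks with the smallest and the largest offsets are kept; by
    // the argument above, those two together imply every check in between.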

    Checks.erase(ChecksStart, Checks.end());
  }

  assert(RangeChecksOut.size() <= OldCount && "We pessimized!");
  return RangeChecksOut.size() != OldCount;
}

PreservedAnalyses GuardWideningPass::run(Function &F,
                                         AnalysisManager<Function> &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  bool Changed = GuardWideningImpl(DT, PDT, LI).run();
  return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
}

StringRef GuardWideningImpl::scoreTypeToString(WideningScore WS) {
  switch (WS) {
  case WS_IllegalOrNegative:
    return "IllegalOrNegative";
  case WS_Neutral:
    return "Neutral";
  case WS_Positive:
    return "Positive";
  case WS_VeryPositive:
    return "VeryPositive";
  }

  llvm_unreachable("Fully covered switch above!");
}

char GuardWideningLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(GuardWideningLegacyPass, "guard-widening", "Widen guards",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(GuardWideningLegacyPass, "guard-widening", "Widen guards",
                    false, false)

FunctionPass *llvm::createGuardWideningPass() {
  return new GuardWideningLegacyPass();
}