//===- InstCombinePHI.cpp -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitPHINode function.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;

/// FoldPHIArgBinOpIntoPHI - If we have something like phi [add (a,b), add(a,c)]
/// and if a/b/c and the add's all have a single use, turn this into a phi
/// and a single binop.
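///
/// For example (illustrative IR; block and value names are hypothetical):
///   pred1:  %s1 = add i32 %a, %b
///   pred2:  %s2 = add i32 %a, %c
///   merge:  %r  = phi i32 [ %s1, %pred1 ], [ %s2, %pred2 ]
/// can become
///   merge:  %b.pn = phi i32 [ %b, %pred1 ], [ %c, %pred2 ]
///           %r    = add i32 %a, %b.pn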
Instruction *InstCombiner::FoldPHIArgBinOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));
  assert(isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst));
  unsigned Opc = FirstInst->getOpcode();
  Value *LHSVal = FirstInst->getOperand(0);
  Value *RHSVal = FirstInst->getOperand(1);

  const Type *LHSType = LHSVal->getType();
  const Type *RHSType = RHSVal->getType();

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (!I || I->getOpcode() != Opc || !I->hasOneUse() ||
        // Verify type of the LHS matches so we don't fold cmp's of different
        // types or GEP's with different index types.
        I->getOperand(0)->getType() != LHSType ||
        I->getOperand(1)->getType() != RHSType)
      return 0;

    // If they are CmpInst instructions, check their predicates
    if (Opc == Instruction::ICmp || Opc == Instruction::FCmp)
      if (cast<CmpInst>(I)->getPredicate() !=
          cast<CmpInst>(FirstInst)->getPredicate())
        return 0;

    // Keep track of which operand needs a phi node.
    if (I->getOperand(0) != LHSVal) LHSVal = 0;
    if (I->getOperand(1) != RHSVal) RHSVal = 0;
  }

  // If both LHS and RHS would need a PHI, don't do this transformation,
  // because it would increase the number of PHIs entering the block,
  // which leads to higher register pressure. This is especially
  // bad when the PHIs are in the header of a loop.
  if (!LHSVal && !RHSVal)
    return 0;

  // Otherwise, this is safe to transform!

  Value *InLHS = FirstInst->getOperand(0);
  Value *InRHS = FirstInst->getOperand(1);
  PHINode *NewLHS = 0, *NewRHS = 0;
  if (LHSVal == 0) {
    NewLHS = PHINode::Create(LHSType,
                             FirstInst->getOperand(0)->getName() + ".pn");
    NewLHS->reserveOperandSpace(PN.getNumOperands()/2);
    NewLHS->addIncoming(InLHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewLHS, PN);
    LHSVal = NewLHS;
  }

  if (RHSVal == 0) {
    NewRHS = PHINode::Create(RHSType,
                             FirstInst->getOperand(1)->getName() + ".pn");
    NewRHS->reserveOperandSpace(PN.getNumOperands()/2);
    NewRHS->addIncoming(InRHS, PN.getIncomingBlock(0));
    InsertNewInstBefore(NewRHS, PN);
    RHSVal = NewRHS;
  }

  // Add all operands to the new PHIs.
  if (NewLHS || NewRHS) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      Instruction *InInst = cast<Instruction>(PN.getIncomingValue(i));
      if (NewLHS) {
        Value *NewInLHS = InInst->getOperand(0);
        NewLHS->addIncoming(NewInLHS, PN.getIncomingBlock(i));
      }
      if (NewRHS) {
        Value *NewInRHS = InInst->getOperand(1);
        NewRHS->addIncoming(NewInRHS, PN.getIncomingBlock(i));
      }
    }
  }

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
    return BinaryOperator::Create(BinOp->getOpcode(), LHSVal, RHSVal);
  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                         LHSVal, RHSVal);
}

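/// FoldPHIArgGEPIntoPHI - If every incoming value of the PHI is a single-use
/// GEP with the same result type and operand count, fold the PHI of GEPs into
/// a single GEP whose differing operands are PHI'd together. Bails out if
/// more than one operand would need a new PHI, if a differing operand is a
/// constant index, or if all base pointers are allocas (see below).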
Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) {
  GetElementPtrInst *FirstInst =cast<GetElementPtrInst>(PN.getIncomingValue(0));

  SmallVector<Value*, 16> FixedOperands(FirstInst->op_begin(),
                                        FirstInst->op_end());
  // This is true if all GEP bases are allocas and if all indices into them are
  // constants.
  bool AllBasePointersAreAllocas = true;

  // We don't want to replace this phi if the replacement would require
  // more than one phi, which leads to higher register pressure. This is
  // especially bad when the PHIs are in the header of a loop.
  bool NeededPhi = false;

  bool AllInBounds = true;

  // Scan to see if all operands are the same opcode, and all have one use.
  for (unsigned i = 1; i != PN.getNumIncomingValues(); ++i) {
    GetElementPtrInst *GEP= dyn_cast<GetElementPtrInst>(PN.getIncomingValue(i));
    if (!GEP || !GEP->hasOneUse() || GEP->getType() != FirstInst->getType() ||
        GEP->getNumOperands() != FirstInst->getNumOperands())
      return 0;

    AllInBounds &= GEP->isInBounds();

    // Keep track of whether or not all GEPs are of alloca pointers.
    if (AllBasePointersAreAllocas &&
        (!isa<AllocaInst>(GEP->getOperand(0)) ||
         !GEP->hasAllConstantIndices()))
      AllBasePointersAreAllocas = false;

    // Compare the operand lists.
    for (unsigned op = 0, e = FirstInst->getNumOperands(); op != e; ++op) {
      if (FirstInst->getOperand(op) == GEP->getOperand(op))
        continue;

      // Don't merge two GEPs when two operands differ (introducing phi nodes)
      // if one of the GEPs has a constant for that index. The index may be
      // substantially cheaper to compute for the constant, so making it a
      // variable index could pessimize the path. This also handles the case
      // for struct indices, which must always be constant.
      if (isa<ConstantInt>(FirstInst->getOperand(op)) ||
          isa<ConstantInt>(GEP->getOperand(op)))
        return 0;

      if (FirstInst->getOperand(op)->getType() !=GEP->getOperand(op)->getType())
        return 0;

      // If we already needed a PHI for an earlier operand, and another operand
      // also requires a PHI, we'd be introducing more PHIs than we're
      // eliminating, which increases register pressure on entry to the PHI's
      // block.
      if (NeededPhi)
        return 0;

      FixedOperands[op] = 0;  // Needs a PHI.
      NeededPhi = true;
    }
  }

  // If all of the base pointers of the PHI'd GEPs are from allocas, don't
  // bother doing this transformation. At best, this will just save a bit of
  // offset calculation, but all the predecessors will have to materialize the
  // stack address into a register anyway. We'd actually rather *clone* the
  // load up into the predecessors so that we have a load of a gep of an alloca,
  // which can usually all be folded into the load.
  if (AllBasePointersAreAllocas)
    return 0;

  // Otherwise, this is safe to transform. Insert PHI nodes for each operand
  // that is variable.
  SmallVector<PHINode*, 16> OperandPhis(FixedOperands.size());

  bool HasAnyPHIs = false;
  for (unsigned i = 0, e = FixedOperands.size(); i != e; ++i) {
    if (FixedOperands[i]) continue;  // operand doesn't need a phi.
    Value *FirstOp = FirstInst->getOperand(i);
    PHINode *NewPN = PHINode::Create(FirstOp->getType(),
                                     FirstOp->getName()+".pn");
    InsertNewInstBefore(NewPN, PN);

    NewPN->reserveOperandSpace(e);
    NewPN->addIncoming(FirstOp, PN.getIncomingBlock(0));
    OperandPhis[i] = NewPN;
    FixedOperands[i] = NewPN;
    HasAnyPHIs = true;
  }


  // Add all operands to the new PHIs.
  if (HasAnyPHIs) {
    for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
      GetElementPtrInst *InGEP =cast<GetElementPtrInst>(PN.getIncomingValue(i));
      BasicBlock *InBB = PN.getIncomingBlock(i);

      for (unsigned op = 0, e = OperandPhis.size(); op != e; ++op)
        if (PHINode *OpPhi = OperandPhis[op])
          OpPhi->addIncoming(InGEP->getOperand(op), InBB);
    }
  }

  Value *Base = FixedOperands[0];
  GetElementPtrInst *NewGEP =
    GetElementPtrInst::Create(Base, FixedOperands.begin()+1,
                              FixedOperands.end());
  if (AllInBounds) NewGEP->setIsInBounds();
  return NewGEP;
}


/// isSafeAndProfitableToSinkLoad - Return true if we know that it is safe to
/// sink the load out of the block that defines it. This means that it must be
/// obvious the value of the load is not changed from the point of the load to
/// the end of the block it is in.
///
/// Finally, it is safe, but not profitable, to sink a load targeting a
/// non-address-taken alloca. Doing so will cause us to not promote the alloca
/// to a register.
static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
  BasicBlock::iterator BBI = L, E = L->getParent()->end();

  for (++BBI; BBI != E; ++BBI)
    if (BBI->mayWriteToMemory())
      return false;

  // Check for non-address taken alloca. If not address-taken already, it isn't
  // profitable to do this xform.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
    bool isAddressTaken = false;
    for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
         UI != E; ++UI) {
      User *U = *UI;
      if (isa<LoadInst>(U)) continue;
      if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
        // If storing TO the alloca, then the address isn't taken.
        if (SI->getOperand(1) == AI) continue;
      }
      isAddressTaken = true;
      break;
    }

    if (!isAddressTaken && AI->isStaticAlloca())
      return false;
  }

  // If this load is a load from a GEP with a constant offset from an alloca,
  // then we don't want to sink it. In its present form, it will be
  // load [constant stack offset]. Sinking it will cause us to have to
  // materialize the stack addresses in each predecessor in a register only to
  // do a shared load from register in the successor.
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(L->getOperand(0)))
    if (AllocaInst *AI = dyn_cast<AllocaInst>(GEP->getOperand(0)))
      if (AI->isStaticAlloca() && GEP->hasAllConstantIndices())
        return false;

  return true;
}

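/// FoldPHIArgLoadIntoPHI - If every incoming value of the PHI is a single-use
/// load that lives in its predecessor block, and the loads agree on
/// volatility, address space, and the presence of an alignment, sink them:
/// build a PHI of the loaded pointers and issue one load of that PHI.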
Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
  LoadInst *FirstLI = cast<LoadInst>(PN.getIncomingValue(0));

  // When processing loads, we need to propagate two bits of information to the
  // sunk load: whether it is volatile, and what its alignment is. We currently
  // don't sink loads when some have their alignment specified and some don't.
  // visitLoadInst will propagate an alignment onto the load when TD is around,
  // and if TD isn't around, we can't handle the mixed case.
  bool isVolatile = FirstLI->isVolatile();
  unsigned LoadAlignment = FirstLI->getAlignment();
  unsigned LoadAddrSpace = FirstLI->getPointerAddressSpace();

  // We can't sink the load if the loaded value could be modified between the
  // load and the PHI.
  if (FirstLI->getParent() != PN.getIncomingBlock(0) ||
      !isSafeAndProfitableToSinkLoad(FirstLI))
    return 0;

  // If the PHI is of volatile loads and the load block has multiple
  // successors, sinking it would remove a load of the volatile value from
  // the path through the other successor.
  if (isVolatile &&
      FirstLI->getParent()->getTerminator()->getNumSuccessors() != 1)
    return 0;

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    LoadInst *LI = dyn_cast<LoadInst>(PN.getIncomingValue(i));
    if (!LI || !LI->hasOneUse())
      return 0;

    // We can't sink the load if the loaded value could be modified between
    // the load and the PHI.
    if (LI->isVolatile() != isVolatile ||
        LI->getParent() != PN.getIncomingBlock(i) ||
        LI->getPointerAddressSpace() != LoadAddrSpace ||
        !isSafeAndProfitableToSinkLoad(LI))
      return 0;

    // If some of the loads have an alignment specified but not all of them,
    // we can't do the transformation.
    if ((LoadAlignment != 0) != (LI->getAlignment() != 0))
      return 0;

    LoadAlignment = std::min(LoadAlignment, LI->getAlignment());

    // If the PHI is of volatile loads and the load block has multiple
    // successors, sinking it would remove a load of the volatile value from
    // the path through the other successor.
    if (isVolatile &&
        LI->getParent()->getTerminator()->getNumSuccessors() != 1)
      return 0;
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstLI->getOperand(0)->getType(),
                                   PN.getName()+".in");
  NewPN->reserveOperandSpace(PN.getNumOperands()/2);

  Value *InVal = FirstLI->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<LoadInst>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = 0;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // If this was a volatile load that we are merging, make sure to loop through
  // and mark all the input loads as non-volatile. If we don't do this, we will
  // insert a new volatile load and the old ones will not be deletable.
  if (isVolatile)
    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
      cast<LoadInst>(PN.getIncomingValue(i))->setVolatile(false);

  return new LoadInst(PhiVal, "", isVolatile, LoadAlignment);
}


/// FoldPHIArgOpIntoPHI - If all operands to a PHI node are the same "unary"
/// operator and they all are only used by the PHI, PHI together their
/// inputs, and do the operation once, to the result of the PHI.
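///
/// For example (illustrative IR; block and value names are hypothetical):
///   pred1:  %t1 = trunc i64 %x to i32
///   pred2:  %t2 = trunc i64 %y to i32
///   merge:  %p  = phi i32 [ %t1, %pred1 ], [ %t2, %pred2 ]
/// can become
///   merge:  %p.in = phi i64 [ %x, %pred1 ], [ %y, %pred2 ]
///           %p    = trunc i64 %p.in to i32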
Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  Instruction *FirstInst = cast<Instruction>(PN.getIncomingValue(0));

  if (isa<GetElementPtrInst>(FirstInst))
    return FoldPHIArgGEPIntoPHI(PN);
  if (isa<LoadInst>(FirstInst))
    return FoldPHIArgLoadIntoPHI(PN);

  // Scan the instruction, looking for input operations that can be folded away.
  // If all input operands to the phi are the same instruction (e.g. a cast from
  // the same type or "+42") we can pull the operation through the PHI, reducing
  // code size and simplifying code.
  Constant *ConstantOp = 0;
  const Type *CastSrcTy = 0;

  if (isa<CastInst>(FirstInst)) {
    CastSrcTy = FirstInst->getOperand(0)->getType();

    // Be careful about transforming integer PHIs. We don't want to pessimize
    // the code by turning an i32 into an i1293.
    if (PN.getType()->isIntegerTy() && CastSrcTy->isIntegerTy()) {
      if (!ShouldChangeType(PN.getType(), CastSrcTy))
        return 0;
    }
  } else if (isa<BinaryOperator>(FirstInst) || isa<CmpInst>(FirstInst)) {
    // Can fold binop, compare or shift here if the RHS is a constant,
    // otherwise call FoldPHIArgBinOpIntoPHI.
    ConstantOp = dyn_cast<Constant>(FirstInst->getOperand(1));
    if (ConstantOp == 0)
      return FoldPHIArgBinOpIntoPHI(PN);
  } else {
    return 0;  // Cannot fold this operation.
  }

  // Check to see if all arguments are the same operation.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Instruction *I = dyn_cast<Instruction>(PN.getIncomingValue(i));
    if (I == 0 || !I->hasOneUse() || !I->isSameOperationAs(FirstInst))
      return 0;
    if (CastSrcTy) {
      if (I->getOperand(0)->getType() != CastSrcTy)
        return 0;  // Cast operation must match.
    } else if (I->getOperand(1) != ConstantOp) {
      return 0;
    }
  }

  // Okay, they are all the same operation. Create a new PHI node of the
  // correct type, and PHI together all of the LHS's of the instructions.
  PHINode *NewPN = PHINode::Create(FirstInst->getOperand(0)->getType(),
                                   PN.getName()+".in");
  NewPN->reserveOperandSpace(PN.getNumOperands()/2);

  Value *InVal = FirstInst->getOperand(0);
  NewPN->addIncoming(InVal, PN.getIncomingBlock(0));

  // Add all operands to the new PHI.
  for (unsigned i = 1, e = PN.getNumIncomingValues(); i != e; ++i) {
    Value *NewInVal = cast<Instruction>(PN.getIncomingValue(i))->getOperand(0);
    if (NewInVal != InVal)
      InVal = 0;
    NewPN->addIncoming(NewInVal, PN.getIncomingBlock(i));
  }

  Value *PhiVal;
  if (InVal) {
    // The new PHI unions all of the same values together. This is really
    // common, so we handle it intelligently here for compile-time speed.
    PhiVal = InVal;
    delete NewPN;
  } else {
    InsertNewInstBefore(NewPN, PN);
    PhiVal = NewPN;
  }

  // Insert and return the new operation.
  if (CastInst *FirstCI = dyn_cast<CastInst>(FirstInst))
    return CastInst::Create(FirstCI->getOpcode(), PhiVal, PN.getType());

  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(FirstInst))
    return BinaryOperator::Create(BinOp->getOpcode(), PhiVal, ConstantOp);

  CmpInst *CIOp = cast<CmpInst>(FirstInst);
  return CmpInst::Create(CIOp->getOpcode(), CIOp->getPredicate(),
                         PhiVal, ConstantOp);
}

/// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
/// that is dead.
static bool DeadPHICycle(PHINode *PN,
                         SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

  // Remember this node, and if we find the cycle, return.
  if (!PotentiallyDeadPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (PotentiallyDeadPHIs.size() == 16)
    return false;

  if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
    return DeadPHICycle(PU, PotentiallyDeadPHIs);

  return false;
}

/// PHIsEqualValue - Return true if this phi node is always equal to
/// NonPhiInVal. This happens with mutually cyclic phi nodes like:
///   z = some value; x = phi (y, z); y = phi (x, z)
static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
                           SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN))
    return true;

  // Don't scan crazily complex things.
  if (ValueEqualPHIs.size() == 16)
    return false;

  // Scan the operands to see if they are either phi nodes or are equal to
  // the value.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *Op = PN->getIncomingValue(i);
    if (PHINode *OpPN = dyn_cast<PHINode>(Op)) {
      if (!PHIsEqualValue(OpPN, NonPhiInVal, ValueEqualPHIs))
        return false;
    } else if (Op != NonPhiInVal)
      return false;
  }

  return true;
}


namespace {
struct PHIUsageRecord {
  unsigned PHIId;     // The ID # of the PHI (something deterministic to sort on)
  unsigned Shift;     // The amount shifted.
  Instruction *Inst;  // The trunc instruction.

  PHIUsageRecord(unsigned pn, unsigned Sh, Instruction *User)
    : PHIId(pn), Shift(Sh), Inst(User) {}

  bool operator<(const PHIUsageRecord &RHS) const {
    if (PHIId < RHS.PHIId) return true;
    if (PHIId > RHS.PHIId) return false;
    if (Shift < RHS.Shift) return true;
    if (Shift > RHS.Shift) return false;
    return Inst->getType()->getPrimitiveSizeInBits() <
           RHS.Inst->getType()->getPrimitiveSizeInBits();
  }
};

struct LoweredPHIRecord {
  PHINode *PN;        // The PHI that was lowered.
  unsigned Shift;     // The amount shifted.
  unsigned Width;     // The width extracted.

  LoweredPHIRecord(PHINode *pn, unsigned Sh, const Type *Ty)
    : PN(pn), Shift(Sh), Width(Ty->getPrimitiveSizeInBits()) {}

  // Ctor form used by DenseMap.
  LoweredPHIRecord(PHINode *pn, unsigned Sh)
    : PN(pn), Shift(Sh), Width(0) {}
};
}

namespace llvm {
  template<>
  struct DenseMapInfo<LoweredPHIRecord> {
    static inline LoweredPHIRecord getEmptyKey() {
      return LoweredPHIRecord(0, 0);
    }
    static inline LoweredPHIRecord getTombstoneKey() {
      return LoweredPHIRecord(0, 1);
    }
    static unsigned getHashValue(const LoweredPHIRecord &Val) {
      return DenseMapInfo<PHINode*>::getHashValue(Val.PN) ^ (Val.Shift>>3) ^
             (Val.Width>>3);
    }
    static bool isEqual(const LoweredPHIRecord &LHS,
                        const LoweredPHIRecord &RHS) {
      return LHS.PN == RHS.PN && LHS.Shift == RHS.Shift &&
             LHS.Width == RHS.Width;
    }
  };
  template <>
  struct isPodLike<LoweredPHIRecord> { static const bool value = true; };
}


/// SliceUpIllegalIntegerPHI - This is an integer PHI and we know that it has an
/// illegal type: see if it is only used by trunc or trunc(lshr) operations. If
/// so, we split the PHI into the various pieces being extracted. This sort of
/// thing is introduced when SROA promotes an aggregate to large integer values.
///
/// TODO: The user of the trunc may be a bitcast to float/double/vector or an
/// inttoptr. We should produce new PHIs in the right type.
///
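/// For example (illustrative), a PHI of illegal type i128 whose only uses are
///   %lo    = trunc i128 %phi to i64
///   %hi.sh = lshr i128 %phi, 64
///   %hi    = trunc i128 %hi.sh to i64
/// can be rewritten as two i64 PHIs whose incoming values are computed with
/// lshr+trunc extracts in the predecessor blocks.
///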
Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
  // PHIUsers - Keep track of all of the truncated values extracted from a set
  // of PHIs, along with their offset. These are the things we want to rewrite.
  SmallVector<PHIUsageRecord, 16> PHIUsers;

  // PHIs are often mutually cyclic, so we keep track of a whole set of PHI
  // nodes which are extracted from. PHIsToSlice is an ordered worklist of the
  // PHIs whose uses we still need to check (to ensure they are all extracts);
  // PHIsInspected is a set we use to avoid revisiting PHIs.
  SmallVector<PHINode*, 8> PHIsToSlice;
  SmallPtrSet<PHINode*, 8> PHIsInspected;

  PHIsToSlice.push_back(&FirstPhi);
  PHIsInspected.insert(&FirstPhi);

  for (unsigned PHIId = 0; PHIId != PHIsToSlice.size(); ++PHIId) {
    PHINode *PN = PHIsToSlice[PHIId];

    // Scan the input list of the PHI. If any input is an invoke, and if the
    // input is defined in the predecessor, then we won't be able to split the
    // critical edge that would be required to insert a truncate, so we have
    // to bail out.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      InvokeInst *II = dyn_cast<InvokeInst>(PN->getIncomingValue(i));
      if (II == 0) continue;
      if (II->getParent() != PN->getIncomingBlock(i))
        continue;

      // The invoke is defined directly in the predecessor, so this is a
      // critical edge where we would need to put the truncate. Since we can't
      // split the edge in instcombine, we have to bail out.
      return 0;
    }


    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);

      // If the user is a PHI, inspect its uses recursively.
      if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
        if (PHIsInspected.insert(UserPN))
          PHIsToSlice.push_back(UserPN);
        continue;
      }

      // Truncates are always ok.
      if (isa<TruncInst>(User)) {
        PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User));
        continue;
      }

      // Otherwise it must be a lshr which can only be used by one trunc.
      if (User->getOpcode() != Instruction::LShr ||
          !User->hasOneUse() || !isa<TruncInst>(User->use_back()) ||
          !isa<ConstantInt>(User->getOperand(1)))
        return 0;

      unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
      PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back()));
    }
  }

  // If we have no users, they must be all self uses, just nuke the PHI.
  if (PHIUsers.empty())
    return ReplaceInstUsesWith(FirstPhi, UndefValue::get(FirstPhi.getType()));

  // If this phi node is transformable, create new PHIs for all the pieces
  // extracted out of it. First, sort the users by their offset and size.
  array_pod_sort(PHIUsers.begin(), PHIUsers.end());

  DEBUG(errs() << "SLICING UP PHI: " << FirstPhi << '\n';
        for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
          errs() << "AND USER PHI #" << i << ": " << *PHIsToSlice[i] <<'\n';
        );

  // PredValues - This is a temporary used when rewriting PHI nodes. It is
  // hoisted out here to avoid construction/destruction thrashing.
  DenseMap<BasicBlock*, Value*> PredValues;

  // ExtractedVals - Each new PHI we introduce is saved here so we don't
  // introduce redundant PHIs.
  DenseMap<LoweredPHIRecord, PHINode*> ExtractedVals;

  for (unsigned UserI = 0, UserE = PHIUsers.size(); UserI != UserE; ++UserI) {
    unsigned PHIId = PHIUsers[UserI].PHIId;
    PHINode *PN = PHIsToSlice[PHIId];
    unsigned Offset = PHIUsers[UserI].Shift;
    const Type *Ty = PHIUsers[UserI].Inst->getType();

    PHINode *EltPHI;

    // If we've already lowered a user like this, reuse the previously lowered
    // value.
    if ((EltPHI = ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)]) == 0) {

      // Otherwise, Create the new PHI node for this user.
      EltPHI = PHINode::Create(Ty, PN->getName()+".off"+Twine(Offset), PN);
      assert(EltPHI->getType() != PN->getType() &&
             "Truncate didn't shrink phi?");

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *Pred = PN->getIncomingBlock(i);
        Value *&PredVal = PredValues[Pred];

        // If we already have a value for this predecessor, reuse it.
        if (PredVal) {
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        // Handle the PHI self-reuse case.
        Value *InVal = PN->getIncomingValue(i);
        if (InVal == PN) {
          PredVal = EltPHI;
          EltPHI->addIncoming(PredVal, Pred);
          continue;
        }

        if (PHINode *InPHI = dyn_cast<PHINode>(InVal)) {
          // If the incoming value was a PHI, and if it was one of the PHIs we
          // already rewrote, just use the lowered value.
          if (Value *Res = ExtractedVals[LoweredPHIRecord(InPHI, Offset, Ty)]) {
            PredVal = Res;
            EltPHI->addIncoming(PredVal, Pred);
            continue;
          }
        }

        // Otherwise, do an extract in the predecessor.
        Builder->SetInsertPoint(Pred, Pred->getTerminator());
        Value *Res = InVal;
        if (Offset)
          Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(),
                                                          Offset), "extract");
        Res = Builder->CreateTrunc(Res, Ty, "extract.t");
        PredVal = Res;
        EltPHI->addIncoming(Res, Pred);

        // If the incoming value was a PHI, and if it was one of the PHIs we are
        // rewriting, we will ultimately delete the code we inserted. This
        // means we need to revisit that PHI to make sure we extract out the
        // needed piece.
        if (PHINode *OldInVal = dyn_cast<PHINode>(PN->getIncomingValue(i)))
          if (PHIsInspected.count(OldInVal)) {
            unsigned RefPHIId = std::find(PHIsToSlice.begin(),PHIsToSlice.end(),
                                          OldInVal)-PHIsToSlice.begin();
            PHIUsers.push_back(PHIUsageRecord(RefPHIId, Offset,
                                              cast<Instruction>(Res)));
            ++UserE;
          }
      }
      PredValues.clear();

      DEBUG(errs() << "  Made element PHI for offset " << Offset << ": "
                   << *EltPHI << '\n');
      ExtractedVals[LoweredPHIRecord(PN, Offset, Ty)] = EltPHI;
    }

    // Replace the use of this piece with the PHI node.
    ReplaceInstUsesWith(*PHIUsers[UserI].Inst, EltPHI);
  }

  // Replace all the remaining uses of the PHI nodes (self uses and the lshrs)
  // with undefs.
  Value *Undef = UndefValue::get(FirstPhi.getType());
  for (unsigned i = 1, e = PHIsToSlice.size(); i != e; ++i)
    ReplaceInstUsesWith(*PHIsToSlice[i], Undef);
  return ReplaceInstUsesWith(FirstPhi, Undef);
}

// PHINode simplification
//
Instruction *InstCombiner::visitPHINode(PHINode &PN) {
  // If LCSSA is around, don't mess with Phi nodes
  if (MustPreserveLCSSA) return 0;

  if (Value *V = SimplifyInstruction(&PN, TD))
    return ReplaceInstUsesWith(PN, V);

  // If all PHI operands are the same operation, pull them through the PHI,
  // reducing code size.
  if (isa<Instruction>(PN.getIncomingValue(0)) &&
      isa<Instruction>(PN.getIncomingValue(1)) &&
      cast<Instruction>(PN.getIncomingValue(0))->getOpcode() ==
      cast<Instruction>(PN.getIncomingValue(1))->getOpcode() &&
      // FIXME: The hasOneUse check will fail for PHIs that use the value more
      // than once, even when all of the extra uses are by the PHI itself.
      PN.getIncomingValue(0)->hasOneUse())
    if (Instruction *Result = FoldPHIArgOpIntoPHI(PN))
      return Result;

  // If this is a trivial cycle in the PHI node graph, remove it. Basically, if
  // this PHI only has a single use (a PHI), and if that PHI only has one use (a
  // PHI)... break the cycle.
  if (PN.hasOneUse()) {
    Instruction *PHIUser = cast<Instruction>(PN.use_back());
    if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
      SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
      PotentiallyDeadPHIs.insert(&PN);
      if (DeadPHICycle(PU, PotentiallyDeadPHIs))
        return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }

    // If this phi has a single use, and if that use just computes a value for
    // the next iteration of a loop, delete the phi. This occurs with unused
    // induction variables, e.g. "for (int j = 0; ; ++j);". Detecting this
    // common case here is good because the only other things that catch this
    // are induction variable analysis (sometimes) and ADCE, which is only run
    // late.
    if (PHIUser->hasOneUse() &&
        (isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
        PHIUser->use_back() == &PN) {
      return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
    }
  }

  // We sometimes end up with phi cycles that non-obviously end up being the
  // same value, for example:
  //   z = some value; x = phi (y, z); y = phi (x, z)
  // where the phi nodes don't necessarily need to be in the same block. Do a
  // quick check to see if the PHI node only contains a single non-phi value, if
  // so, scan to see if the phi cycle is actually equal to that value.
  {
    unsigned InValNo = 0, NumOperandVals = PN.getNumIncomingValues();
    // Scan for the first non-phi operand.
    while (InValNo != NumOperandVals &&
           isa<PHINode>(PN.getIncomingValue(InValNo)))
      ++InValNo;

    if (InValNo != NumOperandVals) {
      Value *NonPhiInVal = PN.getOperand(InValNo);

      // Scan the rest of the operands to see if there are any conflicts, if so
      // there is no need to recursively scan other phis.
      for (++InValNo; InValNo != NumOperandVals; ++InValNo) {
        Value *OpVal = PN.getIncomingValue(InValNo);
        if (OpVal != NonPhiInVal && !isa<PHINode>(OpVal))
          break;
      }

      // If we scanned over all operands, then we have one unique value plus
      // phi values. Scan PHI nodes to see if they all merge in each other or
      // the value.
      if (InValNo == NumOperandVals) {
        SmallPtrSet<PHINode*, 16> ValueEqualPHIs;
        if (PHIsEqualValue(&PN, NonPhiInVal, ValueEqualPHIs))
          return ReplaceInstUsesWith(PN, NonPhiInVal);
      }
    }
  }

  // If there are multiple PHIs, sort their operands so that they all list
  // the blocks in the same order. This will help identical PHIs be eliminated
  // by other passes. Other passes shouldn't depend on this for correctness
  // however.
  PHINode *FirstPN = cast<PHINode>(PN.getParent()->begin());
  if (&PN != FirstPN)
    for (unsigned i = 0, e = FirstPN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *BBA = PN.getIncomingBlock(i);
      BasicBlock *BBB = FirstPN->getIncomingBlock(i);
      if (BBA != BBB) {
        Value *VA = PN.getIncomingValue(i);
        unsigned j = PN.getBasicBlockIndex(BBB);
        Value *VB = PN.getIncomingValue(j);
        PN.setIncomingBlock(i, BBB);
        PN.setIncomingValue(i, VB);
        PN.setIncomingBlock(j, BBA);
        PN.setIncomingValue(j, VA);
        // NOTE: Instcombine normally would want us to "return &PN" if we
        // modified any of the operands of an instruction. However, since we
        // aren't adding or removing uses (just rearranging them) we don't do
        // this in this case.
      }
    }

  // If this is an integer PHI and we know that it has an illegal type, see if
  // it is only used by trunc or trunc(lshr) operations. If so, we split the
  // PHI into the various pieces being extracted. This sort of thing is
  // introduced when SROA promotes an aggregate to a single large integer type.
  if (PN.getType()->isIntegerTy() && TD &&
      !TD->isLegalInteger(PN.getType()->getPrimitiveSizeInBits()))
    if (Instruction *Res = SliceUpIllegalIntegerPHI(PN))
      return Res;

  return 0;
}