Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 1 | //===- LoopAnalysis.cpp - Misc loop analysis routines //-------------------===// |
| 2 | // |
| 3 | // Copyright 2019 The MLIR Authors. |
| 4 | // |
| 5 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | // you may not use this file except in compliance with the License. |
| 7 | // You may obtain a copy of the License at |
| 8 | // |
| 9 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | // |
| 11 | // Unless required by applicable law or agreed to in writing, software |
| 12 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | // See the License for the specific language governing permissions and |
| 15 | // limitations under the License. |
| 16 | // ============================================================================= |
| 17 | // |
| 18 | // This file implements miscellaneous loop analysis routines. |
| 19 | // |
| 20 | //===----------------------------------------------------------------------===// |
| 21 | |
#include "mlir/Analysis/LoopAnalysis.h"

#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/MLFunctionMatcher.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Statements.h"
#include "mlir/StandardOps/StandardOps.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/MathExtras.h"
#include "llvm/ADT/SmallString.h"

#include <limits>
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 35 | |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 36 | using namespace mlir; |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 37 | |
| 38 | /// Returns the trip count of the loop as an affine expression if the latter is |
| 39 | /// expressible as an affine expression, and nullptr otherwise. The trip count |
| 40 | /// expression is simplified before returning. |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 41 | AffineExpr mlir::getTripCountExpr(const ForStmt &forStmt) { |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 42 | // upper_bound - lower_bound |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 43 | int64_t loopSpan; |
| 44 | |
| 45 | int64_t step = forStmt.getStep(); |
| 46 | auto *context = forStmt.getContext(); |
| 47 | |
| 48 | if (forStmt.hasConstantBounds()) { |
| 49 | int64_t lb = forStmt.getConstantLowerBound(); |
| 50 | int64_t ub = forStmt.getConstantUpperBound(); |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 51 | loopSpan = ub - lb; |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 52 | } else { |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 53 | auto lbMap = forStmt.getLowerBoundMap(); |
| 54 | auto ubMap = forStmt.getUpperBoundMap(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 55 | // TODO(bondhugula): handle max/min of multiple expressions. |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 56 | if (lbMap.getNumResults() != 1 || ubMap.getNumResults() != 1) |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 57 | return nullptr; |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 58 | |
| 59 | // TODO(bondhugula): handle bounds with different operands. |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 60 | // Bounds have different operands, unhandled for now. |
Uday Bondhugula | 5912e87 | 2018-09-18 10:22:03 -0700 | [diff] [blame] | 61 | if (!forStmt.matchingBoundOperandList()) |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 62 | return nullptr; |
| 63 | |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 64 | // ub_expr - lb_expr |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 65 | AffineExpr lbExpr(lbMap.getResult(0)); |
| 66 | AffineExpr ubExpr(ubMap.getResult(0)); |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 67 | auto loopSpanExpr = simplifyAffineExpr( |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 68 | ubExpr - lbExpr, std::max(lbMap.getNumDims(), ubMap.getNumDims()), |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 69 | std::max(lbMap.getNumSymbols(), ubMap.getNumSymbols())); |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 70 | auto cExpr = loopSpanExpr.dyn_cast<AffineConstantExpr>(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 71 | if (!cExpr) |
Nicolas Vasilache | bc74609 | 2018-10-08 10:20:25 -0700 | [diff] [blame] | 72 | return loopSpanExpr.ceilDiv(step); |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 73 | loopSpan = cExpr.getValue(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 74 | } |
| 75 | |
| 76 | // 0 iteration loops. |
Uday Bondhugula | ff5d6bd | 2018-09-27 18:03:27 -0700 | [diff] [blame] | 77 | if (loopSpan < 0) |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 78 | return 0; |
| 79 | |
Nicolas Vasilache | bc74609 | 2018-10-08 10:20:25 -0700 | [diff] [blame] | 80 | return getAffineConstantExpr(static_cast<uint64_t>(ceilDiv(loopSpan, step)), |
| 81 | context); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 82 | } |
| 83 | |
| 84 | /// Returns the trip count of the loop if it's a constant, None otherwise. This |
| 85 | /// method uses affine expression analysis (in turn using getTripCount) and is |
| 86 | /// able to determine constant trip count in non-trivial cases. |
| 87 | llvm::Optional<uint64_t> mlir::getConstantTripCount(const ForStmt &forStmt) { |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 88 | auto tripCountExpr = getTripCountExpr(forStmt); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 89 | |
Nicolas Vasilache | 32402e5 | 2018-10-08 08:09:50 -0700 | [diff] [blame] | 90 | if (!tripCountExpr) |
| 91 | return None; |
| 92 | |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 93 | if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>()) |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 94 | return constExpr.getValue(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 95 | |
| 96 | return None; |
| 97 | } |
| 98 | |
| 99 | /// Returns the greatest known integral divisor of the trip count. Affine |
| 100 | /// expression analysis is used (indirectly through getTripCount), and |
| 101 | /// this method is thus able to determine non-trivial divisors. |
| 102 | uint64_t mlir::getLargestDivisorOfTripCount(const ForStmt &forStmt) { |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 103 | auto tripCountExpr = getTripCountExpr(forStmt); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 104 | |
| 105 | if (!tripCountExpr) |
| 106 | return 1; |
| 107 | |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 108 | if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>()) { |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 109 | uint64_t tripCount = constExpr.getValue(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 110 | |
| 111 | // 0 iteration loops (greatest divisor is 2^64 - 1). |
| 112 | if (tripCount == 0) |
| 113 | return ULONG_MAX; |
| 114 | |
| 115 | // The greatest divisor is the trip count. |
| 116 | return tripCount; |
| 117 | } |
| 118 | |
| 119 | // Trip count is not a known constant; return its largest known divisor. |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 120 | return tripCountExpr.getLargestKnownDivisor(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 121 | } |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 122 | |
/// Returns true if the `dim`-th index of the access described by `memRefType`
/// and `indices` does not vary as a function of `input`.
/// Preconditions (asserted): `indices` has one entry per memref dimension,
/// `dim` is in range, and the memref has at most one (identity) layout map —
/// non-identity layouts are not handled yet.
bool mlir::isAccessInvariant(const MLValue &input, MemRefType memRefType,
                             ArrayRef<const MLValue *> indices, unsigned dim) {
  assert(indices.size() == memRefType.getRank());
  assert(dim < indices.size());
  auto layoutMap = memRefType.getAffineMaps();
  assert(memRefType.getAffineMaps().size() <= 1);
  // TODO(ntv): remove dependence on Builder once we support non-identity
  // layout map.
  Builder b(memRefType.getContext());
  assert(layoutMap.empty() ||
         layoutMap[0] == b.getMultiDimIdentityMap(indices.size()));
  (void)layoutMap;

  // Collect the affine_apply ops that (transitively) produce the `dim`-th
  // index.
  SmallVector<OperationStmt *, 4> affineApplyOps;
  getReachableAffineApplyOps({const_cast<MLValue *>(indices[dim])},
                             affineApplyOps);

  if (affineApplyOps.empty()) {
    // The index is used directly, with no affine_apply in between: it is
    // invariant iff it is not `input` itself.
    // Pointer equality test because of MLValue pointer semantics.
    return indices[dim] != &input;
  }

  assert(affineApplyOps.size() == 1 &&
         "CompositionAffineMapsPass must have "
         "been run: there should be at most one AffineApplyOp");
  auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
  // We need yet another level of indirection because the `dim` index of the
  // access may not correspond to the `dim` index of composeOp.
  // Locate which result of composeOp feeds the `dim`-th index; `idx` starts
  // as a sentinel meaning "not found".
  unsigned idx = std::numeric_limits<unsigned>::max();
  unsigned numResults = composeOp->getNumResults();
  for (unsigned i = 0; i < numResults; ++i) {
    if (indices[dim] == composeOp->getResult(i)) {
      idx = i;
      break;
    }
  }
  // The index must have been produced by one of composeOp's results.
  assert(idx < std::numeric_limits<unsigned>::max());
  // Invariant iff that particular result is not a function of `input`.
  return !AffineValueMap(*composeOp)
              .isFunctionOf(idx, &const_cast<MLValue &>(input));
}
| 163 | |
| 164 | /// Determines whether a load or a store has a contiguous access along the |
| 165 | /// value `input`. Contiguous is defined as either invariant or varying only |
| 166 | /// along the fastest varying memory dimension. |
| 167 | // TODO(ntv): allow more advanced notions of contiguity (non-fastest varying, |
| 168 | // check strides, ...). |
| 169 | template <typename LoadOrStoreOpPointer> |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 170 | static bool isContiguousAccess(const MLValue &input, |
| 171 | LoadOrStoreOpPointer memoryOp, |
| 172 | unsigned fastestVaryingDim) { |
| 173 | using namespace functional; |
Nicolas Vasilache | d64816a | 2018-11-01 07:14:14 -0700 | [diff] [blame] | 174 | auto indices = map([](const SSAValue *val) { return dyn_cast<MLValue>(val); }, |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 175 | memoryOp->getIndices()); |
River Riddle | 666dfbe | 2018-10-30 14:59:22 -0700 | [diff] [blame] | 176 | auto memRefType = memoryOp->getMemRefType(); |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 177 | for (unsigned d = 0, numIndices = indices.size(); d < numIndices; ++d) { |
| 178 | if (fastestVaryingDim == (numIndices - 1) - d) { |
| 179 | continue; |
| 180 | } |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 181 | if (!isAccessInvariant(input, memRefType, indices, d)) { |
| 182 | return false; |
| 183 | } |
| 184 | } |
| 185 | return true; |
| 186 | } |
| 187 | |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 188 | template <typename LoadOrStoreOpPointer> |
| 189 | static bool isVectorElement(LoadOrStoreOpPointer memoryOp) { |
River Riddle | 666dfbe | 2018-10-30 14:59:22 -0700 | [diff] [blame] | 190 | auto memRefType = memoryOp->getMemRefType(); |
| 191 | return memRefType.getElementType().template isa<VectorType>(); |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 192 | } |
| 193 | |
Nicolas Vasilache | 6b19746 | 2018-11-14 04:04:10 -0800 | [diff] [blame] | 194 | // TODO(ntv): make the following into MLIR instructions, then use isa<>. |
| 195 | static bool isVectorTransferReadOrWrite(const Statement &stmt) { |
| 196 | const auto *opStmt = cast<OperationStmt>(&stmt); |
Nicolas Vasilache | 13b3bce | 2018-11-20 08:36:07 -0800 | [diff] [blame^] | 197 | return isaVectorTransferRead(*opStmt) || isaVectorTransferWrite(*opStmt); |
Nicolas Vasilache | 6b19746 | 2018-11-14 04:04:10 -0800 | [diff] [blame] | 198 | } |
| 199 | |
// Predicate type: decides, for an operation statement nested in a given loop,
// whether that statement is vectorizable.
using VectorizableStmtFun =
    std::function<bool(const ForStmt &, const OperationStmt &)>;

/// Returns true if `loop` is vectorizable under the per-statement predicate
/// `isVectorizableStmt`. Structural requirements checked here: the loop must
/// be parallel or a reduction, contain no conditionals, contain no
/// pre-existing vector_transfer ops, and contain no load/store whose memref
/// element type is already a vector; every matched load/store must also
/// satisfy `isVectorizableStmt`.
static bool isVectorizableLoopWithCond(const ForStmt &loop,
                                       VectorizableStmtFun isVectorizableStmt) {
  if (!matcher::isParallelLoop(loop) && !matcher::isReductionLoop(loop)) {
    return false;
  }

  // No vectorization across conditionals for now.
  // Matchers operate on non-const statements; this analysis itself does not
  // mutate the IR, hence the const_cast.
  auto conditionals = matcher::If();
  auto *forStmt = const_cast<ForStmt *>(&loop);
  auto conditionalsMatched = conditionals.match(forStmt);
  if (!conditionalsMatched.empty()) {
    return false;
  }

  // Refuse loops that already contain vector transfers (no re-vectorization).
  auto vectorTransfers = matcher::Op(isVectorTransferReadOrWrite);
  auto vectorTransfersMatched = vectorTransfers.match(forStmt);
  if (!vectorTransfersMatched.empty()) {
    return false;
  }

  auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
  auto loadAndStoresMatched = loadAndStores.match(forStmt);
  for (auto ls : loadAndStoresMatched) {
    auto *op = cast<OperationStmt>(ls.first);
    auto load = op->dyn_cast<LoadOp>();
    auto store = op->dyn_cast<StoreOp>();
    // Only scalar types are considered vectorizable, all load/store must be
    // vectorizable for a loop to qualify as vectorizable.
    // TODO(ntv): ponder whether we want to be more general here.
    bool vector = load ? isVectorElement(load) : isVectorElement(store);
    if (vector) {
      return false;
    }
    // Delegate the remaining per-statement decision to the caller's predicate.
    if (!isVectorizableStmt(loop, *op)) {
      return false;
    }
  }
  return true;
}
Uday Bondhugula | 861fe64 | 2018-10-18 11:14:26 -0700 | [diff] [blame] | 242 | |
Nicolas Vasilache | d64816a | 2018-11-01 07:14:14 -0700 | [diff] [blame] | 243 | bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim( |
| 244 | const ForStmt &loop, unsigned fastestVaryingDim) { |
| 245 | VectorizableStmtFun fun( |
| 246 | [fastestVaryingDim](const ForStmt &loop, const OperationStmt &op) { |
| 247 | auto load = op.dyn_cast<LoadOp>(); |
| 248 | auto store = op.dyn_cast<StoreOp>(); |
| 249 | return load ? isContiguousAccess(loop, load, fastestVaryingDim) |
| 250 | : isContiguousAccess(loop, store, fastestVaryingDim); |
| 251 | }); |
| 252 | return isVectorizableLoopWithCond(loop, fun); |
| 253 | } |
| 254 | |
| 255 | bool mlir::isVectorizableLoop(const ForStmt &loop) { |
| 256 | VectorizableStmtFun fun( |
| 257 | // TODO: implement me |
| 258 | [](const ForStmt &loop, const OperationStmt &op) { return true; }); |
| 259 | return isVectorizableLoopWithCond(loop, fun); |
| 260 | } |
| 261 | |
Uday Bondhugula | 861fe64 | 2018-10-18 11:14:26 -0700 | [diff] [blame] | 262 | /// Checks whether SSA dominance would be violated if a for stmt's body |
| 263 | /// statements are shifted by the specified shifts. This method checks if a |
| 264 | /// 'def' and all its uses have the same shift factor. |
| 265 | // TODO(mlir-team): extend this to check for memory-based dependence |
| 266 | // violation when we have the support. |
| 267 | bool mlir::isStmtwiseShiftValid(const ForStmt &forStmt, |
| 268 | ArrayRef<uint64_t> shifts) { |
| 269 | assert(shifts.size() == forStmt.getStatements().size()); |
| 270 | unsigned s = 0; |
| 271 | for (const auto &stmt : forStmt) { |
| 272 | // A for or if stmt does not produce any def/results (that are used |
| 273 | // outside). |
| 274 | if (const auto *opStmt = dyn_cast<OperationStmt>(&stmt)) { |
| 275 | for (unsigned i = 0, e = opStmt->getNumResults(); i < e; ++i) { |
| 276 | const MLValue *result = opStmt->getResult(i); |
| 277 | for (const StmtOperand &use : result->getUses()) { |
| 278 | // If an ancestor statement doesn't lie in the block of forStmt, there |
| 279 | // is no shift to check. |
| 280 | // This is a naive way. If performance becomes an issue, a map can |
| 281 | // be used to store 'shifts' - to look up the shift for a statement in |
| 282 | // constant time. |
| 283 | if (auto *ancStmt = forStmt.findAncestorStmtInBlock(*use.getOwner())) |
| 284 | if (shifts[s] != shifts[forStmt.findStmtPosInBlock(*ancStmt)]) |
| 285 | return false; |
| 286 | } |
| 287 | } |
| 288 | } |
| 289 | s++; |
| 290 | } |
| 291 | return true; |
| 292 | } |