//===- LoopAnalysis.cpp - Misc loop analysis routines --------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This file implements miscellaneous loop analysis routines.
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/LoopAnalysis.h"

#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/MLFunctionMatcher.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Statements.h"
#include "mlir/StandardOps/StandardOps.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/MathExtras.h"

#include <algorithm>
#include <climits>
#include <limits>

using namespace mlir;

/// Returns the trip count of the loop as an affine expression if it is
/// expressible as one, and nullptr otherwise. The trip count expression is
/// simplified before returning.
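/// For example (illustrative), a loop with constant lower bound 0, constant
/// upper bound 127 and step 4 yields loopSpan = ub - lb + 1 = 128 and a trip
/// count of ceildiv(128, 4) = 32.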
AffineExpr mlir::getTripCountExpr(const ForStmt &forStmt) {
  // upper_bound - lower_bound + 1
  int64_t loopSpan;

  int64_t step = forStmt.getStep();
  auto *context = forStmt.getContext();

  if (forStmt.hasConstantBounds()) {
    int64_t lb = forStmt.getConstantLowerBound();
    int64_t ub = forStmt.getConstantUpperBound();
    loopSpan = ub - lb + 1;
  } else {
    auto lbMap = forStmt.getLowerBoundMap();
    auto ubMap = forStmt.getUpperBoundMap();
    // TODO(bondhugula): handle max/min of multiple expressions.
    if (lbMap.getNumResults() != 1 || ubMap.getNumResults() != 1)
      return nullptr;

    // TODO(bondhugula): handle bounds with different operands.
    // Bounds with different operand lists are unhandled for now.
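    // Subtracting the bound expressions below is only meaningful when both
    // maps are applied to the same operand list, so that identical dim and
    // symbol positions refer to the same values.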
    if (!forStmt.matchingBoundOperandList())
      return nullptr;

    // ub_expr - lb_expr + 1
    AffineExpr lbExpr(lbMap.getResult(0));
    AffineExpr ubExpr(ubMap.getResult(0));
    auto loopSpanExpr = simplifyAffineExpr(
        ubExpr - lbExpr + 1, std::max(lbMap.getNumDims(), ubMap.getNumDims()),
        std::max(lbMap.getNumSymbols(), ubMap.getNumSymbols()));
    auto cExpr = loopSpanExpr.dyn_cast<AffineConstantExpr>();
    if (!cExpr)
      return loopSpanExpr.ceilDiv(step);
    loopSpan = cExpr.getValue();
  }

  // 0 iteration loops.
  if (loopSpan < 0)
    return 0;

  return getAffineConstantExpr(static_cast<uint64_t>(ceilDiv(loopSpan, step)),
                               context);
}

/// Returns the trip count of the loop if it's a constant, None otherwise. This
/// method uses affine expression analysis (in turn using getTripCountExpr) and
/// is able to determine a constant trip count in non-trivial cases.
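/// For example (illustrative), a loop with lower bound map (d0) -> (d0), upper
/// bound map (d0) -> (d0 + 7), matching bound operands and step 1 has a
/// constant trip count of 8 even though neither bound is constant.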
llvm::Optional<uint64_t> mlir::getConstantTripCount(const ForStmt &forStmt) {
  auto tripCountExpr = getTripCountExpr(forStmt);

  if (!tripCountExpr)
    return None;

  if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>())
    return constExpr.getValue();

  return None;
}

/// Returns the greatest known integral divisor of the trip count. Affine
/// expression analysis is used (indirectly through getTripCountExpr), and this
/// method is thus able to determine non-trivial divisors.
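/// For example (illustrative), if the trip count expression simplifies to
/// 4 * s0 for some symbol s0, the largest known divisor is 4, which is enough
/// to justify an unroll or tile factor of 4 without knowing s0.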
uint64_t mlir::getLargestDivisorOfTripCount(const ForStmt &forStmt) {
  auto tripCountExpr = getTripCountExpr(forStmt);

  if (!tripCountExpr)
    return 1;

  if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>()) {
    uint64_t tripCount = constExpr.getValue();

    // 0 iteration loops (greatest divisor is 2^64 - 1).
    if (tripCount == 0)
      return ULONG_MAX;

    // The greatest divisor is the trip count.
    return tripCount;
  }

  // Trip count is not a known constant; return its largest known divisor.
  return tripCountExpr.getLargestKnownDivisor();
}

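// Returns true if the `dim`-th index of the access is invariant with respect
// to `input`: the index is neither `input` itself nor, through a composed
// AffineApplyOp, a function of `input`.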
bool mlir::isAccessInvariant(const MLValue &input, MemRefType memRefType,
                             ArrayRef<MLValue *> indices, unsigned dim) {
  assert(indices.size() == memRefType.getRank());
  assert(dim < indices.size());
  auto layoutMap = memRefType.getAffineMaps();
  assert(memRefType.getAffineMaps().size() <= 1);
  // TODO(ntv): remove dependency on Builder once we support non-identity
  // layout map.
  Builder b(memRefType.getContext());
  assert(layoutMap.empty() ||
         layoutMap[0] == b.getMultiDimIdentityMap(indices.size()));
  (void)layoutMap;

  SmallVector<OperationStmt *, 4> affineApplyOps;
  getReachableAffineApplyOps({indices[dim]}, affineApplyOps);

  if (affineApplyOps.empty()) {
    // Pointer equality test because of MLValue pointer semantics.
    return indices[dim] != &input;
  }

  assert(affineApplyOps.size() == 1 &&
         "CompositionAffineMapsPass must have "
         "been run: there should be at most one AffineApplyOp");
  auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
  // We need yet another level of indirection because the `dim` index of the
  // access may not correspond to the `dim` index of composeOp.
  unsigned idx = std::numeric_limits<unsigned>::max();
  unsigned numResults = composeOp->getNumResults();
  for (unsigned i = 0; i < numResults; ++i) {
    if (indices[dim] == composeOp->getResult(i)) {
      idx = i;
      break;
    }
  }
  assert(idx < std::numeric_limits<unsigned>::max());
  return !AffineValueMap(*composeOp)
              .isFunctionOf(idx, &const_cast<MLValue &>(input));
}

/// Determines whether a load or a store has a contiguous access along the
/// value `input`. Contiguous is defined as either invariant or varying only
/// along the fastest varying memory dimension.
// TODO(ntv): allow more advanced notions of contiguity (non-fastest varying,
// check strides, ...).
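// For example (illustrative), with fastestVaryingDim == 0, a two-dimensional
// access A[i, j] is contiguous with respect to `input` iff the first index i
// is invariant with respect to `input`; the last (fastest varying) index j is
// allowed to vary.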
template <typename LoadOrStoreOpPointer>
static bool isContiguousAccess(const MLValue &input,
                               LoadOrStoreOpPointer memoryOp,
                               unsigned fastestVaryingDim) {
  using namespace functional;
  auto indices = map([](SSAValue *val) { return dyn_cast<MLValue>(val); },
                     memoryOp->getIndices());
  auto memRefType = memoryOp->getMemRefType();
  for (unsigned d = 0, numIndices = indices.size(); d < numIndices; ++d) {
    if (fastestVaryingDim == (numIndices - 1) - d) {
      continue;
    }
    if (!isAccessInvariant(input, memRefType, indices, d)) {
      return false;
    }
  }
  return true;
}

template <typename LoadOrStoreOpPointer>
static bool isVectorElement(LoadOrStoreOpPointer memoryOp) {
  auto memRefType = memoryOp->getMemRefType();
  return memRefType.getElementType().template isa<VectorType>();
}

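// A loop is treated as vectorizable along `fastestVaryingDim` if it is a
// parallel or reduction loop, contains no conditionals, and every load/store
// in its body accesses a scalar (non-vector) element type contiguously along
// that dimension.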
bool mlir::isVectorizableLoop(const ForStmt &loop, unsigned fastestVaryingDim) {
  if (!matcher::isParallelLoop(loop) && !matcher::isReductionLoop(loop)) {
    return false;
  }

  // No vectorization across conditionals for now.
  auto conditionals = matcher::If();
  auto *forStmt = const_cast<ForStmt *>(&loop);
  auto conditionalsMatched = conditionals.match(forStmt);
  if (!conditionalsMatched.empty()) {
    return false;
  }

  auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
  auto loadAndStoresMatched = loadAndStores.match(forStmt);
  for (auto ls : loadAndStoresMatched) {
    auto *op = cast<OperationStmt>(ls.first);
    auto load = op->dyn_cast<LoadOp>();
    auto store = op->dyn_cast<StoreOp>();
    // Only scalar element types are considered vectorizable; every load/store
    // must be vectorizable for the loop to qualify as vectorizable.
    // TODO(ntv): ponder whether we want to be more general here.
    bool vector = load ? isVectorElement(load) : isVectorElement(store);
    if (vector) {
      return false;
    }
    bool contiguous = load ? isContiguousAccess(loop, load, fastestVaryingDim)
                           : isContiguousAccess(loop, store, fastestVaryingDim);
    if (!contiguous) {
      return false;
    }
  }
  return true;
}

/// Checks whether SSA dominance would be violated if a for stmt's body
/// statements are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO(mlir-team): extend this to check for memory-based dependence
// violation when we have the support.
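// For example (illustrative), shifting a statement that defines a value by 2
// while shifting one of its uses in the same block by 0 could let the use
// execute before its def once the shifts are materialized; requiring equal
// shift factors for a def and all its uses conservatively rules this out.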
bool mlir::isStmtwiseShiftValid(const ForStmt &forStmt,
                                ArrayRef<uint64_t> shifts) {
  assert(shifts.size() == forStmt.getStatements().size());
  unsigned s = 0;
  for (const auto &stmt : forStmt) {
    // A for or if stmt does not produce any def/results (that are used
    // outside).
    if (const auto *opStmt = dyn_cast<OperationStmt>(&stmt)) {
      for (unsigned i = 0, e = opStmt->getNumResults(); i < e; ++i) {
        const MLValue *result = opStmt->getResult(i);
        for (const StmtOperand &use : result->getUses()) {
          // If the use's owner has no ancestor statement in the block of
          // forStmt, there is no shift to check.
          // This is a naive approach; if performance becomes an issue, a map
          // from statements to their shifts could provide constant-time
          // lookup.
          if (auto *ancStmt = forStmt.findAncestorStmtInBlock(*use.getOwner()))
            if (shifts[s] != shifts[forStmt.findStmtPosInBlock(*ancStmt)])
              return false;
        }
      }
    }
    s++;
  }
  return true;
}