Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 1 | //===- LoopAnalysis.cpp - Misc loop analysis routines //-------------------===// |
| 2 | // |
| 3 | // Copyright 2019 The MLIR Authors. |
| 4 | // |
| 5 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | // you may not use this file except in compliance with the License. |
| 7 | // You may obtain a copy of the License at |
| 8 | // |
| 9 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | // |
| 11 | // Unless required by applicable law or agreed to in writing, software |
| 12 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | // See the License for the specific language governing permissions and |
| 15 | // limitations under the License. |
| 16 | // ============================================================================= |
| 17 | // |
| 18 | // This file implements miscellaneous loop analysis routines. |
| 19 | // |
| 20 | //===----------------------------------------------------------------------===// |
| 21 | |
#include "mlir/Analysis/LoopAnalysis.h"

#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/MLFunctionMatcher.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Statements.h"
#include "mlir/StandardOps/StandardOps.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/MathExtras.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"
#include <limits>
#include <type_traits>
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 38 | |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 39 | using namespace mlir; |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 40 | |
| 41 | /// Returns the trip count of the loop as an affine expression if the latter is |
| 42 | /// expressible as an affine expression, and nullptr otherwise. The trip count |
| 43 | /// expression is simplified before returning. |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 44 | AffineExpr mlir::getTripCountExpr(const ForStmt &forStmt) { |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 45 | // upper_bound - lower_bound |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 46 | int64_t loopSpan; |
| 47 | |
| 48 | int64_t step = forStmt.getStep(); |
| 49 | auto *context = forStmt.getContext(); |
| 50 | |
| 51 | if (forStmt.hasConstantBounds()) { |
| 52 | int64_t lb = forStmt.getConstantLowerBound(); |
| 53 | int64_t ub = forStmt.getConstantUpperBound(); |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 54 | loopSpan = ub - lb; |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 55 | } else { |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 56 | auto lbMap = forStmt.getLowerBoundMap(); |
| 57 | auto ubMap = forStmt.getUpperBoundMap(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 58 | // TODO(bondhugula): handle max/min of multiple expressions. |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 59 | if (lbMap.getNumResults() != 1 || ubMap.getNumResults() != 1) |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 60 | return nullptr; |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 61 | |
| 62 | // TODO(bondhugula): handle bounds with different operands. |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 63 | // Bounds have different operands, unhandled for now. |
Uday Bondhugula | 5912e87 | 2018-09-18 10:22:03 -0700 | [diff] [blame] | 64 | if (!forStmt.matchingBoundOperandList()) |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 65 | return nullptr; |
| 66 | |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 67 | // ub_expr - lb_expr |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 68 | AffineExpr lbExpr(lbMap.getResult(0)); |
| 69 | AffineExpr ubExpr(ubMap.getResult(0)); |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 70 | auto loopSpanExpr = simplifyAffineExpr( |
Nicolas Vasilache | ff30328 | 2018-11-07 05:44:50 -0800 | [diff] [blame] | 71 | ubExpr - lbExpr, std::max(lbMap.getNumDims(), ubMap.getNumDims()), |
Nicolas Vasilache | 75ed337 | 2018-10-09 16:39:24 -0700 | [diff] [blame] | 72 | std::max(lbMap.getNumSymbols(), ubMap.getNumSymbols())); |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 73 | auto cExpr = loopSpanExpr.dyn_cast<AffineConstantExpr>(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 74 | if (!cExpr) |
Nicolas Vasilache | bc74609 | 2018-10-08 10:20:25 -0700 | [diff] [blame] | 75 | return loopSpanExpr.ceilDiv(step); |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 76 | loopSpan = cExpr.getValue(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 77 | } |
| 78 | |
| 79 | // 0 iteration loops. |
Uday Bondhugula | ff5d6bd | 2018-09-27 18:03:27 -0700 | [diff] [blame] | 80 | if (loopSpan < 0) |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 81 | return 0; |
| 82 | |
Nicolas Vasilache | bc74609 | 2018-10-08 10:20:25 -0700 | [diff] [blame] | 83 | return getAffineConstantExpr(static_cast<uint64_t>(ceilDiv(loopSpan, step)), |
| 84 | context); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 85 | } |
| 86 | |
| 87 | /// Returns the trip count of the loop if it's a constant, None otherwise. This |
| 88 | /// method uses affine expression analysis (in turn using getTripCount) and is |
| 89 | /// able to determine constant trip count in non-trivial cases. |
| 90 | llvm::Optional<uint64_t> mlir::getConstantTripCount(const ForStmt &forStmt) { |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 91 | auto tripCountExpr = getTripCountExpr(forStmt); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 92 | |
Nicolas Vasilache | 32402e5 | 2018-10-08 08:09:50 -0700 | [diff] [blame] | 93 | if (!tripCountExpr) |
| 94 | return None; |
| 95 | |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 96 | if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>()) |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 97 | return constExpr.getValue(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 98 | |
| 99 | return None; |
| 100 | } |
| 101 | |
| 102 | /// Returns the greatest known integral divisor of the trip count. Affine |
| 103 | /// expression analysis is used (indirectly through getTripCount), and |
| 104 | /// this method is thus able to determine non-trivial divisors. |
| 105 | uint64_t mlir::getLargestDivisorOfTripCount(const ForStmt &forStmt) { |
Nicolas Vasilache | 5373b09 | 2018-10-03 15:39:12 -0700 | [diff] [blame] | 106 | auto tripCountExpr = getTripCountExpr(forStmt); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 107 | |
| 108 | if (!tripCountExpr) |
| 109 | return 1; |
| 110 | |
Nicolas Vasilache | fb11e0e | 2018-10-08 13:47:18 -0700 | [diff] [blame] | 111 | if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>()) { |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 112 | uint64_t tripCount = constExpr.getValue(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 113 | |
| 114 | // 0 iteration loops (greatest divisor is 2^64 - 1). |
| 115 | if (tripCount == 0) |
| 116 | return ULONG_MAX; |
| 117 | |
| 118 | // The greatest divisor is the trip count. |
| 119 | return tripCount; |
| 120 | } |
| 121 | |
| 122 | // Trip count is not a known constant; return its largest known divisor. |
Nicolas Vasilache | b771709 | 2018-10-09 10:59:27 -0700 | [diff] [blame] | 123 | return tripCountExpr.getLargestKnownDivisor(); |
Uday Bondhugula | cf4f4c4 | 2018-09-12 10:21:23 -0700 | [diff] [blame] | 124 | } |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 125 | |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 126 | bool mlir::isAccessInvariant(const MLValue &iv, const MLValue &index) { |
| 127 | assert(isa<ForStmt>(iv) && "iv must be a ForStmt"); |
| 128 | assert(index.getType().isa<IndexType>() && "index must be of IndexType"); |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 129 | SmallVector<OperationStmt *, 4> affineApplyOps; |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 130 | getReachableAffineApplyOps({const_cast<MLValue *>(&index)}, affineApplyOps); |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 131 | |
| 132 | if (affineApplyOps.empty()) { |
| 133 | // Pointer equality test because of MLValue pointer semantics. |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 134 | return &index != &iv; |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 135 | } |
| 136 | |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 137 | assert( |
| 138 | affineApplyOps.size() == 1 && |
| 139 | "CompositionAffineMapsPass must have been run: there should be at most " |
| 140 | "one AffineApplyOp"); |
| 141 | |
Feng Liu | ec065d7 | 2018-10-19 09:07:58 -0700 | [diff] [blame] | 142 | auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>(); |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 143 | // We need yet another level of indirection because the `dim` index of the |
| 144 | // access may not correspond to the `dim` index of composeOp. |
| 145 | unsigned idx = std::numeric_limits<unsigned>::max(); |
| 146 | unsigned numResults = composeOp->getNumResults(); |
| 147 | for (unsigned i = 0; i < numResults; ++i) { |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 148 | if (&index == composeOp->getResult(i)) { |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 149 | idx = i; |
| 150 | break; |
| 151 | } |
| 152 | } |
| 153 | assert(idx < std::numeric_limits<unsigned>::max()); |
| 154 | return !AffineValueMap(*composeOp) |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 155 | .isFunctionOf(idx, &const_cast<MLValue &>(iv)); |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 156 | } |
| 157 | |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 158 | llvm::DenseSet<const MLValue *> |
| 159 | mlir::getInvariantAccesses(const MLValue &iv, |
| 160 | llvm::ArrayRef<const MLValue *> indices) { |
| 161 | llvm::DenseSet<const MLValue *> res; |
| 162 | for (unsigned idx = 0, n = indices.size(); idx < n; ++idx) { |
| 163 | auto *val = indices[idx]; |
| 164 | if (isAccessInvariant(iv, *val)) { |
| 165 | res.insert(val); |
| 166 | } |
| 167 | } |
| 168 | return res; |
| 169 | } |
| 170 | |
| 171 | /// Given: |
| 172 | /// 1. an induction variable `iv` of type ForStmt; |
| 173 | /// 2. a `memoryOp` of type const LoadOp& or const StoreOp&; |
| 174 | /// 3. the index of the `fastestVaryingDim` along which to check; |
| 175 | /// determines whether `memoryOp`[`fastestVaryingDim`] is a contiguous access |
| 176 | /// along `iv`. |
| 177 | /// Contiguous is defined as either invariant or varying only along |
| 178 | /// `fastestVaryingDim`. |
| 179 | /// |
| 180 | /// Prerequisites: |
| 181 | /// 1. `iv` of the proper type; |
| 182 | /// 2. the MemRef accessed by `memoryOp` has no layout map or at most an |
| 183 | /// identity layout map. |
| 184 | /// |
| 185 | // TODO(ntv): check strides. |
| 186 | template <typename LoadOrStoreOp> |
| 187 | static bool isContiguousAccess(const MLValue &iv, const LoadOrStoreOp &memoryOp, |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 188 | unsigned fastestVaryingDim) { |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 189 | static_assert(std::is_same<LoadOrStoreOp, LoadOp>::value || |
| 190 | std::is_same<LoadOrStoreOp, StoreOp>::value, |
| 191 | "Must be called on either const LoadOp & or const StoreOp &"); |
| 192 | auto memRefType = memoryOp.getMemRefType(); |
| 193 | auto layoutMap = memRefType.getAffineMaps(); |
| 194 | (void)layoutMap; |
| 195 | Builder b(memoryOp.getOperation()->getContext()); |
| 196 | (void)b; |
| 197 | assert(layoutMap.empty() || |
| 198 | (layoutMap.size() == 1 && |
| 199 | layoutMap[0] == b.getMultiDimIdentityMap(layoutMap[0].getNumDims()))); |
| 200 | assert(fastestVaryingDim < memRefType.getRank()); |
| 201 | |
| 202 | auto indices = memoryOp.getIndices(); |
| 203 | // TODO(clattner): should iterator_range have a size method? |
| 204 | auto numIndices = indices.end() - indices.begin(); |
| 205 | unsigned d = 0; |
| 206 | for (auto index : indices) { |
| 207 | if (fastestVaryingDim == (numIndices - 1) - d++) { |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 208 | continue; |
| 209 | } |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 210 | if (!isAccessInvariant(iv, cast<MLValue>(*index))) { |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 211 | return false; |
| 212 | } |
| 213 | } |
| 214 | return true; |
| 215 | } |
| 216 | |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 217 | template <typename LoadOrStoreOpPointer> |
| 218 | static bool isVectorElement(LoadOrStoreOpPointer memoryOp) { |
River Riddle | 666dfbe | 2018-10-30 14:59:22 -0700 | [diff] [blame] | 219 | auto memRefType = memoryOp->getMemRefType(); |
| 220 | return memRefType.getElementType().template isa<VectorType>(); |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 221 | } |
| 222 | |
Nicolas Vasilache | 6b19746 | 2018-11-14 04:04:10 -0800 | [diff] [blame] | 223 | // TODO(ntv): make the following into MLIR instructions, then use isa<>. |
| 224 | static bool isVectorTransferReadOrWrite(const Statement &stmt) { |
| 225 | const auto *opStmt = cast<OperationStmt>(&stmt); |
Nicolas Vasilache | 9a19ada | 2018-12-03 15:21:27 -0800 | [diff] [blame] | 226 | return opStmt->isa<VectorTransferReadOp>() || |
| 227 | opStmt->isa<VectorTransferWriteOp>(); |
Nicolas Vasilache | 6b19746 | 2018-11-14 04:04:10 -0800 | [diff] [blame] | 228 | } |
| 229 | |
Nicolas Vasilache | d64816a | 2018-11-01 07:14:14 -0700 | [diff] [blame] | 230 | using VectorizableStmtFun = |
| 231 | std::function<bool(const ForStmt &, const OperationStmt &)>; |
| 232 | |
| 233 | static bool isVectorizableLoopWithCond(const ForStmt &loop, |
| 234 | VectorizableStmtFun isVectorizableStmt) { |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 235 | if (!matcher::isParallelLoop(loop) && !matcher::isReductionLoop(loop)) { |
| 236 | return false; |
| 237 | } |
| 238 | |
| 239 | // No vectorization across conditionals for now. |
| 240 | auto conditionals = matcher::If(); |
| 241 | auto *forStmt = const_cast<ForStmt *>(&loop); |
| 242 | auto conditionalsMatched = conditionals.match(forStmt); |
| 243 | if (!conditionalsMatched.empty()) { |
| 244 | return false; |
| 245 | } |
| 246 | |
Nicolas Vasilache | 6b19746 | 2018-11-14 04:04:10 -0800 | [diff] [blame] | 247 | auto vectorTransfers = matcher::Op(isVectorTransferReadOrWrite); |
| 248 | auto vectorTransfersMatched = vectorTransfers.match(forStmt); |
| 249 | if (!vectorTransfersMatched.empty()) { |
| 250 | return false; |
| 251 | } |
| 252 | |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 253 | auto loadAndStores = matcher::Op(matcher::isLoadOrStore); |
| 254 | auto loadAndStoresMatched = loadAndStores.match(forStmt); |
| 255 | for (auto ls : loadAndStoresMatched) { |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 256 | auto *op = cast<OperationStmt>(ls.first); |
Feng Liu | ec065d7 | 2018-10-19 09:07:58 -0700 | [diff] [blame] | 257 | auto load = op->dyn_cast<LoadOp>(); |
| 258 | auto store = op->dyn_cast<StoreOp>(); |
Nicolas Vasilache | 078d9b9 | 2018-10-30 07:54:23 -0700 | [diff] [blame] | 259 | // Only scalar types are considered vectorizable, all load/store must be |
| 260 | // vectorizable for a loop to qualify as vectorizable. |
| 261 | // TODO(ntv): ponder whether we want to be more general here. |
| 262 | bool vector = load ? isVectorElement(load) : isVectorElement(store); |
| 263 | if (vector) { |
| 264 | return false; |
| 265 | } |
Nicolas Vasilache | d64816a | 2018-11-01 07:14:14 -0700 | [diff] [blame] | 266 | if (!isVectorizableStmt(loop, *op)) { |
Nicolas Vasilache | fd8d256 | 2018-10-17 18:01:44 -0700 | [diff] [blame] | 267 | return false; |
| 268 | } |
| 269 | } |
| 270 | return true; |
| 271 | } |
Uday Bondhugula | 861fe64 | 2018-10-18 11:14:26 -0700 | [diff] [blame] | 272 | |
Nicolas Vasilache | d64816a | 2018-11-01 07:14:14 -0700 | [diff] [blame] | 273 | bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim( |
| 274 | const ForStmt &loop, unsigned fastestVaryingDim) { |
| 275 | VectorizableStmtFun fun( |
| 276 | [fastestVaryingDim](const ForStmt &loop, const OperationStmt &op) { |
| 277 | auto load = op.dyn_cast<LoadOp>(); |
| 278 | auto store = op.dyn_cast<StoreOp>(); |
Nicolas Vasilache | 787a93c | 2018-12-06 11:37:25 -0800 | [diff] [blame] | 279 | return load ? isContiguousAccess(loop, *load, fastestVaryingDim) |
| 280 | : isContiguousAccess(loop, *store, fastestVaryingDim); |
Nicolas Vasilache | d64816a | 2018-11-01 07:14:14 -0700 | [diff] [blame] | 281 | }); |
| 282 | return isVectorizableLoopWithCond(loop, fun); |
| 283 | } |
| 284 | |
| 285 | bool mlir::isVectorizableLoop(const ForStmt &loop) { |
| 286 | VectorizableStmtFun fun( |
| 287 | // TODO: implement me |
| 288 | [](const ForStmt &loop, const OperationStmt &op) { return true; }); |
| 289 | return isVectorizableLoopWithCond(loop, fun); |
| 290 | } |
| 291 | |
Uday Bondhugula | 861fe64 | 2018-10-18 11:14:26 -0700 | [diff] [blame] | 292 | /// Checks whether SSA dominance would be violated if a for stmt's body |
| 293 | /// statements are shifted by the specified shifts. This method checks if a |
| 294 | /// 'def' and all its uses have the same shift factor. |
| 295 | // TODO(mlir-team): extend this to check for memory-based dependence |
| 296 | // violation when we have the support. |
| 297 | bool mlir::isStmtwiseShiftValid(const ForStmt &forStmt, |
| 298 | ArrayRef<uint64_t> shifts) { |
| 299 | assert(shifts.size() == forStmt.getStatements().size()); |
| 300 | unsigned s = 0; |
| 301 | for (const auto &stmt : forStmt) { |
| 302 | // A for or if stmt does not produce any def/results (that are used |
| 303 | // outside). |
| 304 | if (const auto *opStmt = dyn_cast<OperationStmt>(&stmt)) { |
| 305 | for (unsigned i = 0, e = opStmt->getNumResults(); i < e; ++i) { |
| 306 | const MLValue *result = opStmt->getResult(i); |
| 307 | for (const StmtOperand &use : result->getUses()) { |
| 308 | // If an ancestor statement doesn't lie in the block of forStmt, there |
| 309 | // is no shift to check. |
| 310 | // This is a naive way. If performance becomes an issue, a map can |
| 311 | // be used to store 'shifts' - to look up the shift for a statement in |
| 312 | // constant time. |
| 313 | if (auto *ancStmt = forStmt.findAncestorStmtInBlock(*use.getOwner())) |
| 314 | if (shifts[s] != shifts[forStmt.findStmtPosInBlock(*ancStmt)]) |
| 315 | return false; |
| 316 | } |
| 317 | } |
| 318 | } |
| 319 | s++; |
| 320 | } |
| 321 | return true; |
| 322 | } |