blob: 253d5443b95696021a03bf400c48809497000e0d [file] [log] [blame]
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -07001//===- LoopAnalysis.cpp - Misc loop analysis routines //-------------------===//
2//
3// Copyright 2019 The MLIR Authors.
4//
5// Licensed under the Apache License, Version 2.0 (the "License");
6// you may not use this file except in compliance with the License.
7// You may obtain a copy of the License at
8//
9// http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing, software
12// distributed under the License is distributed on an "AS IS" BASIS,
13// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14// See the License for the specific language governing permissions and
15// limitations under the License.
16// =============================================================================
17//
18// This file implements miscellaneous loop analysis routines.
19//
20//===----------------------------------------------------------------------===//
21
#include "mlir/Analysis/LoopAnalysis.h"

#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/MLFunctionMatcher.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Statements.h"
#include "mlir/StandardOps/StandardOps.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/MathExtras.h"

#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallString.h"

#include <limits>
#include <type_traits>
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070038
Nicolas Vasilache5373b092018-10-03 15:39:12 -070039using namespace mlir;
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070040
41/// Returns the trip count of the loop as an affine expression if the latter is
42/// expressible as an affine expression, and nullptr otherwise. The trip count
43/// expression is simplified before returning.
Nicolas Vasilachefb11e0e2018-10-08 13:47:18 -070044AffineExpr mlir::getTripCountExpr(const ForStmt &forStmt) {
Nicolas Vasilacheff303282018-11-07 05:44:50 -080045 // upper_bound - lower_bound
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070046 int64_t loopSpan;
47
48 int64_t step = forStmt.getStep();
49 auto *context = forStmt.getContext();
50
51 if (forStmt.hasConstantBounds()) {
52 int64_t lb = forStmt.getConstantLowerBound();
53 int64_t ub = forStmt.getConstantUpperBound();
Nicolas Vasilacheff303282018-11-07 05:44:50 -080054 loopSpan = ub - lb;
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070055 } else {
Nicolas Vasilache75ed3372018-10-09 16:39:24 -070056 auto lbMap = forStmt.getLowerBoundMap();
57 auto ubMap = forStmt.getUpperBoundMap();
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070058 // TODO(bondhugula): handle max/min of multiple expressions.
Nicolas Vasilache75ed3372018-10-09 16:39:24 -070059 if (lbMap.getNumResults() != 1 || ubMap.getNumResults() != 1)
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070060 return nullptr;
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070061
62 // TODO(bondhugula): handle bounds with different operands.
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070063 // Bounds have different operands, unhandled for now.
Uday Bondhugula5912e872018-09-18 10:22:03 -070064 if (!forStmt.matchingBoundOperandList())
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070065 return nullptr;
66
Nicolas Vasilacheff303282018-11-07 05:44:50 -080067 // ub_expr - lb_expr
Nicolas Vasilache75ed3372018-10-09 16:39:24 -070068 AffineExpr lbExpr(lbMap.getResult(0));
69 AffineExpr ubExpr(ubMap.getResult(0));
Nicolas Vasilache5373b092018-10-03 15:39:12 -070070 auto loopSpanExpr = simplifyAffineExpr(
Nicolas Vasilacheff303282018-11-07 05:44:50 -080071 ubExpr - lbExpr, std::max(lbMap.getNumDims(), ubMap.getNumDims()),
Nicolas Vasilache75ed3372018-10-09 16:39:24 -070072 std::max(lbMap.getNumSymbols(), ubMap.getNumSymbols()));
Nicolas Vasilachefb11e0e2018-10-08 13:47:18 -070073 auto cExpr = loopSpanExpr.dyn_cast<AffineConstantExpr>();
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070074 if (!cExpr)
Nicolas Vasilachebc746092018-10-08 10:20:25 -070075 return loopSpanExpr.ceilDiv(step);
Nicolas Vasilacheb7717092018-10-09 10:59:27 -070076 loopSpan = cExpr.getValue();
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070077 }
78
79 // 0 iteration loops.
Uday Bondhugulaff5d6bd2018-09-27 18:03:27 -070080 if (loopSpan < 0)
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070081 return 0;
82
Nicolas Vasilachebc746092018-10-08 10:20:25 -070083 return getAffineConstantExpr(static_cast<uint64_t>(ceilDiv(loopSpan, step)),
84 context);
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070085}
86
87/// Returns the trip count of the loop if it's a constant, None otherwise. This
88/// method uses affine expression analysis (in turn using getTripCount) and is
89/// able to determine constant trip count in non-trivial cases.
90llvm::Optional<uint64_t> mlir::getConstantTripCount(const ForStmt &forStmt) {
Nicolas Vasilache5373b092018-10-03 15:39:12 -070091 auto tripCountExpr = getTripCountExpr(forStmt);
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070092
Nicolas Vasilache32402e52018-10-08 08:09:50 -070093 if (!tripCountExpr)
94 return None;
95
Nicolas Vasilachefb11e0e2018-10-08 13:47:18 -070096 if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>())
Nicolas Vasilacheb7717092018-10-09 10:59:27 -070097 return constExpr.getValue();
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -070098
99 return None;
100}
101
102/// Returns the greatest known integral divisor of the trip count. Affine
103/// expression analysis is used (indirectly through getTripCount), and
104/// this method is thus able to determine non-trivial divisors.
105uint64_t mlir::getLargestDivisorOfTripCount(const ForStmt &forStmt) {
Nicolas Vasilache5373b092018-10-03 15:39:12 -0700106 auto tripCountExpr = getTripCountExpr(forStmt);
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -0700107
108 if (!tripCountExpr)
109 return 1;
110
Nicolas Vasilachefb11e0e2018-10-08 13:47:18 -0700111 if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>()) {
Nicolas Vasilacheb7717092018-10-09 10:59:27 -0700112 uint64_t tripCount = constExpr.getValue();
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -0700113
114 // 0 iteration loops (greatest divisor is 2^64 - 1).
115 if (tripCount == 0)
116 return ULONG_MAX;
117
118 // The greatest divisor is the trip count.
119 return tripCount;
120 }
121
122 // Trip count is not a known constant; return its largest known divisor.
Nicolas Vasilacheb7717092018-10-09 10:59:27 -0700123 return tripCountExpr.getLargestKnownDivisor();
Uday Bondhugulacf4f4c42018-09-12 10:21:23 -0700124}
Nicolas Vasilachefd8d2562018-10-17 18:01:44 -0700125
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800126bool mlir::isAccessInvariant(const MLValue &iv, const MLValue &index) {
127 assert(isa<ForStmt>(iv) && "iv must be a ForStmt");
128 assert(index.getType().isa<IndexType>() && "index must be of IndexType");
Nicolas Vasilachefd8d2562018-10-17 18:01:44 -0700129 SmallVector<OperationStmt *, 4> affineApplyOps;
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800130 getReachableAffineApplyOps({const_cast<MLValue *>(&index)}, affineApplyOps);
Nicolas Vasilachefd8d2562018-10-17 18:01:44 -0700131
132 if (affineApplyOps.empty()) {
133 // Pointer equality test because of MLValue pointer semantics.
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800134 return &index != &iv;
Nicolas Vasilachefd8d2562018-10-17 18:01:44 -0700135 }
136
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800137 assert(
138 affineApplyOps.size() == 1 &&
139 "CompositionAffineMapsPass must have been run: there should be at most "
140 "one AffineApplyOp");
141
Feng Liuec065d72018-10-19 09:07:58 -0700142 auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
Nicolas Vasilache078d9b92018-10-30 07:54:23 -0700143 // We need yet another level of indirection because the `dim` index of the
144 // access may not correspond to the `dim` index of composeOp.
145 unsigned idx = std::numeric_limits<unsigned>::max();
146 unsigned numResults = composeOp->getNumResults();
147 for (unsigned i = 0; i < numResults; ++i) {
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800148 if (&index == composeOp->getResult(i)) {
Nicolas Vasilache078d9b92018-10-30 07:54:23 -0700149 idx = i;
150 break;
151 }
152 }
153 assert(idx < std::numeric_limits<unsigned>::max());
154 return !AffineValueMap(*composeOp)
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800155 .isFunctionOf(idx, &const_cast<MLValue &>(iv));
Nicolas Vasilachefd8d2562018-10-17 18:01:44 -0700156}
157
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800158llvm::DenseSet<const MLValue *>
159mlir::getInvariantAccesses(const MLValue &iv,
160 llvm::ArrayRef<const MLValue *> indices) {
161 llvm::DenseSet<const MLValue *> res;
162 for (unsigned idx = 0, n = indices.size(); idx < n; ++idx) {
163 auto *val = indices[idx];
164 if (isAccessInvariant(iv, *val)) {
165 res.insert(val);
166 }
167 }
168 return res;
169}
170
/// Given:
///   1. an induction variable `iv` of type ForStmt;
///   2. a `memoryOp` of type const LoadOp& or const StoreOp&;
///   3. the index of the `fastestVaryingDim` along which to check;
/// determines whether `memoryOp`[`fastestVaryingDim`] is a contiguous access
/// along `iv`.
/// Contiguous is defined as either invariant or varying only along
/// `fastestVaryingDim`.
///
/// Prerequisites:
///   1. `iv` of the proper type;
///   2. the MemRef accessed by `memoryOp` has no layout map or at most an
///      identity layout map.
///
// TODO(ntv): check strides.
template <typename LoadOrStoreOp>
static bool isContiguousAccess(const MLValue &iv, const LoadOrStoreOp &memoryOp,
                               unsigned fastestVaryingDim) {
  static_assert(std::is_same<LoadOrStoreOp, LoadOp>::value ||
                    std::is_same<LoadOrStoreOp, StoreOp>::value,
                "Must be called on either const LoadOp & or const StoreOp &");
  auto memRefType = memoryOp.getMemRefType();
  auto layoutMap = memRefType.getAffineMaps();
  // layoutMap and b are only used inside asserts; void-cast keeps NDEBUG
  // builds warning-free.
  (void)layoutMap;
  Builder b(memoryOp.getOperation()->getContext());
  (void)b;
  // Enforce prerequisite 2: no layout map, or a single identity map.
  assert(layoutMap.empty() ||
         (layoutMap.size() == 1 &&
          layoutMap[0] == b.getMultiDimIdentityMap(layoutMap[0].getNumDims())));
  assert(fastestVaryingDim < memRefType.getRank());

  auto indices = memoryOp.getIndices();
  // TODO(clattner): should iterator_range have a size method?
  auto numIndices = indices.end() - indices.begin();
  unsigned d = 0;
  for (auto index : indices) {
    // `fastestVaryingDim` counts from the fastest-varying (innermost) end,
    // while `d` walks from the outermost index, hence the (numIndices-1)-d
    // reversal. The chosen dimension is allowed to vary with `iv`.
    if (fastestVaryingDim == (numIndices - 1) - d++) {
      continue;
    }
    // Every other subscript must be invariant with respect to `iv`.
    if (!isAccessInvariant(iv, cast<MLValue>(*index))) {
      return false;
    }
  }
  return true;
}
216
Nicolas Vasilache078d9b92018-10-30 07:54:23 -0700217template <typename LoadOrStoreOpPointer>
218static bool isVectorElement(LoadOrStoreOpPointer memoryOp) {
River Riddle666dfbe2018-10-30 14:59:22 -0700219 auto memRefType = memoryOp->getMemRefType();
220 return memRefType.getElementType().template isa<VectorType>();
Nicolas Vasilache078d9b92018-10-30 07:54:23 -0700221}
222
Nicolas Vasilache6b197462018-11-14 04:04:10 -0800223// TODO(ntv): make the following into MLIR instructions, then use isa<>.
224static bool isVectorTransferReadOrWrite(const Statement &stmt) {
225 const auto *opStmt = cast<OperationStmt>(&stmt);
Nicolas Vasilache9a19ada2018-12-03 15:21:27 -0800226 return opStmt->isa<VectorTransferReadOp>() ||
227 opStmt->isa<VectorTransferWriteOp>();
Nicolas Vasilache6b197462018-11-14 04:04:10 -0800228}
229
Nicolas Vasilached64816a2018-11-01 07:14:14 -0700230using VectorizableStmtFun =
231 std::function<bool(const ForStmt &, const OperationStmt &)>;
232
233static bool isVectorizableLoopWithCond(const ForStmt &loop,
234 VectorizableStmtFun isVectorizableStmt) {
Nicolas Vasilache078d9b92018-10-30 07:54:23 -0700235 if (!matcher::isParallelLoop(loop) && !matcher::isReductionLoop(loop)) {
236 return false;
237 }
238
239 // No vectorization across conditionals for now.
240 auto conditionals = matcher::If();
241 auto *forStmt = const_cast<ForStmt *>(&loop);
242 auto conditionalsMatched = conditionals.match(forStmt);
243 if (!conditionalsMatched.empty()) {
244 return false;
245 }
246
Nicolas Vasilache6b197462018-11-14 04:04:10 -0800247 auto vectorTransfers = matcher::Op(isVectorTransferReadOrWrite);
248 auto vectorTransfersMatched = vectorTransfers.match(forStmt);
249 if (!vectorTransfersMatched.empty()) {
250 return false;
251 }
252
Nicolas Vasilache078d9b92018-10-30 07:54:23 -0700253 auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
254 auto loadAndStoresMatched = loadAndStores.match(forStmt);
255 for (auto ls : loadAndStoresMatched) {
Nicolas Vasilachefd8d2562018-10-17 18:01:44 -0700256 auto *op = cast<OperationStmt>(ls.first);
Feng Liuec065d72018-10-19 09:07:58 -0700257 auto load = op->dyn_cast<LoadOp>();
258 auto store = op->dyn_cast<StoreOp>();
Nicolas Vasilache078d9b92018-10-30 07:54:23 -0700259 // Only scalar types are considered vectorizable, all load/store must be
260 // vectorizable for a loop to qualify as vectorizable.
261 // TODO(ntv): ponder whether we want to be more general here.
262 bool vector = load ? isVectorElement(load) : isVectorElement(store);
263 if (vector) {
264 return false;
265 }
Nicolas Vasilached64816a2018-11-01 07:14:14 -0700266 if (!isVectorizableStmt(loop, *op)) {
Nicolas Vasilachefd8d2562018-10-17 18:01:44 -0700267 return false;
268 }
269 }
270 return true;
271}
Uday Bondhugula861fe642018-10-18 11:14:26 -0700272
Nicolas Vasilached64816a2018-11-01 07:14:14 -0700273bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim(
274 const ForStmt &loop, unsigned fastestVaryingDim) {
275 VectorizableStmtFun fun(
276 [fastestVaryingDim](const ForStmt &loop, const OperationStmt &op) {
277 auto load = op.dyn_cast<LoadOp>();
278 auto store = op.dyn_cast<StoreOp>();
Nicolas Vasilache787a93c2018-12-06 11:37:25 -0800279 return load ? isContiguousAccess(loop, *load, fastestVaryingDim)
280 : isContiguousAccess(loop, *store, fastestVaryingDim);
Nicolas Vasilached64816a2018-11-01 07:14:14 -0700281 });
282 return isVectorizableLoopWithCond(loop, fun);
283}
284
285bool mlir::isVectorizableLoop(const ForStmt &loop) {
286 VectorizableStmtFun fun(
287 // TODO: implement me
288 [](const ForStmt &loop, const OperationStmt &op) { return true; });
289 return isVectorizableLoopWithCond(loop, fun);
290}
291
Uday Bondhugula861fe642018-10-18 11:14:26 -0700292/// Checks whether SSA dominance would be violated if a for stmt's body
293/// statements are shifted by the specified shifts. This method checks if a
294/// 'def' and all its uses have the same shift factor.
295// TODO(mlir-team): extend this to check for memory-based dependence
296// violation when we have the support.
297bool mlir::isStmtwiseShiftValid(const ForStmt &forStmt,
298 ArrayRef<uint64_t> shifts) {
299 assert(shifts.size() == forStmt.getStatements().size());
300 unsigned s = 0;
301 for (const auto &stmt : forStmt) {
302 // A for or if stmt does not produce any def/results (that are used
303 // outside).
304 if (const auto *opStmt = dyn_cast<OperationStmt>(&stmt)) {
305 for (unsigned i = 0, e = opStmt->getNumResults(); i < e; ++i) {
306 const MLValue *result = opStmt->getResult(i);
307 for (const StmtOperand &use : result->getUses()) {
308 // If an ancestor statement doesn't lie in the block of forStmt, there
309 // is no shift to check.
310 // This is a naive way. If performance becomes an issue, a map can
311 // be used to store 'shifts' - to look up the shift for a statement in
312 // constant time.
313 if (auto *ancStmt = forStmt.findAncestorStmtInBlock(*use.getOwner()))
314 if (shifts[s] != shifts[forStmt.findStmtPosInBlock(*ancStmt)])
315 return false;
316 }
317 }
318 }
319 s++;
320 }
321 return true;
322}