//===- LoopAnalysis.cpp - Misc loop analysis routines --------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This file implements miscellaneous loop analysis routines.
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/LoopAnalysis.h"

#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/Analysis/MLFunctionMatcher.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Statements.h"
#include "mlir/StandardOps/StandardOps.h"
#include "mlir/Support/Functional.h"
#include "mlir/Support/MathExtras.h"
#include "llvm/ADT/SmallString.h"

using namespace mlir;

/// Returns the trip count of the loop as an affine expression if it is
/// expressible as one, and nullptr otherwise. The trip count expression is
/// simplified before it is returned.
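///
/// For example, given an illustrative loop (not taken from this file)
///   for %i = 0 to 128 step 4 { ... }
/// the returned expression is the constant 32, i.e. (128 - 0) ceildiv 4.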
AffineExpr mlir::getTripCountExpr(const ForStmt &forStmt) {
  // upper_bound - lower_bound
  int64_t loopSpan;

  int64_t step = forStmt.getStep();
  auto *context = forStmt.getContext();

  if (forStmt.hasConstantBounds()) {
    int64_t lb = forStmt.getConstantLowerBound();
    int64_t ub = forStmt.getConstantUpperBound();
    loopSpan = ub - lb;
  } else {
    auto lbMap = forStmt.getLowerBoundMap();
    auto ubMap = forStmt.getUpperBoundMap();
    // TODO(bondhugula): handle max/min of multiple expressions.
    if (lbMap.getNumResults() != 1 || ubMap.getNumResults() != 1)
      return nullptr;

    // TODO(bondhugula): handle bounds with different operands.
    // Bounds have different operands, unhandled for now.
    if (!forStmt.matchingBoundOperandList())
      return nullptr;

    // ub_expr - lb_expr
    AffineExpr lbExpr(lbMap.getResult(0));
    AffineExpr ubExpr(ubMap.getResult(0));
    auto loopSpanExpr = simplifyAffineExpr(
        ubExpr - lbExpr, std::max(lbMap.getNumDims(), ubMap.getNumDims()),
        std::max(lbMap.getNumSymbols(), ubMap.getNumSymbols()));
    auto cExpr = loopSpanExpr.dyn_cast<AffineConstantExpr>();
    if (!cExpr)
      return loopSpanExpr.ceilDiv(step);
    loopSpan = cExpr.getValue();
  }

  // 0 iteration loops.
  if (loopSpan < 0)
    return 0;

  return getAffineConstantExpr(static_cast<uint64_t>(ceilDiv(loopSpan, step)),
                               context);
}

/// Returns the trip count of the loop if it's a constant, None otherwise. This
/// method uses affine expression analysis (in turn using getTripCountExpr) and
/// is able to determine constant trip counts in non-trivial cases.
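///
/// A minimal usage sketch (the unroll decision and `kUnrollThreshold` are
/// illustrative, not part of this file):
///   if (auto tripCount = getConstantTripCount(forStmt))
///     if (*tripCount <= kUnrollThreshold)
///       // ... fully unroll the loop ...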
llvm::Optional<uint64_t> mlir::getConstantTripCount(const ForStmt &forStmt) {
  auto tripCountExpr = getTripCountExpr(forStmt);

  if (!tripCountExpr)
    return None;

  if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>())
    return constExpr.getValue();

  return None;
}

/// Returns the greatest known integral divisor of the trip count. Affine
/// expression analysis is used (indirectly through getTripCountExpr), and
/// this method is thus able to determine non-trivial divisors.
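///
/// For example (illustrative): if the trip count simplifies to the affine
/// expression 4 * s0 for some symbol s0, the trip count itself is unknown but
/// its largest known divisor is 4; a constant trip count of 12 yields 12.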
uint64_t mlir::getLargestDivisorOfTripCount(const ForStmt &forStmt) {
  auto tripCountExpr = getTripCountExpr(forStmt);

  if (!tripCountExpr)
    return 1;

  if (auto constExpr = tripCountExpr.dyn_cast<AffineConstantExpr>()) {
    uint64_t tripCount = constExpr.getValue();

    // 0 iteration loops (greatest divisor is 2^64 - 1).
    if (tripCount == 0)
      return ULONG_MAX;

    // The greatest divisor is the trip count.
    return tripCount;
  }

  // Trip count is not a known constant; return its largest known divisor.
  return tripCountExpr.getLargestKnownDivisor();
}

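/// Returns true if the access at position `dim` of `indices` is invariant
/// with respect to `input`, i.e. `indices[dim]` is not a function of `input`.
/// Assumes the memref has at most one layout map, which must be the
/// multi-dimensional identity.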
bool mlir::isAccessInvariant(const MLValue &input, MemRefType memRefType,
                             ArrayRef<const MLValue *> indices, unsigned dim) {
  assert(indices.size() == memRefType.getRank());
  assert(dim < indices.size());
  auto layoutMap = memRefType.getAffineMaps();
  assert(memRefType.getAffineMaps().size() <= 1);
  // TODO(ntv): remove dependence on Builder once we support non-identity
  // layout map.
  Builder b(memRefType.getContext());
  assert(layoutMap.empty() ||
         layoutMap[0] == b.getMultiDimIdentityMap(indices.size()));
  (void)layoutMap;

  SmallVector<OperationStmt *, 4> affineApplyOps;
  getReachableAffineApplyOps({const_cast<MLValue *>(indices[dim])},
                             affineApplyOps);

  if (affineApplyOps.empty()) {
    // Pointer equality test because of MLValue pointer semantics.
    return indices[dim] != &input;
  }

  assert(affineApplyOps.size() == 1 &&
         "CompositionAffineMapsPass must have "
         "been run: there should be at most one AffineApplyOp");
  auto composeOp = affineApplyOps[0]->cast<AffineApplyOp>();
  // We need yet another level of indirection because the `dim` index of the
  // access may not correspond to the `dim` index of composeOp.
  unsigned idx = std::numeric_limits<unsigned>::max();
  unsigned numResults = composeOp->getNumResults();
  for (unsigned i = 0; i < numResults; ++i) {
    if (indices[dim] == composeOp->getResult(i)) {
      idx = i;
      break;
    }
  }
  assert(idx < std::numeric_limits<unsigned>::max());
  return !AffineValueMap(*composeOp)
              .isFunctionOf(idx, &const_cast<MLValue &>(input));
}

/// Determines whether a load or a store access is contiguous along the value
/// `input`, i.e. the access is either invariant along `input` or varies only
/// along the fastest varying memory dimension.
// TODO(ntv): allow more advanced notions of contiguity (non-fastest varying,
// check strides, ...).
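// For example (illustrative): a load from a 2-d memref with indices (%i, %j),
// checked along the loop induction variable %j with `fastestVaryingDim` == 0,
// is contiguous: only the innermost memref dimension (the last index) is
// allowed to vary with %j.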
template <typename LoadOrStoreOpPointer>
static bool isContiguousAccess(const MLValue &input,
                               LoadOrStoreOpPointer memoryOp,
                               unsigned fastestVaryingDim) {
  using namespace functional;
  auto indices = map([](const SSAValue *val) { return dyn_cast<MLValue>(val); },
                     memoryOp->getIndices());
  auto memRefType = memoryOp->getMemRefType();
  for (unsigned d = 0, numIndices = indices.size(); d < numIndices; ++d) {
    if (fastestVaryingDim == (numIndices - 1) - d) {
      continue;
    }
    if (!isAccessInvariant(input, memRefType, indices, d)) {
      return false;
    }
  }
  return true;
}

template <typename LoadOrStoreOpPointer>
static bool isVectorElement(LoadOrStoreOpPointer memoryOp) {
  auto memRefType = memoryOp->getMemRefType();
  return memRefType.getElementType().template isa<VectorType>();
}

// TODO(ntv): make the following into MLIR instructions, then use isa<>.
static bool isVectorTransferReadOrWrite(const Statement &stmt) {
  const auto *opStmt = cast<OperationStmt>(&stmt);
  llvm::SmallString<16> name(opStmt->getName().getStringRef());
  return name == kVectorTransferReadOpName ||
         name == kVectorTransferWriteOpName;
}

using VectorizableStmtFun =
    std::function<bool(const ForStmt &, const OperationStmt &)>;

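/// Returns true if `loop` is vectorizable under the extra per-statement
/// predicate `isVectorizableStmt`: the loop must be a parallel or reduction
/// loop, contain no conditionals and no vector transfer operations, and each
/// load/store in its body must have a scalar (non-vector) element type and
/// satisfy the predicate.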
static bool isVectorizableLoopWithCond(const ForStmt &loop,
                                       VectorizableStmtFun isVectorizableStmt) {
  if (!matcher::isParallelLoop(loop) && !matcher::isReductionLoop(loop)) {
    return false;
  }

  // No vectorization across conditionals for now.
  auto conditionals = matcher::If();
  auto *forStmt = const_cast<ForStmt *>(&loop);
  auto conditionalsMatched = conditionals.match(forStmt);
  if (!conditionalsMatched.empty()) {
    return false;
  }

  // No vectorization for loops that already contain vector transfer
  // operations, for now.
  auto vectorTransfers = matcher::Op(isVectorTransferReadOrWrite);
  auto vectorTransfersMatched = vectorTransfers.match(forStmt);
  if (!vectorTransfersMatched.empty()) {
    return false;
  }

  auto loadAndStores = matcher::Op(matcher::isLoadOrStore);
  auto loadAndStoresMatched = loadAndStores.match(forStmt);
  for (auto ls : loadAndStoresMatched) {
    auto *op = cast<OperationStmt>(ls.first);
    auto load = op->dyn_cast<LoadOp>();
    auto store = op->dyn_cast<StoreOp>();
    // Only scalar element types are considered vectorizable; every load/store
    // must be vectorizable for the loop to qualify.
    // TODO(ntv): ponder whether we want to be more general here.
    bool vector = load ? isVectorElement(load) : isVectorElement(store);
    if (vector) {
      return false;
    }
    if (!isVectorizableStmt(loop, *op)) {
      return false;
    }
  }
  return true;
}

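/// Checks whether `loop` is vectorizable with the additional constraint that
/// every load/store in its body accesses memory contiguously along
/// `fastestVaryingDim` (see isContiguousAccess).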
bool mlir::isVectorizableLoopAlongFastestVaryingMemRefDim(
    const ForStmt &loop, unsigned fastestVaryingDim) {
  VectorizableStmtFun fun(
      [fastestVaryingDim](const ForStmt &loop, const OperationStmt &op) {
        auto load = op.dyn_cast<LoadOp>();
        auto store = op.dyn_cast<StoreOp>();
        return load ? isContiguousAccess(loop, load, fastestVaryingDim)
                    : isContiguousAccess(loop, store, fastestVaryingDim);
      });
  return isVectorizableLoopWithCond(loop, fun);
}

bool mlir::isVectorizableLoop(const ForStmt &loop) {
  VectorizableStmtFun fun(
      // TODO: implement me
      [](const ForStmt &loop, const OperationStmt &op) { return true; });
  return isVectorizableLoopWithCond(loop, fun);
}

/// Checks whether SSA dominance would be violated if a for stmt's body
/// statements are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO(mlir-team): extend this to check for memory-based dependence
// violation when we have the support.
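//
// For example (illustrative): with two body statements where the second uses
// a result of the first, equal shifts such as {0, 0} or {1, 1} are valid,
// while {1, 0} would place the use before its def and is rejected.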
bool mlir::isStmtwiseShiftValid(const ForStmt &forStmt,
                                ArrayRef<uint64_t> shifts) {
  assert(shifts.size() == forStmt.getStatements().size());
  unsigned s = 0;
  for (const auto &stmt : forStmt) {
    // A for or if stmt does not produce any def/results (that are used
    // outside).
    if (const auto *opStmt = dyn_cast<OperationStmt>(&stmt)) {
      for (unsigned i = 0, e = opStmt->getNumResults(); i < e; ++i) {
        const MLValue *result = opStmt->getResult(i);
        for (const StmtOperand &use : result->getUses()) {
          // If an ancestor statement doesn't lie in the block of forStmt,
          // there is no shift to check.
          // This is a naive way. If performance becomes an issue, a map can
          // be used to store 'shifts' - to look up the shift for a statement
          // in constant time.
          if (auto *ancStmt = forStmt.findAncestorStmtInBlock(*use.getOwner()))
            if (shifts[s] != shifts[forStmt.findStmtPosInBlock(*ancStmt)])
              return false;
        }
      }
    }
    s++;
  }
  return true;
}