blob: 3524909617267e2d11f15858f1dbd916e2c83499 [file] [log] [blame]
Chris Lattnerf7e22732018-06-22 22:03:48 -07001//===- MLIRContext.cpp - MLIR Type Classes --------------------------------===//
2//
3// Copyright 2019 The MLIR Authors.
4//
5// Licensed under the Apache License, Version 2.0 (the "License");
6// you may not use this file except in compliance with the License.
7// You may obtain a copy of the License at
8//
9// http://www.apache.org/licenses/LICENSE-2.0
10//
11// Unless required by applicable law or agreed to in writing, software
12// distributed under the License is distributed on an "AS IS" BASIS,
13// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14// See the License for the specific language governing permissions and
15// limitations under the License.
16// =============================================================================
17
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Identifier.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/STLExtras.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
#include <cstring>
28using namespace mlir;
29using namespace llvm;
30
31namespace {
32struct FunctionTypeKeyInfo : DenseMapInfo<FunctionType*> {
33 // Functions are uniqued based on their inputs and results.
34 using KeyTy = std::pair<ArrayRef<Type*>, ArrayRef<Type*>>;
35 using DenseMapInfo<FunctionType*>::getHashValue;
36 using DenseMapInfo<FunctionType*>::isEqual;
37
38 static unsigned getHashValue(KeyTy key) {
39 return hash_combine(hash_combine_range(key.first.begin(), key.first.end()),
40 hash_combine_range(key.second.begin(),
41 key.second.end()));
42 }
43
44 static bool isEqual(const KeyTy &lhs, const FunctionType *rhs) {
45 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
46 return false;
47 return lhs == KeyTy(rhs->getInputs(), rhs->getResults());
48 }
49};
Uday Bondhugula015cbb12018-07-03 20:16:08 -070050
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070051struct AffineMapKeyInfo : DenseMapInfo<AffineMap *> {
Uday Bondhugula015cbb12018-07-03 20:16:08 -070052 // Affine maps are uniqued based on their dim/symbol counts and affine
53 // expressions.
Chris Lattner36b4ed12018-07-04 10:43:29 -070054 using KeyTy = std::tuple<unsigned, unsigned, ArrayRef<AffineExpr *>>;
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070055 using DenseMapInfo<AffineMap *>::getHashValue;
56 using DenseMapInfo<AffineMap *>::isEqual;
57
58 static unsigned getHashValue(KeyTy key) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -070059 return hash_combine(
Chris Lattner36b4ed12018-07-04 10:43:29 -070060 std::get<0>(key), std::get<1>(key),
61 hash_combine_range(std::get<2>(key).begin(), std::get<2>(key).end()));
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070062 }
63
Uday Bondhugula015cbb12018-07-03 20:16:08 -070064 static bool isEqual(const KeyTy &lhs, const AffineMap *rhs) {
65 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
66 return false;
Chris Lattner36b4ed12018-07-04 10:43:29 -070067 return lhs == std::make_tuple(rhs->getNumDims(), rhs->getNumSymbols(),
68 rhs->getResults());
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070069 }
70};
71
Chris Lattnerf7e22732018-06-22 22:03:48 -070072struct VectorTypeKeyInfo : DenseMapInfo<VectorType*> {
73 // Vectors are uniqued based on their element type and shape.
74 using KeyTy = std::pair<Type*, ArrayRef<unsigned>>;
75 using DenseMapInfo<VectorType*>::getHashValue;
76 using DenseMapInfo<VectorType*>::isEqual;
77
78 static unsigned getHashValue(KeyTy key) {
79 return hash_combine(DenseMapInfo<Type*>::getHashValue(key.first),
80 hash_combine_range(key.second.begin(),
81 key.second.end()));
82 }
83
84 static bool isEqual(const KeyTy &lhs, const VectorType *rhs) {
85 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
86 return false;
87 return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
88 }
89};
Chris Lattner36b4ed12018-07-04 10:43:29 -070090
MLIR Team355ec862018-06-23 18:09:09 -070091struct RankedTensorTypeKeyInfo : DenseMapInfo<RankedTensorType*> {
92 // Ranked tensors are uniqued based on their element type and shape.
93 using KeyTy = std::pair<Type*, ArrayRef<int>>;
94 using DenseMapInfo<RankedTensorType*>::getHashValue;
95 using DenseMapInfo<RankedTensorType*>::isEqual;
96
97 static unsigned getHashValue(KeyTy key) {
98 return hash_combine(DenseMapInfo<Type*>::getHashValue(key.first),
99 hash_combine_range(key.second.begin(),
100 key.second.end()));
101 }
102
103 static bool isEqual(const KeyTy &lhs, const RankedTensorType *rhs) {
104 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
105 return false;
106 return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
107 }
108};
Chris Lattner36b4ed12018-07-04 10:43:29 -0700109
110struct ArrayAttrKeyInfo : DenseMapInfo<ArrayAttr*> {
111 // Array attributes are uniqued based on their elements.
112 using KeyTy = ArrayRef<Attribute*>;
113 using DenseMapInfo<ArrayAttr*>::getHashValue;
114 using DenseMapInfo<ArrayAttr*>::isEqual;
115
116 static unsigned getHashValue(KeyTy key) {
117 return hash_combine_range(key.begin(), key.end());
118 }
119
120 static bool isEqual(const KeyTy &lhs, const ArrayAttr *rhs) {
121 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
122 return false;
123 return lhs == rhs->getValue();
124 }
125};
Chris Lattnerf7e22732018-06-22 22:03:48 -0700126} // end anonymous namespace.
127
128
namespace mlir {
/// This is the implementation of the MLIRContext class, using the pImpl idiom.
/// This class is completely private to this file, so everything is public.
///
/// All uniqued objects are allocated in `allocator` and therefore live as
/// long as the context itself; nothing here is ever individually freed.
class MLIRContextImpl {
public:
  /// We put immortal objects into this allocator.
  llvm::BumpPtrAllocator allocator;

  /// These are identifiers uniqued into this MLIRContext. Only the key is
  /// meaningful; the char value is an unused placeholder.
  llvm::StringMap<char, llvm::BumpPtrAllocator&> identifiers;

  // Primitive type uniquing: one singleton slot per primitive kind, filled
  // lazily on first request.
  PrimitiveType *primitives[int(Type::Kind::LAST_PRIMITIVE_TYPE)+1] = {nullptr};

  // Affine map uniquing.
  using AffineMapSet = DenseSet<AffineMap *, AffineMapKeyInfo>;
  AffineMapSet affineMaps;

  // Affine binary op expression uniquing, keyed by (kind, lhs, rhs).
  // TODO: figure out uniquing of dimensional or symbolic identifiers.
  DenseMap<std::tuple<unsigned, AffineExpr *, AffineExpr *>,
           AffineBinaryOpExpr *>
      affineExprs;

  /// Integer type uniquing, keyed by bit width.
  DenseMap<unsigned, IntegerType*> integers;

  /// Function type uniquing.
  using FunctionTypeSet = DenseSet<FunctionType*, FunctionTypeKeyInfo>;
  FunctionTypeSet functions;

  /// Vector type uniquing.
  using VectorTypeSet = DenseSet<VectorType*, VectorTypeKeyInfo>;
  VectorTypeSet vectors;

  /// Ranked tensor type uniquing.
  using RankedTensorTypeSet = DenseSet<RankedTensorType*,
                                       RankedTensorTypeKeyInfo>;
  RankedTensorTypeSet rankedTensors;

  /// Unranked tensor type uniquing, keyed by element type.
  DenseMap<Type*, UnrankedTensorType*> unrankedTensors;

  // Attribute uniquing.
  BoolAttr *boolAttrs[2] = { nullptr };
  DenseMap<int64_t, IntegerAttr*> integerAttrs;
  // Float attributes are keyed on the bit pattern of the double, not its
  // value, so e.g. -0.0 and 0.0 stay distinct.
  DenseMap<int64_t, FloatAttr*> floatAttrs;
  StringMap<StringAttr*> stringAttrs;
  using ArrayAttrSet = DenseSet<ArrayAttr*, ArrayAttrKeyInfo>;
  ArrayAttrSet arrayAttrs;

public:
  // The identifier table shares the context-wide bump allocator.
  MLIRContextImpl() : identifiers(allocator) {}

  /// Copy the specified array of elements into memory managed by our bump
  /// pointer allocator. This assumes the elements are all PODs.
  template<typename T>
  ArrayRef<T> copyInto(ArrayRef<T> elements) {
    auto result = allocator.Allocate<T>(elements.size());
    std::uninitialized_copy(elements.begin(), elements.end(), result);
    return ArrayRef<T>(result, elements.size());
  }
};
} // end namespace mlir
193
194MLIRContext::MLIRContext() : impl(new MLIRContextImpl()) {
195}
196
197MLIRContext::~MLIRContext() {
198}
199
200
Chris Lattnered65a732018-06-28 20:45:33 -0700201//===----------------------------------------------------------------------===//
Chris Lattner36b4ed12018-07-04 10:43:29 -0700202// Identifier uniquing
Chris Lattnered65a732018-06-28 20:45:33 -0700203//===----------------------------------------------------------------------===//
204
205/// Return an identifier for the specified string.
206Identifier Identifier::get(StringRef str, const MLIRContext *context) {
207 assert(!str.empty() && "Cannot create an empty identifier");
208 assert(str.find('\0') == StringRef::npos &&
209 "Cannot create an identifier with a nul character");
210
211 auto &impl = context->getImpl();
212 auto it = impl.identifiers.insert({str, char()}).first;
213 return Identifier(it->getKeyData());
214}
215
Chris Lattnered65a732018-06-28 20:45:33 -0700216//===----------------------------------------------------------------------===//
Chris Lattner36b4ed12018-07-04 10:43:29 -0700217// Type uniquing
Chris Lattnered65a732018-06-28 20:45:33 -0700218//===----------------------------------------------------------------------===//
219
Chris Lattnereee1a2d2018-07-04 09:13:39 -0700220PrimitiveType *PrimitiveType::get(Kind kind, MLIRContext *context) {
221 assert(kind <= Kind::LAST_PRIMITIVE_TYPE && "Not a primitive type kind");
Chris Lattnerf7e22732018-06-22 22:03:48 -0700222 auto &impl = context->getImpl();
223
224 // We normally have these types.
225 if (impl.primitives[(int)kind])
226 return impl.primitives[(int)kind];
227
228 // On the first use, we allocate them into the bump pointer.
229 auto *ptr = impl.allocator.Allocate<PrimitiveType>();
230
231 // Initialize the memory using placement new.
232 new(ptr) PrimitiveType(kind, context);
233
234 // Cache and return it.
235 return impl.primitives[(int)kind] = ptr;
236}
237
Chris Lattnerf958bbe2018-06-29 22:08:05 -0700238IntegerType *IntegerType::get(unsigned width, MLIRContext *context) {
239 auto &impl = context->getImpl();
240
241 auto *&result = impl.integers[width];
242 if (!result) {
243 result = impl.allocator.Allocate<IntegerType>();
244 new (result) IntegerType(width, context);
245 }
246
247 return result;
Chris Lattnerf7e22732018-06-22 22:03:48 -0700248}
249
250FunctionType *FunctionType::get(ArrayRef<Type*> inputs, ArrayRef<Type*> results,
251 MLIRContext *context) {
252 auto &impl = context->getImpl();
253
254 // Look to see if we already have this function type.
255 FunctionTypeKeyInfo::KeyTy key(inputs, results);
256 auto existing = impl.functions.insert_as(nullptr, key);
257
258 // If we already have it, return that value.
259 if (!existing.second)
260 return *existing.first;
261
262 // On the first use, we allocate them into the bump pointer.
263 auto *result = impl.allocator.Allocate<FunctionType>();
264
265 // Copy the inputs and results into the bump pointer.
266 SmallVector<Type*, 16> types;
267 types.reserve(inputs.size()+results.size());
268 types.append(inputs.begin(), inputs.end());
269 types.append(results.begin(), results.end());
270 auto typesList = impl.copyInto(ArrayRef<Type*>(types));
271
272 // Initialize the memory using placement new.
273 new (result) FunctionType(typesList.data(), inputs.size(), results.size(),
274 context);
275
276 // Cache and return it.
277 return *existing.first = result;
278}
279
/// Return the uniqued vector type with the given shape and element type.
VectorType *VectorType::get(ArrayRef<unsigned> shape, Type *elementType) {
  assert(!shape.empty() && "vector types must have at least one dimension");
  // NOTE(review): this assert admits IntegerType elements, but the
  // cast<PrimitiveType> at construction below would reject them at runtime.
  // Fixing this requires the VectorType constructor (declared in the header,
  // not visible here) to accept the wider element type — TODO confirm.
  assert((isa<PrimitiveType>(elementType) || isa<IntegerType>(elementType)) &&
         "vectors elements must be primitives");

  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this vector type.
  VectorTypeKeyInfo::KeyTy key(elementType, shape);
  auto existing = impl.vectors.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<VectorType>();

  // Copy the shape into the bump pointer so it outlives the caller's storage.
  shape = impl.copyInto(shape);

  // Initialize the memory using placement new.
  new (result) VectorType(shape, cast<PrimitiveType>(elementType), context);

  // Cache and return it.
  return *existing.first = result;
}
MLIR Team355ec862018-06-23 18:09:09 -0700308
309
/// Base constructor shared by ranked and unranked tensor types; validates the
/// element type and records it.
TensorType::TensorType(Kind kind, Type *elementType, MLIRContext *context)
    : Type(kind, context), elementType(elementType) {
  assert((isa<PrimitiveType>(elementType) || isa<VectorType>(elementType) ||
          isa<IntegerType>(elementType)) &&
         "tensor elements must be primitives or vectors");
  // Sanity check: the kind passed by the subclass must map to a tensor type.
  assert(isa<TensorType>(this));
}
317
MLIR Team355ec862018-06-23 18:09:09 -0700318RankedTensorType *RankedTensorType::get(ArrayRef<int> shape,
319 Type *elementType) {
320 auto *context = elementType->getContext();
321 auto &impl = context->getImpl();
322
323 // Look to see if we already have this ranked tensor type.
324 RankedTensorTypeKeyInfo::KeyTy key(elementType, shape);
325 auto existing = impl.rankedTensors.insert_as(nullptr, key);
326
327 // If we already have it, return that value.
328 if (!existing.second)
329 return *existing.first;
330
331 // On the first use, we allocate them into the bump pointer.
332 auto *result = impl.allocator.Allocate<RankedTensorType>();
333
334 // Copy the shape into the bump pointer.
335 shape = impl.copyInto(shape);
336
337 // Initialize the memory using placement new.
338 new (result) RankedTensorType(shape, elementType, context);
339
340 // Cache and return it.
341 return *existing.first = result;
342}
343
344UnrankedTensorType *UnrankedTensorType::get(Type *elementType) {
345 auto *context = elementType->getContext();
346 auto &impl = context->getImpl();
347
348 // Look to see if we already have this unranked tensor type.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700349 auto *&result = impl.unrankedTensors[elementType];
MLIR Team355ec862018-06-23 18:09:09 -0700350
351 // If we already have it, return that value.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700352 if (result)
353 return result;
MLIR Team355ec862018-06-23 18:09:09 -0700354
355 // On the first use, we allocate them into the bump pointer.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700356 result = impl.allocator.Allocate<UnrankedTensorType>();
MLIR Team355ec862018-06-23 18:09:09 -0700357
358 // Initialize the memory using placement new.
359 new (result) UnrankedTensorType(elementType, context);
Chris Lattner36b4ed12018-07-04 10:43:29 -0700360 return result;
361}
362
363//===----------------------------------------------------------------------===//
364// Attribute uniquing
365//===----------------------------------------------------------------------===//
366
367BoolAttr *BoolAttr::get(bool value, MLIRContext *context) {
368 auto *&result = context->getImpl().boolAttrs[value];
369 if (result)
370 return result;
371
372 result = context->getImpl().allocator.Allocate<BoolAttr>();
373 new (result) BoolAttr(value);
374 return result;
375}
376
377IntegerAttr *IntegerAttr::get(int64_t value, MLIRContext *context) {
378 auto *&result = context->getImpl().integerAttrs[value];
379 if (result)
380 return result;
381
382 result = context->getImpl().allocator.Allocate<IntegerAttr>();
383 new (result) IntegerAttr(value);
384 return result;
385}
386
387FloatAttr *FloatAttr::get(double value, MLIRContext *context) {
388 // We hash based on the bit representation of the double to ensure we don't
389 // merge things like -0.0 and 0.0 in the hash comparison.
390 union {
391 double floatValue;
392 int64_t intValue;
393 };
394 floatValue = value;
395
396 auto *&result = context->getImpl().floatAttrs[intValue];
397 if (result)
398 return result;
399
400 result = context->getImpl().allocator.Allocate<FloatAttr>();
401 new (result) FloatAttr(value);
402 return result;
403}
404
405StringAttr *StringAttr::get(StringRef bytes, MLIRContext *context) {
406 auto it = context->getImpl().stringAttrs.insert({bytes, nullptr}).first;
407
408 if (it->second)
409 return it->second;
410
411 auto result = context->getImpl().allocator.Allocate<StringAttr>();
412 new (result) StringAttr(it->first());
413 it->second = result;
414 return result;
415}
416
417ArrayAttr *ArrayAttr::get(ArrayRef<Attribute*> value, MLIRContext *context) {
418 auto &impl = context->getImpl();
419
420 // Look to see if we already have this.
421 auto existing = impl.arrayAttrs.insert_as(nullptr, value);
422
423 // If we already have it, return that value.
424 if (!existing.second)
425 return *existing.first;
426
427 // On the first use, we allocate them into the bump pointer.
428 auto *result = impl.allocator.Allocate<ArrayAttr>();
429
430 // Copy the elements into the bump pointer.
431 value = impl.copyInto(value);
432
433 // Initialize the memory using placement new.
434 new (result) ArrayAttr(value);
MLIR Team355ec862018-06-23 18:09:09 -0700435
436 // Cache and return it.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700437 return *existing.first = result;
MLIR Team355ec862018-06-23 18:09:09 -0700438}
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700439
Chris Lattner36b4ed12018-07-04 10:43:29 -0700440//===----------------------------------------------------------------------===//
441// AffineMap and AffineExpr uniquing
442//===----------------------------------------------------------------------===//
443
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700444AffineMap *AffineMap::get(unsigned dimCount, unsigned symbolCount,
445 ArrayRef<AffineExpr *> results,
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700446 MLIRContext *context) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700447 // The number of results can't be zero.
448 assert(!results.empty());
449
450 auto &impl = context->getImpl();
451
452 // Check if we already have this affine map.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700453 auto key = std::make_tuple(dimCount, symbolCount, results);
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700454 auto existing = impl.affineMaps.insert_as(nullptr, key);
455
456 // If we already have it, return that value.
457 if (!existing.second)
458 return *existing.first;
459
460 // On the first use, we allocate them into the bump pointer.
461 auto *res = impl.allocator.Allocate<AffineMap>();
462
463 // Copy the results into the bump pointer.
464 results = impl.copyInto(ArrayRef<AffineExpr *>(results));
465
466 // Initialize the memory using placement new.
467 new (res) AffineMap(dimCount, symbolCount, results.size(), results.data());
468
469 // Cache and return it.
470 return *existing.first = res;
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700471}
472
473AffineBinaryOpExpr *AffineBinaryOpExpr::get(AffineExpr::Kind kind,
474 AffineExpr *lhsOperand,
475 AffineExpr *rhsOperand,
476 MLIRContext *context) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700477 auto &impl = context->getImpl();
478
479 // Check if we already have this affine expression.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700480 auto keyValue = std::make_tuple((unsigned)kind, lhsOperand, rhsOperand);
481 auto *&result = impl.affineExprs[keyValue];
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700482
483 // If we already have it, return that value.
484 if (!result) {
485 // On the first use, we allocate them into the bump pointer.
486 result = impl.allocator.Allocate<AffineBinaryOpExpr>();
487
488 // Initialize the memory using placement new.
489 new (result) AffineBinaryOpExpr(kind, lhsOperand, rhsOperand);
490 }
491 return result;
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700492}
493
Chris Lattner36b4ed12018-07-04 10:43:29 -0700494// TODO(bondhugula): complete uniquing of remaining AffineExpr sub-classes.
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700495AffineAddExpr *AffineAddExpr::get(AffineExpr *lhsOperand,
496 AffineExpr *rhsOperand,
497 MLIRContext *context) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700498 return cast<AffineAddExpr>(
499 AffineBinaryOpExpr::get(Kind::Add, lhsOperand, rhsOperand, context));
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700500}
501
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700502AffineSubExpr *AffineSubExpr::get(AffineExpr *lhsOperand,
503 AffineExpr *rhsOperand,
504 MLIRContext *context) {
505 return cast<AffineSubExpr>(
506 AffineBinaryOpExpr::get(Kind::Sub, lhsOperand, rhsOperand, context));
507}
508
509AffineMulExpr *AffineMulExpr::get(AffineExpr *lhsOperand,
510 AffineExpr *rhsOperand,
511 MLIRContext *context) {
512 return cast<AffineMulExpr>(
513 AffineBinaryOpExpr::get(Kind::Mul, lhsOperand, rhsOperand, context));
514}
515
516AffineFloorDivExpr *AffineFloorDivExpr::get(AffineExpr *lhsOperand,
517 AffineExpr *rhsOperand,
518 MLIRContext *context) {
519 return cast<AffineFloorDivExpr>(
520 AffineBinaryOpExpr::get(Kind::FloorDiv, lhsOperand, rhsOperand, context));
521}
522
523AffineCeilDivExpr *AffineCeilDivExpr::get(AffineExpr *lhsOperand,
524 AffineExpr *rhsOperand,
525 MLIRContext *context) {
526 return cast<AffineCeilDivExpr>(
527 AffineBinaryOpExpr::get(Kind::CeilDiv, lhsOperand, rhsOperand, context));
528}
529
530AffineModExpr *AffineModExpr::get(AffineExpr *lhsOperand,
531 AffineExpr *rhsOperand,
532 MLIRContext *context) {
533 return cast<AffineModExpr>(
534 AffineBinaryOpExpr::get(Kind::Mod, lhsOperand, rhsOperand, context));
535}
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700536
537AffineDimExpr *AffineDimExpr::get(unsigned position, MLIRContext *context) {
538 // TODO(bondhugula): complete this
539 // FIXME: this should be POD
540 return new AffineDimExpr(position);
541}
542
543AffineSymbolExpr *AffineSymbolExpr::get(unsigned position,
544 MLIRContext *context) {
545 // TODO(bondhugula): complete this
546 // FIXME: this should be POD
547 return new AffineSymbolExpr(position);
548}
549
550AffineConstantExpr *AffineConstantExpr::get(int64_t constant,
551 MLIRContext *context) {
552 // TODO(bondhugula): complete this
553 // FIXME: this should be POD
554 return new AffineConstantExpr(constant);
555}