//===- MLIRContext.cpp - MLIR Type Classes --------------------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
17
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Identifier.h"
#include "mlir/IR/OperationSet.h"
#include "mlir/IR/StandardOps.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/STLExtras.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
#include <cstring>
using namespace mlir;
using namespace llvm;
32
33namespace {
34struct FunctionTypeKeyInfo : DenseMapInfo<FunctionType*> {
35 // Functions are uniqued based on their inputs and results.
36 using KeyTy = std::pair<ArrayRef<Type*>, ArrayRef<Type*>>;
37 using DenseMapInfo<FunctionType*>::getHashValue;
38 using DenseMapInfo<FunctionType*>::isEqual;
39
40 static unsigned getHashValue(KeyTy key) {
41 return hash_combine(hash_combine_range(key.first.begin(), key.first.end()),
42 hash_combine_range(key.second.begin(),
43 key.second.end()));
44 }
45
46 static bool isEqual(const KeyTy &lhs, const FunctionType *rhs) {
47 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
48 return false;
49 return lhs == KeyTy(rhs->getInputs(), rhs->getResults());
50 }
51};
Uday Bondhugula015cbb12018-07-03 20:16:08 -070052
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070053struct AffineMapKeyInfo : DenseMapInfo<AffineMap *> {
Uday Bondhugula015cbb12018-07-03 20:16:08 -070054 // Affine maps are uniqued based on their dim/symbol counts and affine
55 // expressions.
Chris Lattner36b4ed12018-07-04 10:43:29 -070056 using KeyTy = std::tuple<unsigned, unsigned, ArrayRef<AffineExpr *>>;
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070057 using DenseMapInfo<AffineMap *>::getHashValue;
58 using DenseMapInfo<AffineMap *>::isEqual;
59
60 static unsigned getHashValue(KeyTy key) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -070061 return hash_combine(
Chris Lattner36b4ed12018-07-04 10:43:29 -070062 std::get<0>(key), std::get<1>(key),
63 hash_combine_range(std::get<2>(key).begin(), std::get<2>(key).end()));
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070064 }
65
Uday Bondhugula015cbb12018-07-03 20:16:08 -070066 static bool isEqual(const KeyTy &lhs, const AffineMap *rhs) {
67 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
68 return false;
Chris Lattner36b4ed12018-07-04 10:43:29 -070069 return lhs == std::make_tuple(rhs->getNumDims(), rhs->getNumSymbols(),
70 rhs->getResults());
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -070071 }
72};
73
Chris Lattnerf7e22732018-06-22 22:03:48 -070074struct VectorTypeKeyInfo : DenseMapInfo<VectorType*> {
75 // Vectors are uniqued based on their element type and shape.
76 using KeyTy = std::pair<Type*, ArrayRef<unsigned>>;
77 using DenseMapInfo<VectorType*>::getHashValue;
78 using DenseMapInfo<VectorType*>::isEqual;
79
80 static unsigned getHashValue(KeyTy key) {
81 return hash_combine(DenseMapInfo<Type*>::getHashValue(key.first),
82 hash_combine_range(key.second.begin(),
83 key.second.end()));
84 }
85
86 static bool isEqual(const KeyTy &lhs, const VectorType *rhs) {
87 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
88 return false;
89 return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
90 }
91};
Chris Lattner36b4ed12018-07-04 10:43:29 -070092
MLIR Team355ec862018-06-23 18:09:09 -070093struct RankedTensorTypeKeyInfo : DenseMapInfo<RankedTensorType*> {
94 // Ranked tensors are uniqued based on their element type and shape.
95 using KeyTy = std::pair<Type*, ArrayRef<int>>;
96 using DenseMapInfo<RankedTensorType*>::getHashValue;
97 using DenseMapInfo<RankedTensorType*>::isEqual;
98
99 static unsigned getHashValue(KeyTy key) {
100 return hash_combine(DenseMapInfo<Type*>::getHashValue(key.first),
101 hash_combine_range(key.second.begin(),
102 key.second.end()));
103 }
104
105 static bool isEqual(const KeyTy &lhs, const RankedTensorType *rhs) {
106 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
107 return false;
108 return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
109 }
110};
Chris Lattner36b4ed12018-07-04 10:43:29 -0700111
112struct ArrayAttrKeyInfo : DenseMapInfo<ArrayAttr*> {
113 // Array attributes are uniqued based on their elements.
114 using KeyTy = ArrayRef<Attribute*>;
115 using DenseMapInfo<ArrayAttr*>::getHashValue;
116 using DenseMapInfo<ArrayAttr*>::isEqual;
117
118 static unsigned getHashValue(KeyTy key) {
119 return hash_combine_range(key.begin(), key.end());
120 }
121
122 static bool isEqual(const KeyTy &lhs, const ArrayAttr *rhs) {
123 if (rhs == getEmptyKey() || rhs == getTombstoneKey())
124 return false;
125 return lhs == rhs->getValue();
126 }
127};
Chris Lattnerf7e22732018-06-22 22:03:48 -0700128} // end anonymous namespace.
129
130
131namespace mlir {
132/// This is the implementation of the MLIRContext class, using the pImpl idiom.
133/// This class is completely private to this file, so everything is public.
134class MLIRContextImpl {
135public:
136 /// We put immortal objects into this allocator.
137 llvm::BumpPtrAllocator allocator;
138
Chris Lattnerff0d5902018-07-05 09:12:11 -0700139 /// This is the set of all operations that are registered with the system.
140 OperationSet operationSet;
141
Chris Lattnered65a732018-06-28 20:45:33 -0700142 /// These are identifiers uniqued into this MLIRContext.
143 llvm::StringMap<char, llvm::BumpPtrAllocator&> identifiers;
144
Chris Lattnerf7e22732018-06-22 22:03:48 -0700145 // Primitive type uniquing.
Chris Lattnereee1a2d2018-07-04 09:13:39 -0700146 PrimitiveType *primitives[int(Type::Kind::LAST_PRIMITIVE_TYPE)+1] = {nullptr};
Chris Lattnerf7e22732018-06-22 22:03:48 -0700147
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700148 // Affine map uniquing.
149 using AffineMapSet = DenseSet<AffineMap *, AffineMapKeyInfo>;
150 AffineMapSet affineMaps;
151
Uday Bondhugula0b80a162018-07-03 21:34:58 -0700152 // Affine binary op expression uniquing. Figure out uniquing of dimensional
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700153 // or symbolic identifiers.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700154 DenseMap<std::tuple<unsigned, AffineExpr *, AffineExpr *>,
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700155 AffineBinaryOpExpr *>
156 affineExprs;
157
Chris Lattnerf958bbe2018-06-29 22:08:05 -0700158 /// Integer type uniquing.
159 DenseMap<unsigned, IntegerType*> integers;
160
Chris Lattnerf7e22732018-06-22 22:03:48 -0700161 /// Function type uniquing.
162 using FunctionTypeSet = DenseSet<FunctionType*, FunctionTypeKeyInfo>;
163 FunctionTypeSet functions;
164
165 /// Vector type uniquing.
166 using VectorTypeSet = DenseSet<VectorType*, VectorTypeKeyInfo>;
167 VectorTypeSet vectors;
168
MLIR Team355ec862018-06-23 18:09:09 -0700169 /// Ranked tensor type uniquing.
170 using RankedTensorTypeSet = DenseSet<RankedTensorType*,
171 RankedTensorTypeKeyInfo>;
172 RankedTensorTypeSet rankedTensors;
173
174 /// Unranked tensor type uniquing.
175 DenseMap<Type*, UnrankedTensorType*> unrankedTensors;
176
Chris Lattner36b4ed12018-07-04 10:43:29 -0700177 // Attribute uniquing.
178 BoolAttr *boolAttrs[2] = { nullptr };
179 DenseMap<int64_t, IntegerAttr*> integerAttrs;
180 DenseMap<int64_t, FloatAttr*> floatAttrs;
181 StringMap<StringAttr*> stringAttrs;
182 using ArrayAttrSet = DenseSet<ArrayAttr*, ArrayAttrKeyInfo>;
183 ArrayAttrSet arrayAttrs;
Chris Lattnerf7e22732018-06-22 22:03:48 -0700184
185public:
Chris Lattnerff0d5902018-07-05 09:12:11 -0700186 MLIRContextImpl() : identifiers(allocator) {
187 registerStandardOperations(operationSet);
188 }
Chris Lattnered65a732018-06-28 20:45:33 -0700189
Chris Lattnerf7e22732018-06-22 22:03:48 -0700190 /// Copy the specified array of elements into memory managed by our bump
191 /// pointer allocator. This assumes the elements are all PODs.
192 template<typename T>
193 ArrayRef<T> copyInto(ArrayRef<T> elements) {
194 auto result = allocator.Allocate<T>(elements.size());
195 std::uninitialized_copy(elements.begin(), elements.end(), result);
196 return ArrayRef<T>(result, elements.size());
197 }
198};
199} // end namespace mlir
200
201MLIRContext::MLIRContext() : impl(new MLIRContextImpl()) {
202}
203
204MLIRContext::~MLIRContext() {
205}
206
Chris Lattnerff0d5902018-07-05 09:12:11 -0700207/// Return the operation set associated with the specified MLIRContext object.
208OperationSet &OperationSet::get(MLIRContext *context) {
209 return context->getImpl().operationSet;
210}
Chris Lattnerf7e22732018-06-22 22:03:48 -0700211
Chris Lattnered65a732018-06-28 20:45:33 -0700212//===----------------------------------------------------------------------===//
Chris Lattner36b4ed12018-07-04 10:43:29 -0700213// Identifier uniquing
Chris Lattnered65a732018-06-28 20:45:33 -0700214//===----------------------------------------------------------------------===//
215
216/// Return an identifier for the specified string.
217Identifier Identifier::get(StringRef str, const MLIRContext *context) {
218 assert(!str.empty() && "Cannot create an empty identifier");
219 assert(str.find('\0') == StringRef::npos &&
220 "Cannot create an identifier with a nul character");
221
222 auto &impl = context->getImpl();
223 auto it = impl.identifiers.insert({str, char()}).first;
224 return Identifier(it->getKeyData());
225}
226
Chris Lattnered65a732018-06-28 20:45:33 -0700227//===----------------------------------------------------------------------===//
Chris Lattner36b4ed12018-07-04 10:43:29 -0700228// Type uniquing
Chris Lattnered65a732018-06-28 20:45:33 -0700229//===----------------------------------------------------------------------===//
230
Chris Lattnereee1a2d2018-07-04 09:13:39 -0700231PrimitiveType *PrimitiveType::get(Kind kind, MLIRContext *context) {
232 assert(kind <= Kind::LAST_PRIMITIVE_TYPE && "Not a primitive type kind");
Chris Lattnerf7e22732018-06-22 22:03:48 -0700233 auto &impl = context->getImpl();
234
235 // We normally have these types.
236 if (impl.primitives[(int)kind])
237 return impl.primitives[(int)kind];
238
239 // On the first use, we allocate them into the bump pointer.
240 auto *ptr = impl.allocator.Allocate<PrimitiveType>();
241
242 // Initialize the memory using placement new.
243 new(ptr) PrimitiveType(kind, context);
244
245 // Cache and return it.
246 return impl.primitives[(int)kind] = ptr;
247}
248
Chris Lattnerf958bbe2018-06-29 22:08:05 -0700249IntegerType *IntegerType::get(unsigned width, MLIRContext *context) {
250 auto &impl = context->getImpl();
251
252 auto *&result = impl.integers[width];
253 if (!result) {
254 result = impl.allocator.Allocate<IntegerType>();
255 new (result) IntegerType(width, context);
256 }
257
258 return result;
Chris Lattnerf7e22732018-06-22 22:03:48 -0700259}
260
261FunctionType *FunctionType::get(ArrayRef<Type*> inputs, ArrayRef<Type*> results,
262 MLIRContext *context) {
263 auto &impl = context->getImpl();
264
265 // Look to see if we already have this function type.
266 FunctionTypeKeyInfo::KeyTy key(inputs, results);
267 auto existing = impl.functions.insert_as(nullptr, key);
268
269 // If we already have it, return that value.
270 if (!existing.second)
271 return *existing.first;
272
273 // On the first use, we allocate them into the bump pointer.
274 auto *result = impl.allocator.Allocate<FunctionType>();
275
276 // Copy the inputs and results into the bump pointer.
277 SmallVector<Type*, 16> types;
278 types.reserve(inputs.size()+results.size());
279 types.append(inputs.begin(), inputs.end());
280 types.append(results.begin(), results.end());
281 auto typesList = impl.copyInto(ArrayRef<Type*>(types));
282
283 // Initialize the memory using placement new.
284 new (result) FunctionType(typesList.data(), inputs.size(), results.size(),
285 context);
286
287 // Cache and return it.
288 return *existing.first = result;
289}
290
Chris Lattnerf7e22732018-06-22 22:03:48 -0700291VectorType *VectorType::get(ArrayRef<unsigned> shape, Type *elementType) {
292 assert(!shape.empty() && "vector types must have at least one dimension");
Chris Lattnerf958bbe2018-06-29 22:08:05 -0700293 assert((isa<PrimitiveType>(elementType) || isa<IntegerType>(elementType)) &&
Chris Lattnerf7e22732018-06-22 22:03:48 -0700294 "vectors elements must be primitives");
295
296 auto *context = elementType->getContext();
297 auto &impl = context->getImpl();
298
299 // Look to see if we already have this vector type.
300 VectorTypeKeyInfo::KeyTy key(elementType, shape);
301 auto existing = impl.vectors.insert_as(nullptr, key);
302
303 // If we already have it, return that value.
304 if (!existing.second)
305 return *existing.first;
306
307 // On the first use, we allocate them into the bump pointer.
308 auto *result = impl.allocator.Allocate<VectorType>();
309
310 // Copy the shape into the bump pointer.
311 shape = impl.copyInto(shape);
312
313 // Initialize the memory using placement new.
314 new (result) VectorType(shape, cast<PrimitiveType>(elementType), context);
315
316 // Cache and return it.
317 return *existing.first = result;
318}
MLIR Team355ec862018-06-23 18:09:09 -0700319
320
Chris Lattnereee1a2d2018-07-04 09:13:39 -0700321TensorType::TensorType(Kind kind, Type *elementType, MLIRContext *context)
MLIR Team355ec862018-06-23 18:09:09 -0700322 : Type(kind, context), elementType(elementType) {
Chris Lattnerf958bbe2018-06-29 22:08:05 -0700323 assert((isa<PrimitiveType>(elementType) || isa<VectorType>(elementType) ||
324 isa<IntegerType>(elementType)) &&
MLIR Team355ec862018-06-23 18:09:09 -0700325 "tensor elements must be primitives or vectors");
326 assert(isa<TensorType>(this));
327}
328
MLIR Team355ec862018-06-23 18:09:09 -0700329RankedTensorType *RankedTensorType::get(ArrayRef<int> shape,
330 Type *elementType) {
331 auto *context = elementType->getContext();
332 auto &impl = context->getImpl();
333
334 // Look to see if we already have this ranked tensor type.
335 RankedTensorTypeKeyInfo::KeyTy key(elementType, shape);
336 auto existing = impl.rankedTensors.insert_as(nullptr, key);
337
338 // If we already have it, return that value.
339 if (!existing.second)
340 return *existing.first;
341
342 // On the first use, we allocate them into the bump pointer.
343 auto *result = impl.allocator.Allocate<RankedTensorType>();
344
345 // Copy the shape into the bump pointer.
346 shape = impl.copyInto(shape);
347
348 // Initialize the memory using placement new.
349 new (result) RankedTensorType(shape, elementType, context);
350
351 // Cache and return it.
352 return *existing.first = result;
353}
354
355UnrankedTensorType *UnrankedTensorType::get(Type *elementType) {
356 auto *context = elementType->getContext();
357 auto &impl = context->getImpl();
358
359 // Look to see if we already have this unranked tensor type.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700360 auto *&result = impl.unrankedTensors[elementType];
MLIR Team355ec862018-06-23 18:09:09 -0700361
362 // If we already have it, return that value.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700363 if (result)
364 return result;
MLIR Team355ec862018-06-23 18:09:09 -0700365
366 // On the first use, we allocate them into the bump pointer.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700367 result = impl.allocator.Allocate<UnrankedTensorType>();
MLIR Team355ec862018-06-23 18:09:09 -0700368
369 // Initialize the memory using placement new.
370 new (result) UnrankedTensorType(elementType, context);
Chris Lattner36b4ed12018-07-04 10:43:29 -0700371 return result;
372}
373
374//===----------------------------------------------------------------------===//
375// Attribute uniquing
376//===----------------------------------------------------------------------===//
377
378BoolAttr *BoolAttr::get(bool value, MLIRContext *context) {
379 auto *&result = context->getImpl().boolAttrs[value];
380 if (result)
381 return result;
382
383 result = context->getImpl().allocator.Allocate<BoolAttr>();
384 new (result) BoolAttr(value);
385 return result;
386}
387
388IntegerAttr *IntegerAttr::get(int64_t value, MLIRContext *context) {
389 auto *&result = context->getImpl().integerAttrs[value];
390 if (result)
391 return result;
392
393 result = context->getImpl().allocator.Allocate<IntegerAttr>();
394 new (result) IntegerAttr(value);
395 return result;
396}
397
398FloatAttr *FloatAttr::get(double value, MLIRContext *context) {
399 // We hash based on the bit representation of the double to ensure we don't
400 // merge things like -0.0 and 0.0 in the hash comparison.
401 union {
402 double floatValue;
403 int64_t intValue;
404 };
405 floatValue = value;
406
407 auto *&result = context->getImpl().floatAttrs[intValue];
408 if (result)
409 return result;
410
411 result = context->getImpl().allocator.Allocate<FloatAttr>();
412 new (result) FloatAttr(value);
413 return result;
414}
415
416StringAttr *StringAttr::get(StringRef bytes, MLIRContext *context) {
417 auto it = context->getImpl().stringAttrs.insert({bytes, nullptr}).first;
418
419 if (it->second)
420 return it->second;
421
422 auto result = context->getImpl().allocator.Allocate<StringAttr>();
423 new (result) StringAttr(it->first());
424 it->second = result;
425 return result;
426}
427
428ArrayAttr *ArrayAttr::get(ArrayRef<Attribute*> value, MLIRContext *context) {
429 auto &impl = context->getImpl();
430
431 // Look to see if we already have this.
432 auto existing = impl.arrayAttrs.insert_as(nullptr, value);
433
434 // If we already have it, return that value.
435 if (!existing.second)
436 return *existing.first;
437
438 // On the first use, we allocate them into the bump pointer.
439 auto *result = impl.allocator.Allocate<ArrayAttr>();
440
441 // Copy the elements into the bump pointer.
442 value = impl.copyInto(value);
443
444 // Initialize the memory using placement new.
445 new (result) ArrayAttr(value);
MLIR Team355ec862018-06-23 18:09:09 -0700446
447 // Cache and return it.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700448 return *existing.first = result;
MLIR Team355ec862018-06-23 18:09:09 -0700449}
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700450
Chris Lattner36b4ed12018-07-04 10:43:29 -0700451//===----------------------------------------------------------------------===//
452// AffineMap and AffineExpr uniquing
453//===----------------------------------------------------------------------===//
454
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700455AffineMap *AffineMap::get(unsigned dimCount, unsigned symbolCount,
456 ArrayRef<AffineExpr *> results,
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700457 MLIRContext *context) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700458 // The number of results can't be zero.
459 assert(!results.empty());
460
461 auto &impl = context->getImpl();
462
463 // Check if we already have this affine map.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700464 auto key = std::make_tuple(dimCount, symbolCount, results);
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700465 auto existing = impl.affineMaps.insert_as(nullptr, key);
466
467 // If we already have it, return that value.
468 if (!existing.second)
469 return *existing.first;
470
471 // On the first use, we allocate them into the bump pointer.
472 auto *res = impl.allocator.Allocate<AffineMap>();
473
474 // Copy the results into the bump pointer.
475 results = impl.copyInto(ArrayRef<AffineExpr *>(results));
476
477 // Initialize the memory using placement new.
478 new (res) AffineMap(dimCount, symbolCount, results.size(), results.data());
479
480 // Cache and return it.
481 return *existing.first = res;
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700482}
483
484AffineBinaryOpExpr *AffineBinaryOpExpr::get(AffineExpr::Kind kind,
485 AffineExpr *lhsOperand,
486 AffineExpr *rhsOperand,
487 MLIRContext *context) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700488 auto &impl = context->getImpl();
489
490 // Check if we already have this affine expression.
Chris Lattner36b4ed12018-07-04 10:43:29 -0700491 auto keyValue = std::make_tuple((unsigned)kind, lhsOperand, rhsOperand);
492 auto *&result = impl.affineExprs[keyValue];
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700493
494 // If we already have it, return that value.
495 if (!result) {
496 // On the first use, we allocate them into the bump pointer.
497 result = impl.allocator.Allocate<AffineBinaryOpExpr>();
498
499 // Initialize the memory using placement new.
500 new (result) AffineBinaryOpExpr(kind, lhsOperand, rhsOperand);
501 }
502 return result;
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700503}
504
Chris Lattner36b4ed12018-07-04 10:43:29 -0700505// TODO(bondhugula): complete uniquing of remaining AffineExpr sub-classes.
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700506AffineAddExpr *AffineAddExpr::get(AffineExpr *lhsOperand,
507 AffineExpr *rhsOperand,
508 MLIRContext *context) {
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700509 return cast<AffineAddExpr>(
510 AffineBinaryOpExpr::get(Kind::Add, lhsOperand, rhsOperand, context));
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700511}
512
Uday Bondhugula015cbb12018-07-03 20:16:08 -0700513AffineSubExpr *AffineSubExpr::get(AffineExpr *lhsOperand,
514 AffineExpr *rhsOperand,
515 MLIRContext *context) {
516 return cast<AffineSubExpr>(
517 AffineBinaryOpExpr::get(Kind::Sub, lhsOperand, rhsOperand, context));
518}
519
520AffineMulExpr *AffineMulExpr::get(AffineExpr *lhsOperand,
521 AffineExpr *rhsOperand,
522 MLIRContext *context) {
523 return cast<AffineMulExpr>(
524 AffineBinaryOpExpr::get(Kind::Mul, lhsOperand, rhsOperand, context));
525}
526
527AffineFloorDivExpr *AffineFloorDivExpr::get(AffineExpr *lhsOperand,
528 AffineExpr *rhsOperand,
529 MLIRContext *context) {
530 return cast<AffineFloorDivExpr>(
531 AffineBinaryOpExpr::get(Kind::FloorDiv, lhsOperand, rhsOperand, context));
532}
533
534AffineCeilDivExpr *AffineCeilDivExpr::get(AffineExpr *lhsOperand,
535 AffineExpr *rhsOperand,
536 MLIRContext *context) {
537 return cast<AffineCeilDivExpr>(
538 AffineBinaryOpExpr::get(Kind::CeilDiv, lhsOperand, rhsOperand, context));
539}
540
541AffineModExpr *AffineModExpr::get(AffineExpr *lhsOperand,
542 AffineExpr *rhsOperand,
543 MLIRContext *context) {
544 return cast<AffineModExpr>(
545 AffineBinaryOpExpr::get(Kind::Mod, lhsOperand, rhsOperand, context));
546}
Uday Bondhugulafaf37dd2018-06-29 18:09:29 -0700547
548AffineDimExpr *AffineDimExpr::get(unsigned position, MLIRContext *context) {
549 // TODO(bondhugula): complete this
550 // FIXME: this should be POD
551 return new AffineDimExpr(position);
552}
553
554AffineSymbolExpr *AffineSymbolExpr::get(unsigned position,
555 MLIRContext *context) {
556 // TODO(bondhugula): complete this
557 // FIXME: this should be POD
558 return new AffineSymbolExpr(position);
559}
560
561AffineConstantExpr *AffineConstantExpr::get(int64_t constant,
562 MLIRContext *context) {
563 // TODO(bondhugula): complete this
564 // FIXME: this should be POD
565 return new AffineConstantExpr(constant);
566}