//===- MLIRContext.cpp - MLIR Context Implementation ---------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#include "mlir/IR/MLIRContext.h"
#include "AttributeListStorage.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Identifier.h"
#include "mlir/IR/OperationSet.h"
#include "mlir/IR/StandardOps.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/STLExtras.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Allocator.h"
using namespace mlir;
using namespace llvm;

namespace {
struct FunctionTypeKeyInfo : DenseMapInfo<FunctionType*> {
  // Functions are uniqued based on their inputs and results.
  using KeyTy = std::pair<ArrayRef<Type*>, ArrayRef<Type*>>;
  using DenseMapInfo<FunctionType*>::getHashValue;
  using DenseMapInfo<FunctionType*>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(hash_combine_range(key.first.begin(), key.first.end()),
                        hash_combine_range(key.second.begin(),
                                           key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const FunctionType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getInputs(), rhs->getResults());
  }
};

struct AffineMapKeyInfo : DenseMapInfo<AffineMap *> {
  // Affine maps are uniqued based on their dim/symbol counts and affine
  // expressions.
  using KeyTy = std::tuple<unsigned, unsigned, ArrayRef<AffineExpr *>>;
  using DenseMapInfo<AffineMap *>::getHashValue;
  using DenseMapInfo<AffineMap *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        std::get<0>(key), std::get<1>(key),
        hash_combine_range(std::get<2>(key).begin(), std::get<2>(key).end()));
  }

  static bool isEqual(const KeyTy &lhs, const AffineMap *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == std::make_tuple(rhs->getNumDims(), rhs->getNumSymbols(),
                                  rhs->getResults());
  }
};

struct VectorTypeKeyInfo : DenseMapInfo<VectorType*> {
  // Vectors are uniqued based on their element type and shape.
  using KeyTy = std::pair<Type*, ArrayRef<unsigned>>;
  using DenseMapInfo<VectorType*>::getHashValue;
  using DenseMapInfo<VectorType*>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(DenseMapInfo<Type*>::getHashValue(key.first),
                        hash_combine_range(key.second.begin(),
                                           key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const VectorType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
  }
};

struct RankedTensorTypeKeyInfo : DenseMapInfo<RankedTensorType*> {
  // Ranked tensors are uniqued based on their element type and shape.
  using KeyTy = std::pair<Type*, ArrayRef<int>>;
  using DenseMapInfo<RankedTensorType*>::getHashValue;
  using DenseMapInfo<RankedTensorType*>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(DenseMapInfo<Type*>::getHashValue(key.first),
                        hash_combine_range(key.second.begin(),
                                           key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const RankedTensorType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
  }
};

struct ArrayAttrKeyInfo : DenseMapInfo<ArrayAttr*> {
  // Array attributes are uniqued based on their elements.
  using KeyTy = ArrayRef<Attribute*>;
  using DenseMapInfo<ArrayAttr*>::getHashValue;
  using DenseMapInfo<ArrayAttr*>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine_range(key.begin(), key.end());
  }

  static bool isEqual(const KeyTy &lhs, const ArrayAttr *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == rhs->getValue();
  }
};

struct AttributeListKeyInfo : DenseMapInfo<AttributeListStorage *> {
  // Attribute lists are uniqued based on their elements.
  using KeyTy = ArrayRef<NamedAttribute>;
  using DenseMapInfo<AttributeListStorage *>::getHashValue;
  using DenseMapInfo<AttributeListStorage *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine_range(key.begin(), key.end());
  }

  static bool isEqual(const KeyTy &lhs, const AttributeListStorage *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == rhs->getElements();
  }
};

} // end anonymous namespace.
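
// The KeyInfo structs above let each uniquing DenseSet be probed with a
// lightweight key (e.g. an ArrayRef of elements) rather than a fully
// constructed object, via DenseSet::insert_as. A minimal sketch of the
// pattern used throughout this file, with hypothetical names for
// illustration only:
//
//   SomeTypeKeyInfo::KeyTy key(elements);         // cheap, stack-allocated key
//   auto existing = set.insert_as(nullptr, key);  // probe without allocating
//   if (!existing.second)
//     return *existing.first;                     // already uniqued
//   // ...otherwise allocate, placement-new, and publish via:
//   //   return *existing.first = result;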

namespace mlir {
/// This is the implementation of the MLIRContext class, using the pImpl idiom.
/// This class is completely private to this file, so everything is public.
class MLIRContextImpl {
public:
  /// We put immortal objects into this allocator.
  llvm::BumpPtrAllocator allocator;

  /// This is the set of all operations that are registered with the system.
  OperationSet operationSet;

  /// These are identifiers uniqued into this MLIRContext.
  llvm::StringMap<char, llvm::BumpPtrAllocator&> identifiers;

  // Primitive type uniquing.
  PrimitiveType *primitives[int(Type::Kind::LAST_PRIMITIVE_TYPE)+1] = {nullptr};

  // Affine map uniquing.
  using AffineMapSet = DenseSet<AffineMap *, AffineMapKeyInfo>;
  AffineMapSet affineMaps;

  // Affine binary op expression uniquing. TODO: figure out uniquing of
  // dimensional or symbolic identifiers.
  DenseMap<std::tuple<unsigned, AffineExpr *, AffineExpr *>,
           AffineBinaryOpExpr *>
      affineExprs;

  /// Integer type uniquing.
  DenseMap<unsigned, IntegerType*> integers;

  /// Function type uniquing.
  using FunctionTypeSet = DenseSet<FunctionType*, FunctionTypeKeyInfo>;
  FunctionTypeSet functions;

  /// Vector type uniquing.
  using VectorTypeSet = DenseSet<VectorType*, VectorTypeKeyInfo>;
  VectorTypeSet vectors;

  /// Ranked tensor type uniquing.
  using RankedTensorTypeSet = DenseSet<RankedTensorType*,
                                       RankedTensorTypeKeyInfo>;
  RankedTensorTypeSet rankedTensors;

  /// Unranked tensor type uniquing.
  DenseMap<Type*, UnrankedTensorType*> unrankedTensors;

  // Attribute uniquing.
  BoolAttr *boolAttrs[2] = { nullptr };
  DenseMap<int64_t, IntegerAttr*> integerAttrs;
  DenseMap<int64_t, FloatAttr*> floatAttrs;
  StringMap<StringAttr*> stringAttrs;
  using ArrayAttrSet = DenseSet<ArrayAttr*, ArrayAttrKeyInfo>;
  ArrayAttrSet arrayAttrs;
  using AttributeListSet =
      DenseSet<AttributeListStorage *, AttributeListKeyInfo>;
  AttributeListSet attributeLists;

public:
  MLIRContextImpl() : identifiers(allocator) {
    registerStandardOperations(operationSet);
  }

  /// Copy the specified array of elements into memory managed by our bump
  /// pointer allocator.  This assumes the elements are all PODs.
  template <typename T>
  ArrayRef<T> copyInto(ArrayRef<T> elements) {
    auto result = allocator.Allocate<T>(elements.size());
    std::uninitialized_copy(elements.begin(), elements.end(), result);
    return ArrayRef<T>(result, elements.size());
  }
};
} // end namespace mlir

MLIRContext::MLIRContext() : impl(new MLIRContextImpl()) {
}

MLIRContext::~MLIRContext() {
}

/// Return the operation set associated with the specified MLIRContext object.
OperationSet &OperationSet::get(MLIRContext *context) {
  return context->getImpl().operationSet;
}

/// If this operation has a registered operation description in the
/// OperationSet, return it.  Otherwise return null.
/// TODO: Shouldn't have to pass a Context here.
const AbstractOperation *
Operation::getAbstractOperation(MLIRContext *context) const {
  return OperationSet::get(context).lookup(getName().str());
}

//===----------------------------------------------------------------------===//
// Identifier uniquing
//===----------------------------------------------------------------------===//

/// Return an identifier for the specified string.
Identifier Identifier::get(StringRef str, const MLIRContext *context) {
  assert(!str.empty() && "Cannot create an empty identifier");
  assert(str.find('\0') == StringRef::npos &&
         "Cannot create an identifier with a nul character");

  auto &impl = context->getImpl();
  auto it = impl.identifiers.insert({str, char()}).first;
  return Identifier(it->getKeyData());
}
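
// A sketch of the resulting guarantee (hypothetical usage, not part of this
// file): repeated lookups of equal strings return Identifiers backed by the
// same key data, so equality is a cheap pointer comparison.
//
//   Identifier a = Identifier::get("foo", ctx);
//   Identifier b = Identifier::get("foo", ctx);
//   assert(a == b);  // same uniqued storage in the context's StringMap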

//===----------------------------------------------------------------------===//
// Type uniquing
//===----------------------------------------------------------------------===//

PrimitiveType *PrimitiveType::get(Kind kind, MLIRContext *context) {
  assert(kind <= Kind::LAST_PRIMITIVE_TYPE && "Not a primitive type kind");
  auto &impl = context->getImpl();

  // In the common case the type has already been created; return the cached
  // instance.
  if (impl.primitives[(int)kind])
    return impl.primitives[(int)kind];

  // On the first use, we allocate them into the bump pointer.
  auto *ptr = impl.allocator.Allocate<PrimitiveType>();

  // Initialize the memory using placement new.
  new (ptr) PrimitiveType(kind, context);

  // Cache and return it.
  return impl.primitives[(int)kind] = ptr;
}

IntegerType *IntegerType::get(unsigned width, MLIRContext *context) {
  auto &impl = context->getImpl();

  auto *&result = impl.integers[width];
  if (!result) {
    result = impl.allocator.Allocate<IntegerType>();
    new (result) IntegerType(width, context);
  }

  return result;
}

FunctionType *FunctionType::get(ArrayRef<Type*> inputs, ArrayRef<Type*> results,
                                MLIRContext *context) {
  auto &impl = context->getImpl();

  // Look to see if we already have this function type.
  FunctionTypeKeyInfo::KeyTy key(inputs, results);
  auto existing = impl.functions.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<FunctionType>();

  // Copy the inputs and results into the bump pointer.
  SmallVector<Type*, 16> types;
  types.reserve(inputs.size() + results.size());
  types.append(inputs.begin(), inputs.end());
  types.append(results.begin(), results.end());
  auto typesList = impl.copyInto(ArrayRef<Type*>(types));

  // Initialize the memory using placement new.
  new (result) FunctionType(typesList.data(), inputs.size(), results.size(),
                            context);

  // Cache and return it.
  return *existing.first = result;
}
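
// A hedged sketch of caller-side usage (the F32 kind name is assumed for
// illustration; any primitive type kind works the same way):
//
//   auto *f32 = PrimitiveType::get(Type::Kind::F32, ctx);
//   auto *fnTy = FunctionType::get({f32, f32}, {f32}, ctx);
//   assert(fnTy == FunctionType::get({f32, f32}, {f32}, ctx));  // uniqued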

VectorType *VectorType::get(ArrayRef<unsigned> shape, Type *elementType) {
  assert(!shape.empty() && "vector types must have at least one dimension");
  assert((isa<PrimitiveType>(elementType) || isa<IntegerType>(elementType)) &&
         "vector elements must be primitives or integers");

  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this vector type.
  VectorTypeKeyInfo::KeyTy key(elementType, shape);
  auto existing = impl.vectors.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<VectorType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Initialize the memory using placement new.
  // TODO: The cast below will fail for the IntegerType elements permitted by
  // the assert above; the constructor should accept them as well.
  new (result) VectorType(shape, cast<PrimitiveType>(elementType), context);

  // Cache and return it.
  return *existing.first = result;
}

TensorType::TensorType(Kind kind, Type *elementType, MLIRContext *context)
    : Type(kind, context), elementType(elementType) {
  assert((isa<PrimitiveType>(elementType) || isa<VectorType>(elementType) ||
          isa<IntegerType>(elementType)) &&
         "tensor elements must be primitives, integers, or vectors");
  assert(isa<TensorType>(this));
}

RankedTensorType *RankedTensorType::get(ArrayRef<int> shape,
                                        Type *elementType) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this ranked tensor type.
  RankedTensorTypeKeyInfo::KeyTy key(elementType, shape);
  auto existing = impl.rankedTensors.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<RankedTensorType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Initialize the memory using placement new.
  new (result) RankedTensorType(shape, elementType, context);

  // Cache and return it.
  return *existing.first = result;
}

UnrankedTensorType *UnrankedTensorType::get(Type *elementType) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this unranked tensor type.
  auto *&result = impl.unrankedTensors[elementType];

  // If we already have it, return that value.
  if (result)
    return result;

  // On the first use, we allocate them into the bump pointer.
  result = impl.allocator.Allocate<UnrankedTensorType>();

  // Initialize the memory using placement new.
  new (result) UnrankedTensorType(elementType, context);
  return result;
}

//===----------------------------------------------------------------------===//
// Attribute uniquing
//===----------------------------------------------------------------------===//

BoolAttr *BoolAttr::get(bool value, MLIRContext *context) {
  auto *&result = context->getImpl().boolAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<BoolAttr>();
  new (result) BoolAttr(value);
  return result;
}

IntegerAttr *IntegerAttr::get(int64_t value, MLIRContext *context) {
  auto *&result = context->getImpl().integerAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<IntegerAttr>();
  new (result) IntegerAttr(value);
  return result;
}

FloatAttr *FloatAttr::get(double value, MLIRContext *context) {
  // We hash based on the bit representation of the double to ensure we don't
  // merge things like -0.0 and 0.0 in the hash comparison.
  union {
    double floatValue;
    int64_t intValue;
  };
  floatValue = value;
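
  // Note: keying on the bit pattern also keeps NaNs with different payloads
  // distinct. Reading intValue after writing floatValue is union type punning;
  // it is well supported in practice, though a memcpy-based conversion (e.g.
  // llvm::DoubleToBits) would be the strictly conforming alternative.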

  auto *&result = context->getImpl().floatAttrs[intValue];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<FloatAttr>();
  new (result) FloatAttr(value);
  return result;
}

StringAttr *StringAttr::get(StringRef bytes, MLIRContext *context) {
  auto it = context->getImpl().stringAttrs.insert({bytes, nullptr}).first;

  if (it->second)
    return it->second;

  auto result = context->getImpl().allocator.Allocate<StringAttr>();
  new (result) StringAttr(it->first());
  it->second = result;
  return result;
}

ArrayAttr *ArrayAttr::get(ArrayRef<Attribute*> value, MLIRContext *context) {
  auto &impl = context->getImpl();

  // Look to see if we already have this.
  auto existing = impl.arrayAttrs.insert_as(nullptr, value);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<ArrayAttr>();

  // Copy the elements into the bump pointer.
  value = impl.copyInto(value);

  // Initialize the memory using placement new.
  new (result) ArrayAttr(value);

  // Cache and return it.
  return *existing.first = result;
}

/// Perform a three-way comparison between the names of the specified
/// NamedAttributes.
static int compareNamedAttributes(const NamedAttribute *lhs,
                                  const NamedAttribute *rhs) {
  return lhs->first.str().compare(rhs->first.str());
}

/// Given a list of NamedAttribute's, canonicalize the list (sorting
/// by name) and return the unique'd result.  Note that the empty list is
/// represented with a null pointer.
AttributeListStorage *AttributeListStorage::get(ArrayRef<NamedAttribute> attrs,
                                                MLIRContext *context) {
  // We need to sort the element list to canonicalize it, but we also don't
  // want to do a ton of work in the super common case where the element list
  // is already sorted.
  SmallVector<NamedAttribute, 8> storage;
  switch (attrs.size()) {
  case 0:
    // An empty list is represented with a null pointer.
    return nullptr;
  case 1:
    // A single element is already sorted.
    break;
  case 2:
    // Don't invoke a general sort for the two element case.
    if (attrs[0].first.str() > attrs[1].first.str()) {
      storage.push_back(attrs[1]);
      storage.push_back(attrs[0]);
      attrs = storage;
    }
    break;
  default:
    // Check to see if they are sorted already.
    bool isSorted = true;
    for (unsigned i = 0, e = attrs.size() - 1; i != e; ++i) {
      if (attrs[i].first.str() > attrs[i + 1].first.str()) {
        isSorted = false;
        break;
      }
    }
    // If not, do a general sort.
    if (!isSorted) {
      storage.append(attrs.begin(), attrs.end());
      llvm::array_pod_sort(storage.begin(), storage.end(),
                           compareNamedAttributes);
      attrs = storage;
    }
  }

  // Ok, now that we've canonicalized our attributes, unique them.
  auto &impl = context->getImpl();

  // Look to see if we already have this.
  auto existing = impl.attributeLists.insert_as(nullptr, attrs);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // Otherwise, allocate a new AttributeListStorage, unique it and return it.
  auto byteSize =
      AttributeListStorage::totalSizeToAlloc<NamedAttribute>(attrs.size());
  auto rawMem = impl.allocator.Allocate(byteSize, alignof(NamedAttribute));

  // Placement initialize the AttributeListStorage.
  auto result = ::new (rawMem) AttributeListStorage(attrs.size());
  std::uninitialized_copy(attrs.begin(), attrs.end(),
                          result->getTrailingObjects<NamedAttribute>());
  return *existing.first = result;
}
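
// Because the list is canonicalized by name before uniquing, any permutation
// of the same attributes maps to the same storage. A hypothetical sketch
// (attrA/attrB stand in for arbitrary Attribute pointers):
//
//   NamedAttribute a = {Identifier::get("a", ctx), attrA};
//   NamedAttribute b = {Identifier::get("b", ctx), attrB};
//   assert(AttributeListStorage::get({a, b}, ctx) ==
//          AttributeListStorage::get({b, a}, ctx));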

//===----------------------------------------------------------------------===//
// AffineMap and AffineExpr uniquing
//===----------------------------------------------------------------------===//

AffineMap *AffineMap::get(unsigned dimCount, unsigned symbolCount,
                          ArrayRef<AffineExpr *> results,
                          MLIRContext *context) {
  // The number of results can't be zero.
  assert(!results.empty());

  auto &impl = context->getImpl();

  // Check if we already have this affine map.
  auto key = std::make_tuple(dimCount, symbolCount, results);
  auto existing = impl.affineMaps.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *res = impl.allocator.Allocate<AffineMap>();

  // Copy the results into the bump pointer.
  results = impl.copyInto(ArrayRef<AffineExpr *>(results));

  // Initialize the memory using placement new.
  new (res) AffineMap(dimCount, symbolCount, results.size(), results.data());

  // Cache and return it.
  return *existing.first = res;
}

AffineBinaryOpExpr *AffineBinaryOpExpr::get(AffineExpr::Kind kind,
                                            AffineExpr *lhsOperand,
                                            AffineExpr *rhsOperand,
                                            MLIRContext *context) {
  auto &impl = context->getImpl();

  // Check if we already have this affine expression.
  auto keyValue = std::make_tuple((unsigned)kind, lhsOperand, rhsOperand);
  auto *&result = impl.affineExprs[keyValue];

  // If we don't have it yet, create it and cache it in the map.
  if (!result) {
    // On the first use, we allocate them into the bump pointer.
    result = impl.allocator.Allocate<AffineBinaryOpExpr>();

    // Initialize the memory using placement new.
    new (result) AffineBinaryOpExpr(kind, lhsOperand, rhsOperand);
  }
  return result;
}
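
// For illustration, a sketch of how the getters in this file compose to build
// a uniqued affine map such as (d0 + s0):
//
//   auto *d0 = AffineDimExpr::get(0, ctx);
//   auto *s0 = AffineSymbolExpr::get(0, ctx);
//   auto *sum = AffineAddExpr::get(d0, s0, ctx);
//   auto *map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/1, {sum}, ctx);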

// TODO(bondhugula): complete uniquing of remaining AffineExpr sub-classes.
AffineAddExpr *AffineAddExpr::get(AffineExpr *lhsOperand,
                                  AffineExpr *rhsOperand,
                                  MLIRContext *context) {
  return cast<AffineAddExpr>(
      AffineBinaryOpExpr::get(Kind::Add, lhsOperand, rhsOperand, context));
}

AffineSubExpr *AffineSubExpr::get(AffineExpr *lhsOperand,
                                  AffineExpr *rhsOperand,
                                  MLIRContext *context) {
  return cast<AffineSubExpr>(
      AffineBinaryOpExpr::get(Kind::Sub, lhsOperand, rhsOperand, context));
}

AffineMulExpr *AffineMulExpr::get(AffineExpr *lhsOperand,
                                  AffineExpr *rhsOperand,
                                  MLIRContext *context) {
  return cast<AffineMulExpr>(
      AffineBinaryOpExpr::get(Kind::Mul, lhsOperand, rhsOperand, context));
}

AffineFloorDivExpr *AffineFloorDivExpr::get(AffineExpr *lhsOperand,
                                            AffineExpr *rhsOperand,
                                            MLIRContext *context) {
  return cast<AffineFloorDivExpr>(
      AffineBinaryOpExpr::get(Kind::FloorDiv, lhsOperand, rhsOperand, context));
}

AffineCeilDivExpr *AffineCeilDivExpr::get(AffineExpr *lhsOperand,
                                          AffineExpr *rhsOperand,
                                          MLIRContext *context) {
  return cast<AffineCeilDivExpr>(
      AffineBinaryOpExpr::get(Kind::CeilDiv, lhsOperand, rhsOperand, context));
}

AffineModExpr *AffineModExpr::get(AffineExpr *lhsOperand,
                                  AffineExpr *rhsOperand,
                                  MLIRContext *context) {
  return cast<AffineModExpr>(
      AffineBinaryOpExpr::get(Kind::Mod, lhsOperand, rhsOperand, context));
}

AffineDimExpr *AffineDimExpr::get(unsigned position, MLIRContext *context) {
  // TODO(bondhugula): complete this
  // FIXME: this should be POD
  return new AffineDimExpr(position);
}

AffineSymbolExpr *AffineSymbolExpr::get(unsigned position,
                                        MLIRContext *context) {
  // TODO(bondhugula): complete this
  // FIXME: this should be POD
  return new AffineSymbolExpr(position);
}

AffineConstantExpr *AffineConstantExpr::get(int64_t constant,
                                            MLIRContext *context) {
  // TODO(bondhugula): complete this
  // FIXME: this should be POD
  return new AffineConstantExpr(constant);
}