//===- MLIRContext.cpp - MLIR Context Classes -----------------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#include "mlir/IR/MLIRContext.h"
#include "AttributeListStorage.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Identifier.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/OperationSet.h"
#include "mlir/IR/StandardOps.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/STLExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/raw_ostream.h"
using namespace mlir;
using namespace llvm;

namespace {
struct FunctionTypeKeyInfo : DenseMapInfo<FunctionType *> {
  // Function types are uniqued based on their inputs and results.
  using KeyTy = std::pair<ArrayRef<Type *>, ArrayRef<Type *>>;
  using DenseMapInfo<FunctionType *>::getHashValue;
  using DenseMapInfo<FunctionType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        hash_combine_range(key.first.begin(), key.first.end()),
        hash_combine_range(key.second.begin(), key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const FunctionType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getInputs(), rhs->getResults());
  }
};
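
// All of the *KeyInfo structs in this anonymous namespace follow the same
// pattern: a by-value KeyTy that describes an object, a getHashValue overload
// for that key, and an isEqual overload comparing a key against an existing
// uniqued object. Illustrative sketch of how the uniquing code later in this
// file uses such a trait (comment only; `inputs` and `results` are
// hypothetical):
//
//   DenseSet<FunctionType *, FunctionTypeKeyInfo> uniqued;
//   FunctionTypeKeyInfo::KeyTy key(inputs, results);
//   auto existing = uniqued.insert_as(nullptr, key);
//   if (!existing.second)
//     return *existing.first; // Already present; reuse the uniqued object.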

struct AffineMapKeyInfo : DenseMapInfo<AffineMap *> {
  // Affine maps are uniqued based on their dim/symbol counts and affine
  // expressions.
  using KeyTy = std::tuple<unsigned, unsigned, ArrayRef<AffineExpr *>,
                           ArrayRef<AffineExpr *>>;
  using DenseMapInfo<AffineMap *>::getHashValue;
  using DenseMapInfo<AffineMap *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        std::get<0>(key), std::get<1>(key),
        hash_combine_range(std::get<2>(key).begin(), std::get<2>(key).end()),
        hash_combine_range(std::get<3>(key).begin(), std::get<3>(key).end()));
  }

  static bool isEqual(const KeyTy &lhs, const AffineMap *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == std::make_tuple(rhs->getNumDims(), rhs->getNumSymbols(),
                                  rhs->getResults(), rhs->getRangeSizes());
  }
};

struct VectorTypeKeyInfo : DenseMapInfo<VectorType *> {
  // Vectors are uniqued based on their element type and shape.
  using KeyTy = std::pair<Type *, ArrayRef<unsigned>>;
  using DenseMapInfo<VectorType *>::getHashValue;
  using DenseMapInfo<VectorType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        DenseMapInfo<Type *>::getHashValue(key.first),
        hash_combine_range(key.second.begin(), key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const VectorType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
  }
};

struct RankedTensorTypeKeyInfo : DenseMapInfo<RankedTensorType *> {
  // Ranked tensors are uniqued based on their element type and shape.
  using KeyTy = std::pair<Type *, ArrayRef<int>>;
  using DenseMapInfo<RankedTensorType *>::getHashValue;
  using DenseMapInfo<RankedTensorType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        DenseMapInfo<Type *>::getHashValue(key.first),
        hash_combine_range(key.second.begin(), key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const RankedTensorType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
  }
};

struct MemRefTypeKeyInfo : DenseMapInfo<MemRefType *> {
  // MemRefs are uniqued based on their element type, shape, affine map
  // composition, and memory space.
  using KeyTy =
      std::tuple<Type *, ArrayRef<int>, ArrayRef<AffineMap *>, unsigned>;
  using DenseMapInfo<MemRefType *>::getHashValue;
  using DenseMapInfo<MemRefType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        DenseMapInfo<Type *>::getHashValue(std::get<0>(key)),
        hash_combine_range(std::get<1>(key).begin(), std::get<1>(key).end()),
        hash_combine_range(std::get<2>(key).begin(), std::get<2>(key).end()),
        std::get<3>(key));
  }

  static bool isEqual(const KeyTy &lhs, const MemRefType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == std::make_tuple(rhs->getElementType(), rhs->getShape(),
                                  rhs->getAffineMaps(), rhs->getMemorySpace());
  }
};

struct ArrayAttrKeyInfo : DenseMapInfo<ArrayAttr *> {
  // Array attributes are uniqued based on their elements.
  using KeyTy = ArrayRef<Attribute *>;
  using DenseMapInfo<ArrayAttr *>::getHashValue;
  using DenseMapInfo<ArrayAttr *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine_range(key.begin(), key.end());
  }

  static bool isEqual(const KeyTy &lhs, const ArrayAttr *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == rhs->getValue();
  }
};

struct AttributeListKeyInfo : DenseMapInfo<AttributeListStorage *> {
  // Attribute lists are uniqued based on their elements.
  using KeyTy = ArrayRef<NamedAttribute>;
  using DenseMapInfo<AttributeListStorage *>::getHashValue;
  using DenseMapInfo<AttributeListStorage *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine_range(key.begin(), key.end());
  }

  static bool isEqual(const KeyTy &lhs, const AttributeListStorage *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == rhs->getElements();
  }
};

} // end anonymous namespace.

namespace mlir {
/// This is the implementation of the MLIRContext class, using the pImpl idiom.
/// This class is completely private to this file, so everything is public.
class MLIRContextImpl {
public:
  /// This is the set of all operations that are registered with the system.
  OperationSet operationSet;

  /// We put location info into this allocator, since it is generally not
  /// touched by compiler passes.
  llvm::BumpPtrAllocator locationAllocator;

  /// The singleton for UnknownLoc.
  UnknownLoc *theUnknownLoc = nullptr;

  /// These are filename locations uniqued into this MLIRContext.
  llvm::StringMap<char, llvm::BumpPtrAllocator &> filenames;

  /// FileLineColLoc uniquing.
  DenseMap<std::tuple<const char *, unsigned, unsigned>, FileLineColLoc *>
      fileLineColLocs;

  /// We put immortal objects into this allocator.
  llvm::BumpPtrAllocator allocator;

  /// This is the handler to use to report diagnostics, or null if not
  /// registered.
  MLIRContext::DiagnosticHandlerTy diagnosticHandler;

  /// These are identifiers uniqued into this MLIRContext.
  llvm::StringMap<char, llvm::BumpPtrAllocator &> identifiers;

  // Uniquing table for 'other' types.
  OtherType *otherTypes[int(Type::Kind::LAST_OTHER_TYPE) -
                        int(Type::Kind::FIRST_OTHER_TYPE) + 1] = {nullptr};

  // Uniquing table for 'float' types.
  FloatType *floatTypes[int(Type::Kind::LAST_FLOATING_POINT_TYPE) -
                        int(Type::Kind::FIRST_FLOATING_POINT_TYPE) + 1] = {
      nullptr};

  // Affine map uniquing.
  using AffineMapSet = DenseSet<AffineMap *, AffineMapKeyInfo>;
  AffineMapSet affineMaps;

  // Affine binary op expression uniquing. TODO: Figure out uniquing of
  // dimensional or symbolic identifiers.
  DenseMap<std::tuple<unsigned, AffineExpr *, AffineExpr *>, AffineExpr *>
      affineExprs;

  // Uniquing of AffineDimExprs and AffineSymbolExprs by their position.
  std::vector<AffineDimExpr *> dimExprs;
  std::vector<AffineSymbolExpr *> symbolExprs;

  // Uniquing of AffineConstantExprs using the constant value as the key.
  DenseMap<int64_t, AffineConstantExpr *> constExprs;

  /// Integer type uniquing.
  DenseMap<unsigned, IntegerType *> integers;

  /// Function type uniquing.
  using FunctionTypeSet = DenseSet<FunctionType *, FunctionTypeKeyInfo>;
  FunctionTypeSet functions;

  /// Vector type uniquing.
  using VectorTypeSet = DenseSet<VectorType *, VectorTypeKeyInfo>;
  VectorTypeSet vectors;

  /// Ranked tensor type uniquing.
  using RankedTensorTypeSet =
      DenseSet<RankedTensorType *, RankedTensorTypeKeyInfo>;
  RankedTensorTypeSet rankedTensors;

  /// Unranked tensor type uniquing.
  DenseMap<Type *, UnrankedTensorType *> unrankedTensors;

  /// MemRef type uniquing.
  using MemRefTypeSet = DenseSet<MemRefType *, MemRefTypeKeyInfo>;
  MemRefTypeSet memrefs;

  // Attribute uniquing.
  BoolAttr *boolAttrs[2] = {nullptr};
  DenseMap<int64_t, IntegerAttr *> integerAttrs;
  DenseMap<int64_t, FloatAttr *> floatAttrs;
  StringMap<StringAttr *> stringAttrs;
  using ArrayAttrSet = DenseSet<ArrayAttr *, ArrayAttrKeyInfo>;
  ArrayAttrSet arrayAttrs;
  DenseMap<AffineMap *, AffineMapAttr *> affineMapAttrs;
  DenseMap<Type *, TypeAttr *> typeAttrs;
  using AttributeListSet =
      DenseSet<AttributeListStorage *, AttributeListKeyInfo>;
  AttributeListSet attributeLists;
  DenseMap<const Function *, FunctionAttr *> functionAttrs;

public:
  MLIRContextImpl() : filenames(locationAllocator), identifiers(allocator) {
    registerStandardOperations(operationSet);
  }

  /// Copy the specified array of elements into memory managed by our bump
  /// pointer allocator. This assumes the elements are all PODs.
  template <typename T>
  ArrayRef<T> copyInto(ArrayRef<T> elements) {
    auto result = allocator.Allocate<T>(elements.size());
    std::uninitialized_copy(elements.begin(), elements.end(), result);
    return ArrayRef<T>(result, elements.size());
  }
};
} // end namespace mlir

MLIRContext::MLIRContext() : impl(new MLIRContextImpl()) {}

MLIRContext::~MLIRContext() {}

/// Register a diagnostic handler with this MLIR context. The handler is
/// passed location information if present (nullptr if not) along with a
/// message and the kind of diagnostic (error, warning, or note).
void MLIRContext::registerDiagnosticHandler(
    const DiagnosticHandlerTy &handler) {
  getImpl().diagnosticHandler = handler;
}

/// Return the current diagnostic handler, or null if none is present.
auto MLIRContext::getDiagnosticHandler() const -> DiagnosticHandlerTy {
  return getImpl().diagnosticHandler;
}

/// This emits a diagnostic using the registered diagnostic handler if present,
/// or with the default behavior if not. The MLIR compiler should not generally
/// call this directly; it should use methods on Operation instead.
void MLIRContext::emitDiagnostic(Location *location,
                                 const llvm::Twine &message,
                                 DiagnosticKind kind) const {
  // If we had a handler registered, emit the diagnostic using it.
  auto handler = getImpl().diagnosticHandler;
  if (handler && location)
    return handler(location, message.str(), kind);

  // The default behavior for notes and warnings is to ignore them.
  if (kind != DiagnosticKind::Error)
    return;

  // TODO(clattner): can improve this now!

  // The default behavior for errors is to emit them to stderr and exit.
  llvm::errs() << message.str() << "\n";
  llvm::errs().flush();
  exit(1);
}
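
// Illustrative sketch of registering a handler (comment only; assumes
// DiagnosticHandlerTy is callable with the (location, message, kind) argument
// list used above -- see MLIRContext.h for its exact spelling):
//
//   MLIRContext context;
//   context.registerDiagnosticHandler(
//       [](Location *loc, const std::string &message,
//          MLIRContext::DiagnosticKind kind) {
//         llvm::errs() << "diagnostic: " << message << "\n";
//       });
//   context.emitDiagnostic(UnknownLoc::get(&context), "something went wrong",
//                          MLIRContext::DiagnosticKind::Error);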

/// Return the operation set associated with the specified MLIRContext object.
OperationSet &OperationSet::get(MLIRContext *context) {
  return context->getImpl().operationSet;
}

/// If this operation has a registered operation description in the
/// OperationSet, return it. Otherwise return null.
const AbstractOperation *Operation::getAbstractOperation() const {
  return OperationSet::get(getContext()).lookup(getName().str());
}

//===----------------------------------------------------------------------===//
// Identifier uniquing
//===----------------------------------------------------------------------===//

/// Return an identifier for the specified string.
Identifier Identifier::get(StringRef str, const MLIRContext *context) {
  assert(!str.empty() && "Cannot create an empty identifier");
  assert(str.find('\0') == StringRef::npos &&
         "Cannot create an identifier with a nul character");

  auto &impl = context->getImpl();
  auto it = impl.identifiers.insert({str, char()}).first;
  return Identifier(it->getKeyData());
}
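
// Illustrative usage sketch (comment only, assuming an MLIRContext `context`):
// identifiers are uniqued into the context's string map, so repeated requests
// for the same string share the same underlying storage.
//
//   auto name = Identifier::get("foo", &context);
//   auto again = Identifier::get("foo", &context);
//   assert(name.str() == "foo" && name.str() == again.str());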

//===----------------------------------------------------------------------===//
// Location uniquing
//===----------------------------------------------------------------------===//

UnknownLoc *UnknownLoc::get(MLIRContext *context) {
  auto &impl = context->getImpl();
  if (auto *result = impl.theUnknownLoc)
    return result;

  impl.theUnknownLoc = impl.allocator.Allocate<UnknownLoc>();
  new (impl.theUnknownLoc) UnknownLoc();
  return impl.theUnknownLoc;
}

UniquedFilename UniquedFilename::get(StringRef filename, MLIRContext *context) {
  auto &impl = context->getImpl();
  auto it = impl.filenames.insert({filename, char()}).first;
  return UniquedFilename(it->getKeyData());
}

FileLineColLoc *FileLineColLoc::get(UniquedFilename filename, unsigned line,
                                    unsigned column, MLIRContext *context) {
  auto &impl = context->getImpl();
  auto &entry =
      impl.fileLineColLocs[std::make_tuple(filename.data(), line, column)];
  if (!entry) {
    entry = impl.allocator.Allocate<FileLineColLoc>();
    new (entry) FileLineColLoc(filename, line, column);
  }

  return entry;
}
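
// Illustrative usage sketch (comment only, assuming an MLIRContext `context`):
// filenames and file/line/column locations are uniqued, so identical requests
// return the same pointer.
//
//   auto file = UniquedFilename::get("test.mlir", &context);
//   auto *loc = FileLineColLoc::get(file, /*line=*/12, /*column=*/3, &context);
//   assert(loc == FileLineColLoc::get(file, 12, 3, &context));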

//===----------------------------------------------------------------------===//
// Type uniquing
//===----------------------------------------------------------------------===//

IntegerType *IntegerType::get(unsigned width, MLIRContext *context) {
  auto &impl = context->getImpl();

  auto *&result = impl.integers[width];
  if (!result) {
    result = impl.allocator.Allocate<IntegerType>();
    new (result) IntegerType(width, context);
  }

  return result;
}

FloatType *FloatType::get(Kind kind, MLIRContext *context) {
  assert(kind >= Kind::FIRST_FLOATING_POINT_TYPE &&
         kind <= Kind::LAST_FLOATING_POINT_TYPE && "Not an FP type kind");
  auto &impl = context->getImpl();

  // We normally have these types.
  auto *&entry =
      impl.floatTypes[(int)kind - int(Kind::FIRST_FLOATING_POINT_TYPE)];
  if (entry)
    return entry;

  // On the first use, we allocate them into the bump pointer.
  auto *ptr = impl.allocator.Allocate<FloatType>();

  // Initialize the memory using placement new.
  new (ptr) FloatType(kind, context);

  // Cache and return it.
  return entry = ptr;
}

OtherType *OtherType::get(Kind kind, MLIRContext *context) {
  assert(kind >= Kind::FIRST_OTHER_TYPE && kind <= Kind::LAST_OTHER_TYPE &&
         "Not an 'other' type kind");
  auto &impl = context->getImpl();

  // We normally have these types.
  auto *&entry = impl.otherTypes[(int)kind - int(Kind::FIRST_OTHER_TYPE)];
  if (entry)
    return entry;

  // On the first use, we allocate them into the bump pointer.
  auto *ptr = impl.allocator.Allocate<OtherType>();

  // Initialize the memory using placement new.
  new (ptr) OtherType(kind, context);

  // Cache and return it.
  return entry = ptr;
}

FunctionType *FunctionType::get(ArrayRef<Type *> inputs,
                                ArrayRef<Type *> results,
                                MLIRContext *context) {
  auto &impl = context->getImpl();

  // Look to see if we already have this function type.
  FunctionTypeKeyInfo::KeyTy key(inputs, results);
  auto existing = impl.functions.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<FunctionType>();

  // Copy the inputs and results into the bump pointer.
  SmallVector<Type *, 16> types;
  types.reserve(inputs.size() + results.size());
  types.append(inputs.begin(), inputs.end());
  types.append(results.begin(), results.end());
  auto typesList = impl.copyInto(ArrayRef<Type *>(types));

  // Initialize the memory using placement new.
  new (result)
      FunctionType(typesList.data(), inputs.size(), results.size(), context);

  // Cache and return it.
  return *existing.first = result;
}
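
// Illustrative usage sketch (comment only, assuming `context` and the integer
// types from the sketch above): function types are uniqued on their full input
// and result lists.
//
//   auto *fnTy = FunctionType::get({i32}, {i1}, &context);
//   assert(fnTy == FunctionType::get({i32}, {i1}, &context));
//   assert(fnTy->getInputs().size() == 1 && fnTy->getResults().size() == 1);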

VectorType *VectorType::get(ArrayRef<unsigned> shape, Type *elementType) {
  assert(!shape.empty() && "vector types must have at least one dimension");
  assert((isa<FloatType>(elementType) || isa<IntegerType>(elementType)) &&
         "vector elements must be primitives");

  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this vector type.
  VectorTypeKeyInfo::KeyTy key(elementType, shape);
  auto existing = impl.vectors.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<VectorType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Initialize the memory using placement new.
  new (result) VectorType(shape, elementType, context);

  // Cache and return it.
  return *existing.first = result;
}
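
// Illustrative usage sketch (comment only, assuming an MLIRContext `context`):
// a vector type is uniqued on its shape and element type.
//
//   auto *i32 = IntegerType::get(32, &context);
//   auto *vec4x4 = VectorType::get({4, 4}, i32);
//   assert(vec4x4 == VectorType::get({4, 4}, i32));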

RankedTensorType *RankedTensorType::get(ArrayRef<int> shape,
                                        Type *elementType) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this ranked tensor type.
  RankedTensorTypeKeyInfo::KeyTy key(elementType, shape);
  auto existing = impl.rankedTensors.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<RankedTensorType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Initialize the memory using placement new.
  new (result) RankedTensorType(shape, elementType, context);

  // Cache and return it.
  return *existing.first = result;
}

UnrankedTensorType *UnrankedTensorType::get(Type *elementType) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this unranked tensor type.
  auto *&result = impl.unrankedTensors[elementType];

  // If we already have it, return that value.
  if (result)
    return result;

  // On the first use, we allocate them into the bump pointer.
  result = impl.allocator.Allocate<UnrankedTensorType>();

  // Initialize the memory using placement new.
  new (result) UnrankedTensorType(elementType, context);
  return result;
}

MemRefType *MemRefType::get(ArrayRef<int> shape, Type *elementType,
                            ArrayRef<AffineMap *> affineMapComposition,
                            unsigned memorySpace) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this memref type.
  auto key =
      std::make_tuple(elementType, shape, affineMapComposition, memorySpace);
  auto existing = impl.memrefs.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<MemRefType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Copy the affine map composition into the bump pointer.
  // TODO(andydavis) Assert that the structure of the composition is valid.
  affineMapComposition =
      impl.copyInto(ArrayRef<AffineMap *>(affineMapComposition));

  // Initialize the memory using placement new.
  new (result) MemRefType(shape, elementType, affineMapComposition, memorySpace,
                          context);
  // Cache and return it.
  return *existing.first = result;
}
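
// Illustrative usage sketch (comment only, assuming an MLIRContext `context`):
// a memref type is uniqued on element type, shape, affine map composition, and
// memory space. A one-dimensional identity map keeps the sketch self-contained.
//
//   auto *d0 = AffineDimExpr::get(0, &context);
//   auto *map = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0, {d0},
//                              /*rangeSizes=*/{}, &context);
//   auto *i8 = IntegerType::get(8, &context);
//   auto *mref = MemRefType::get({128}, i8, {map}, /*memorySpace=*/0);
//   assert(mref == MemRefType::get({128}, i8, {map}, 0));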

//===----------------------------------------------------------------------===//
// Attribute uniquing
//===----------------------------------------------------------------------===//

BoolAttr *BoolAttr::get(bool value, MLIRContext *context) {
  auto *&result = context->getImpl().boolAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<BoolAttr>();
  new (result) BoolAttr(value);
  return result;
}

IntegerAttr *IntegerAttr::get(int64_t value, MLIRContext *context) {
  auto *&result = context->getImpl().integerAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<IntegerAttr>();
  new (result) IntegerAttr(value);
  return result;
}

FloatAttr *FloatAttr::get(double value, MLIRContext *context) {
  // We hash based on the bit representation of the double to ensure we don't
  // merge things like -0.0 and 0.0 in the hash comparison.
  union {
    double floatValue;
    int64_t intValue;
  };
  floatValue = value;

  auto *&result = context->getImpl().floatAttrs[intValue];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<FloatAttr>();
  new (result) FloatAttr(value);
  return result;
}

StringAttr *StringAttr::get(StringRef bytes, MLIRContext *context) {
  auto it = context->getImpl().stringAttrs.insert({bytes, nullptr}).first;

  if (it->second)
    return it->second;

  auto result = context->getImpl().allocator.Allocate<StringAttr>();
  new (result) StringAttr(it->first());
  it->second = result;
  return result;
}

ArrayAttr *ArrayAttr::get(ArrayRef<Attribute *> value, MLIRContext *context) {
  auto &impl = context->getImpl();

  // Look to see if we already have this.
  auto existing = impl.arrayAttrs.insert_as(nullptr, value);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<ArrayAttr>();

  // Copy the elements into the bump pointer.
  value = impl.copyInto(value);

  // Check to see if any of the elements have a function attr.
  bool hasFunctionAttr = false;
  for (auto *elt : value)
    if (elt->isOrContainsFunction()) {
      hasFunctionAttr = true;
      break;
    }

  // Initialize the memory using placement new.
  new (result) ArrayAttr(value, hasFunctionAttr);

  // Cache and return it.
  return *existing.first = result;
}

AffineMapAttr *AffineMapAttr::get(AffineMap *value, MLIRContext *context) {
  auto *&result = context->getImpl().affineMapAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<AffineMapAttr>();
  new (result) AffineMapAttr(value);
  return result;
}

TypeAttr *TypeAttr::get(Type *type, MLIRContext *context) {
  auto *&result = context->getImpl().typeAttrs[type];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<TypeAttr>();
  new (result) TypeAttr(type);
  return result;
}

FunctionAttr *FunctionAttr::get(const Function *value, MLIRContext *context) {
  assert(value && "Cannot get FunctionAttr for a null function");

  auto *&result = context->getImpl().functionAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<FunctionAttr>();
  new (result) FunctionAttr(const_cast<Function *>(value));
  return result;
}

FunctionType *FunctionAttr::getType() const { return getValue()->getType(); }

/// This function is used by the internals of the Function class to null out
/// attributes referring to functions that are about to be deleted.
void FunctionAttr::dropFunctionReference(Function *value) {
  // Check to see if there was an attribute referring to this function.
  auto &functionAttrs = value->getContext()->getImpl().functionAttrs;

  // If not, then we're done.
  auto it = functionAttrs.find(value);
  if (it == functionAttrs.end())
    return;

  // If so, null out the function reference in the attribute (to avoid dangling
  // pointers) and remove the entry from the map so the map doesn't contain
  // dangling keys.
  it->second->value = nullptr;
  functionAttrs.erase(it);
}

/// Perform a three-way comparison between the names of the specified
/// NamedAttributes.
static int compareNamedAttributes(const NamedAttribute *lhs,
                                  const NamedAttribute *rhs) {
  return lhs->first.str().compare(rhs->first.str());
}

/// Given a list of NamedAttributes, canonicalize the list (sorting
/// by name) and return the uniqued result. Note that the empty list is
/// represented with a null pointer.
AttributeListStorage *AttributeListStorage::get(ArrayRef<NamedAttribute> attrs,
                                                MLIRContext *context) {
  // We need to sort the element list to canonicalize it, but we also don't
  // want to do a ton of work in the super common case where the element list
  // is already sorted.
  SmallVector<NamedAttribute, 8> storage;
  switch (attrs.size()) {
  case 0:
    // An empty list is represented with a null pointer.
    return nullptr;
  case 1:
    // A single element is already sorted.
    break;
  case 2:
    // Don't invoke a general sort for the two-element case.
    if (attrs[0].first.str() > attrs[1].first.str()) {
      storage.push_back(attrs[1]);
      storage.push_back(attrs[0]);
      attrs = storage;
    }
    break;
  default:
    // Check to see if they are sorted already.
    bool isSorted = true;
    for (unsigned i = 0, e = attrs.size() - 1; i != e; ++i) {
      if (attrs[i].first.str() > attrs[i + 1].first.str()) {
        isSorted = false;
        break;
      }
    }
    // If not, do a general sort.
    if (!isSorted) {
      storage.append(attrs.begin(), attrs.end());
      llvm::array_pod_sort(storage.begin(), storage.end(),
                           compareNamedAttributes);
      attrs = storage;
    }
  }

  // Ok, now that we've canonicalized our attributes, unique them.
  auto &impl = context->getImpl();

  // Look to see if we already have this.
  auto existing = impl.attributeLists.insert_as(nullptr, attrs);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // Otherwise, allocate a new AttributeListStorage, unique it and return it.
  auto byteSize =
      AttributeListStorage::totalSizeToAlloc<NamedAttribute>(attrs.size());
  auto rawMem = impl.allocator.Allocate(byteSize, alignof(NamedAttribute));

  // Placement initialize the AttributeListStorage.
  auto result = ::new (rawMem) AttributeListStorage(attrs.size());
  std::uninitialized_copy(attrs.begin(), attrs.end(),
                          result->getTrailingObjects<NamedAttribute>());
  return *existing.first = result;
}
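
// Illustrative usage sketch (comment only, assuming an MLIRContext `context`
// and that NamedAttribute is the (Identifier, Attribute *) pair used above):
// attribute lists are canonicalized by sorting on name, so differently ordered
// inputs unique to the same storage.
//
//   NamedAttribute a = {Identifier::get("a", &context),
//                       IntegerAttr::get(1, &context)};
//   NamedAttribute b = {Identifier::get("b", &context),
//                       BoolAttr::get(true, &context)};
//   assert(AttributeListStorage::get({a, b}, &context) ==
//          AttributeListStorage::get({b, a}, &context));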

//===----------------------------------------------------------------------===//
// AffineMap and AffineExpr uniquing
//===----------------------------------------------------------------------===//

AffineMap *AffineMap::get(unsigned dimCount, unsigned symbolCount,
                          ArrayRef<AffineExpr *> results,
                          ArrayRef<AffineExpr *> rangeSizes,
                          MLIRContext *context) {
  // The number of results can't be zero.
  assert(!results.empty());

  assert(rangeSizes.empty() || results.size() == rangeSizes.size());

  auto &impl = context->getImpl();

  // Check if we already have this affine map.
  auto key = std::make_tuple(dimCount, symbolCount, results, rangeSizes);
  auto existing = impl.affineMaps.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *res = impl.allocator.Allocate<AffineMap>();

  // Copy the results and range sizes into the bump pointer.
  results = impl.copyInto(ArrayRef<AffineExpr *>(results));
  rangeSizes = impl.copyInto(ArrayRef<AffineExpr *>(rangeSizes));

  // Initialize the memory using placement new.
  new (res) AffineMap(dimCount, symbolCount, results.size(), results.data(),
                      rangeSizes.empty() ? nullptr : rangeSizes.data());

  // Cache and return it.
  return *existing.first = res;
}

/// Return a binary affine op expression with the specified op type and
/// operands: if it doesn't exist, create it and store it; if it is already
/// present, return the stored one. The stored expressions are unique: they are
/// constructed and stored in a simplified/canonicalized form. The result after
/// simplification could be any kind of affine expression.
AffineExpr *AffineBinaryOpExpr::get(AffineExpr::Kind kind, AffineExpr *lhs,
                                    AffineExpr *rhs, MLIRContext *context) {
  auto &impl = context->getImpl();

  // Check if we already have this affine expression, and return it if we do.
  auto keyValue = std::make_tuple((unsigned)kind, lhs, rhs);
  auto cached = impl.affineExprs.find(keyValue);
  if (cached != impl.affineExprs.end())
    return cached->second;

  // Simplify the expression if possible.
  AffineExpr *simplified;
  switch (kind) {
  case Kind::Add:
    simplified = AffineBinaryOpExpr::simplifyAdd(lhs, rhs, context);
    break;
  case Kind::Mul:
    simplified = AffineBinaryOpExpr::simplifyMul(lhs, rhs, context);
    break;
  case Kind::FloorDiv:
    simplified = AffineBinaryOpExpr::simplifyFloorDiv(lhs, rhs, context);
    break;
  case Kind::CeilDiv:
    simplified = AffineBinaryOpExpr::simplifyCeilDiv(lhs, rhs, context);
    break;
  case Kind::Mod:
    simplified = AffineBinaryOpExpr::simplifyMod(lhs, rhs, context);
    break;
  default:
    llvm_unreachable("unexpected binary affine expr");
  }

  // The simplified one would have already been cached; just return it.
  if (simplified)
    return simplified;

  // An expression with these operands will already be in the
  // simplified/canonical form. Create and store it.
  auto *result = impl.allocator.Allocate<AffineBinaryOpExpr>();
  // Initialize the memory using placement new.
  new (result) AffineBinaryOpExpr(kind, lhs, rhs);
  bool inserted = impl.affineExprs.insert({keyValue, result}).second;
  assert(inserted && "the expression shouldn't already exist in the map");
  (void)inserted;
  return result;
}

AffineDimExpr *AffineDimExpr::get(unsigned position, MLIRContext *context) {
  auto &impl = context->getImpl();

  // Check if we need to resize.
  if (position >= impl.dimExprs.size())
    impl.dimExprs.resize(position + 1, nullptr);

  auto *&result = impl.dimExprs[position];
  if (result)
    return result;

  result = impl.allocator.Allocate<AffineDimExpr>();
  // Initialize the memory using placement new.
  new (result) AffineDimExpr(position);
  return result;
}

AffineSymbolExpr *AffineSymbolExpr::get(unsigned position,
                                        MLIRContext *context) {
  auto &impl = context->getImpl();

  // Check if we need to resize.
  if (position >= impl.symbolExprs.size())
    impl.symbolExprs.resize(position + 1, nullptr);

  auto *&result = impl.symbolExprs[position];
  if (result)
    return result;

  result = impl.allocator.Allocate<AffineSymbolExpr>();
  // Initialize the memory using placement new.
  new (result) AffineSymbolExpr(position);
  return result;
}

AffineConstantExpr *AffineConstantExpr::get(int64_t constant,
                                            MLIRContext *context) {
  auto &impl = context->getImpl();
  auto *&result = impl.constExprs[constant];

  if (result)
    return result;

  result = impl.allocator.Allocate<AffineConstantExpr>();
  // Initialize the memory using placement new.
  new (result) AffineConstantExpr(constant);
  return result;
}

//===----------------------------------------------------------------------===//
// Integer Sets: these are allocated into the bump pointer, and are immutable.
// But they aren't uniqued like AffineMaps; there isn't an advantage to it.
//===----------------------------------------------------------------------===//

IntegerSet *IntegerSet::get(unsigned dimCount, unsigned symbolCount,
                            ArrayRef<AffineExpr *> constraints,
                            ArrayRef<bool> eqFlags, MLIRContext *context) {
  assert(eqFlags.size() == constraints.size());

  auto &impl = context->getImpl();

  // Allocate them into the bump pointer.
  auto *res = impl.allocator.Allocate<IntegerSet>();

  // Copy the equalities and inequalities into the bump pointer.
  constraints = impl.copyInto(ArrayRef<AffineExpr *>(constraints));
  eqFlags = impl.copyInto(ArrayRef<bool>(eqFlags));

  // Initialize the memory using placement new.
  return new (res) IntegerSet(dimCount, symbolCount, constraints.size(),
                              constraints.data(), eqFlags.data());
}
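
// Illustrative usage sketch (comment only, assuming an MLIRContext `context`):
// the set (d0) : (d0 - 5 >= 0) is built from one constraint expression plus a
// flag saying whether that constraint is an equality. Unlike AffineMaps, every
// call allocates a fresh IntegerSet.
//
//   auto *d0 = AffineDimExpr::get(0, &context);
//   auto *cm5 = AffineConstantExpr::get(-5, &context);
//   auto *expr =
//       AffineBinaryOpExpr::get(AffineExpr::Kind::Add, d0, cm5, &context);
//   auto *set = IntegerSet::get(/*dimCount=*/1, /*symbolCount=*/0, {expr},
//                               /*eqFlags=*/{false}, &context);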