//===- MLIRContext.cpp - MLIR Context Implementation ---------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================

#include "mlir/IR/MLIRContext.h"
#include "AttributeListStorage.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Identifier.h"
#include "mlir/IR/IntegerSet.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/OperationSet.h"
#include "mlir/IR/StandardOps.h"
#include "mlir/IR/Types.h"
#include "mlir/Support/STLExtras.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/raw_ostream.h"

using namespace mlir;
using namespace llvm;

namespace {
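// The KeyInfo structs below provide custom DenseMapInfo implementations so
// that each uniqued object can be looked up by a structural key (its KeyTy)
// while the sets themselves store only pointers to bump-allocated objects.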
struct FunctionTypeKeyInfo : DenseMapInfo<FunctionType *> {
  // Functions are uniqued based on their inputs and results.
  using KeyTy = std::pair<ArrayRef<Type *>, ArrayRef<Type *>>;
  using DenseMapInfo<FunctionType *>::getHashValue;
  using DenseMapInfo<FunctionType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        hash_combine_range(key.first.begin(), key.first.end()),
        hash_combine_range(key.second.begin(), key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const FunctionType *rhs) {
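    // The empty and tombstone keys are sentinel pointers used internally by
    // DenseSet; they never correspond to real uniqued objects, so they can
    // never equal a real key.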
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getInputs(), rhs->getResults());
  }
};

struct AffineMapKeyInfo : DenseMapInfo<AffineMap *> {
  // Affine maps are uniqued based on their dim/symbol counts and affine
  // expressions.
  using KeyTy = std::tuple<unsigned, unsigned, ArrayRef<AffineExpr *>,
                           ArrayRef<AffineExpr *>>;
  using DenseMapInfo<AffineMap *>::getHashValue;
  using DenseMapInfo<AffineMap *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        std::get<0>(key), std::get<1>(key),
        hash_combine_range(std::get<2>(key).begin(), std::get<2>(key).end()),
        hash_combine_range(std::get<3>(key).begin(), std::get<3>(key).end()));
  }

  static bool isEqual(const KeyTy &lhs, const AffineMap *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == std::make_tuple(rhs->getNumDims(), rhs->getNumSymbols(),
                                  rhs->getResults(), rhs->getRangeSizes());
  }
};

struct VectorTypeKeyInfo : DenseMapInfo<VectorType *> {
  // Vectors are uniqued based on their element type and shape.
  using KeyTy = std::pair<Type *, ArrayRef<unsigned>>;
  using DenseMapInfo<VectorType *>::getHashValue;
  using DenseMapInfo<VectorType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        DenseMapInfo<Type *>::getHashValue(key.first),
        hash_combine_range(key.second.begin(), key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const VectorType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
  }
};

struct RankedTensorTypeKeyInfo : DenseMapInfo<RankedTensorType *> {
  // Ranked tensors are uniqued based on their element type and shape.
  using KeyTy = std::pair<Type *, ArrayRef<int>>;
  using DenseMapInfo<RankedTensorType *>::getHashValue;
  using DenseMapInfo<RankedTensorType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        DenseMapInfo<Type *>::getHashValue(key.first),
        hash_combine_range(key.second.begin(), key.second.end()));
  }

  static bool isEqual(const KeyTy &lhs, const RankedTensorType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
  }
};

struct MemRefTypeKeyInfo : DenseMapInfo<MemRefType *> {
  // MemRefs are uniqued based on their element type, shape, affine map
  // composition, and memory space.
  using KeyTy =
      std::tuple<Type *, ArrayRef<int>, ArrayRef<AffineMap *>, unsigned>;
  using DenseMapInfo<MemRefType *>::getHashValue;
  using DenseMapInfo<MemRefType *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine(
        DenseMapInfo<Type *>::getHashValue(std::get<0>(key)),
        hash_combine_range(std::get<1>(key).begin(), std::get<1>(key).end()),
        hash_combine_range(std::get<2>(key).begin(), std::get<2>(key).end()),
        std::get<3>(key));
  }

  static bool isEqual(const KeyTy &lhs, const MemRefType *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == std::make_tuple(rhs->getElementType(), rhs->getShape(),
                                  rhs->getAffineMaps(), rhs->getMemorySpace());
  }
};

struct ArrayAttrKeyInfo : DenseMapInfo<ArrayAttr *> {
  // Array attributes are uniqued based on their elements.
  using KeyTy = ArrayRef<Attribute *>;
  using DenseMapInfo<ArrayAttr *>::getHashValue;
  using DenseMapInfo<ArrayAttr *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine_range(key.begin(), key.end());
  }

  static bool isEqual(const KeyTy &lhs, const ArrayAttr *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == rhs->getValue();
  }
};

struct AttributeListKeyInfo : DenseMapInfo<AttributeListStorage *> {
  // Attribute lists are uniqued based on their elements.
  using KeyTy = ArrayRef<NamedAttribute>;
  using DenseMapInfo<AttributeListStorage *>::getHashValue;
  using DenseMapInfo<AttributeListStorage *>::isEqual;

  static unsigned getHashValue(KeyTy key) {
    return hash_combine_range(key.begin(), key.end());
  }

  static bool isEqual(const KeyTy &lhs, const AttributeListStorage *rhs) {
    if (rhs == getEmptyKey() || rhs == getTombstoneKey())
      return false;
    return lhs == rhs->getElements();
  }
};

} // end anonymous namespace.

namespace mlir {
/// This is the implementation of the MLIRContext class, using the pImpl idiom.
/// This class is completely private to this file, so everything is public.
class MLIRContextImpl {
public:
  /// This is the set of all operations that are registered with the system.
  OperationSet operationSet;

  /// We put location info into this allocator, since it is generally not
  /// touched by compiler passes.
  llvm::BumpPtrAllocator locationAllocator;

  /// The singleton for UnknownLoc.
  UnknownLoc *theUnknownLoc = nullptr;

  /// These are filename locations uniqued into this MLIRContext.
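  /// Note: the mapped 'char' value is unused; the StringMap is used only to
  /// unique the strings and to own their storage.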
  llvm::StringMap<char, llvm::BumpPtrAllocator &> filenames;

  /// FileLineColLoc uniquing.
  DenseMap<std::tuple<const char *, unsigned, unsigned>, FileLineColLoc *>
      fileLineColLocs;

  /// We put immortal objects into this allocator.
  llvm::BumpPtrAllocator allocator;

  /// This is the handler to use to report diagnostics, or null if not
  /// registered.
  MLIRContext::DiagnosticHandlerTy diagnosticHandler;

  /// These are identifiers uniqued into this MLIRContext.
  llvm::StringMap<char, llvm::BumpPtrAllocator &> identifiers;

  // Uniquing table for 'other' types.
  OtherType *otherTypes[int(Type::Kind::LAST_OTHER_TYPE) -
                        int(Type::Kind::FIRST_OTHER_TYPE) + 1] = {nullptr};

  // Uniquing table for 'float' types.
  FloatType *floatTypes[int(Type::Kind::LAST_FLOATING_POINT_TYPE) -
                        int(Type::Kind::FIRST_FLOATING_POINT_TYPE) + 1] = {
      nullptr};

  // Affine map uniquing.
  using AffineMapSet = DenseSet<AffineMap *, AffineMapKeyInfo>;
  AffineMapSet affineMaps;

  // Affine binary op expression uniquing. TODO: figure out uniquing of
  // dimensional or symbolic identifiers.
  DenseMap<std::tuple<unsigned, AffineExpr *, AffineExpr *>, AffineExpr *>
      affineExprs;

  // Uniquing of AffineDimExpr and AffineSymbolExpr by their position.
  std::vector<AffineDimExpr *> dimExprs;
  std::vector<AffineSymbolExpr *> symbolExprs;

  // Uniquing of AffineConstantExpr using the constant value as the key.
  DenseMap<int64_t, AffineConstantExpr *> constExprs;

  /// Integer type uniquing.
  DenseMap<unsigned, IntegerType *> integers;

  /// Function type uniquing.
  using FunctionTypeSet = DenseSet<FunctionType *, FunctionTypeKeyInfo>;
  FunctionTypeSet functions;

  /// Vector type uniquing.
  using VectorTypeSet = DenseSet<VectorType *, VectorTypeKeyInfo>;
  VectorTypeSet vectors;

  /// Ranked tensor type uniquing.
  using RankedTensorTypeSet =
      DenseSet<RankedTensorType *, RankedTensorTypeKeyInfo>;
  RankedTensorTypeSet rankedTensors;

  /// Unranked tensor type uniquing.
  DenseMap<Type *, UnrankedTensorType *> unrankedTensors;

  /// MemRef type uniquing.
  using MemRefTypeSet = DenseSet<MemRefType *, MemRefTypeKeyInfo>;
  MemRefTypeSet memrefs;

  // Attribute uniquing.
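  // Attributes are immutable and uniqued within the context, so clients can
  // compare them by pointer identity.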
  BoolAttr *boolAttrs[2] = {nullptr};
  DenseMap<int64_t, IntegerAttr *> integerAttrs;
  DenseMap<int64_t, FloatAttr *> floatAttrs;
  StringMap<StringAttr *> stringAttrs;
  using ArrayAttrSet = DenseSet<ArrayAttr *, ArrayAttrKeyInfo>;
  ArrayAttrSet arrayAttrs;
  DenseMap<AffineMap *, AffineMapAttr *> affineMapAttrs;
  DenseMap<Type *, TypeAttr *> typeAttrs;
  using AttributeListSet =
      DenseSet<AttributeListStorage *, AttributeListKeyInfo>;
  AttributeListSet attributeLists;
  DenseMap<const Function *, FunctionAttr *> functionAttrs;

public:
  MLIRContextImpl() : filenames(locationAllocator), identifiers(allocator) {
    registerStandardOperations(operationSet);
  }

  /// Copy the specified array of elements into memory managed by our bump
  /// pointer allocator. This assumes the elements are all PODs.
  template <typename T>
  ArrayRef<T> copyInto(ArrayRef<T> elements) {
    auto result = allocator.Allocate<T>(elements.size());
    std::uninitialized_copy(elements.begin(), elements.end(), result);
    return ArrayRef<T>(result, elements.size());
  }
};
} // end namespace mlir

MLIRContext::MLIRContext() : impl(new MLIRContextImpl()) {}

MLIRContext::~MLIRContext() {}

/// Register an issue handler with this MLIR context. The issue handler is
/// passed location information if present (nullptr if not) along with a
/// message and a diagnostic kind that indicates whether this is an error,
/// warning, or note.
void MLIRContext::registerDiagnosticHandler(
    const DiagnosticHandlerTy &handler) {
  getImpl().diagnosticHandler = handler;
}

/// Return the current diagnostic handler, or null if none is present.
auto MLIRContext::getDiagnosticHandler() const -> DiagnosticHandlerTy {
  return getImpl().diagnosticHandler;
}

/// This emits a diagnostic using the registered issue handler if present, or
/// with the default behavior if not. The MLIR compiler should not generally
/// interact with this; it should use methods on Operation instead.
void MLIRContext::emitDiagnostic(Location *location,
                                 const llvm::Twine &message,
                                 DiagnosticKind kind) const {
  // If we had a handler registered, emit the diagnostic using it.
  auto handler = getImpl().diagnosticHandler;
  if (handler && location)
    return handler(location, message.str(), kind);

  // The default behavior for notes and warnings is to ignore them.
  if (kind != DiagnosticKind::Error)
    return;

  auto &os = llvm::errs();

  if (auto *fileLoc = dyn_cast_or_null<FileLineColLoc>(location))
    os << fileLoc->getFilename() << ':' << fileLoc->getLine() << ':'
       << fileLoc->getColumn() << ": ";

  os << "error: ";

  // The default behavior for errors is to emit them to stderr and exit.
  os << message.str() << '\n';
  os.flush();
  exit(1);
}

/// Return the operation set associated with the specified MLIRContext object.
OperationSet &OperationSet::get(MLIRContext *context) {
  return context->getImpl().operationSet;
}

/// If this operation has a registered operation description in the
/// OperationSet, return it. Otherwise return null.
const AbstractOperation *Operation::getAbstractOperation() const {
  return OperationSet::get(getContext()).lookup(getName().str());
}

//===----------------------------------------------------------------------===//
// Identifier uniquing
//===----------------------------------------------------------------------===//

/// Return an identifier for the specified string.
Identifier Identifier::get(StringRef str, const MLIRContext *context) {
  assert(!str.empty() && "Cannot create an empty identifier");
  assert(str.find('\0') == StringRef::npos &&
         "Cannot create an identifier with a nul character");

  auto &impl = context->getImpl();
  auto it = impl.identifiers.insert({str, char()}).first;
  return Identifier(it->getKeyData());
}

//===----------------------------------------------------------------------===//
// Location uniquing
//===----------------------------------------------------------------------===//

UnknownLoc *UnknownLoc::get(MLIRContext *context) {
  auto &impl = context->getImpl();
  if (auto *result = impl.theUnknownLoc)
    return result;

  impl.theUnknownLoc = impl.allocator.Allocate<UnknownLoc>();
  new (impl.theUnknownLoc) UnknownLoc();
  return impl.theUnknownLoc;
}

UniquedFilename UniquedFilename::get(StringRef filename, MLIRContext *context) {
  auto &impl = context->getImpl();
  auto it = impl.filenames.insert({filename, char()}).first;
  return UniquedFilename(it->getKeyData());
}

FileLineColLoc *FileLineColLoc::get(UniquedFilename filename, unsigned line,
                                    unsigned column, MLIRContext *context) {
  auto &impl = context->getImpl();
  auto &entry =
      impl.fileLineColLocs[std::make_tuple(filename.data(), line, column)];
  if (!entry) {
    entry = impl.allocator.Allocate<FileLineColLoc>();
    new (entry) FileLineColLoc(filename, line, column);
  }

  return entry;
}

//===----------------------------------------------------------------------===//
// Type uniquing
//===----------------------------------------------------------------------===//

IntegerType *IntegerType::get(unsigned width, MLIRContext *context) {
  auto &impl = context->getImpl();

  auto *&result = impl.integers[width];
  if (!result) {
    result = impl.allocator.Allocate<IntegerType>();
    new (result) IntegerType(width, context);
  }

  return result;
}

FloatType *FloatType::get(Kind kind, MLIRContext *context) {
  assert(kind >= Kind::FIRST_FLOATING_POINT_TYPE &&
         kind <= Kind::LAST_FLOATING_POINT_TYPE && "Not an FP type kind");
  auto &impl = context->getImpl();

  // We normally have these types.
  auto *&entry =
      impl.floatTypes[(int)kind - int(Kind::FIRST_FLOATING_POINT_TYPE)];
  if (entry)
    return entry;

  // On the first use, we allocate them into the bump pointer.
  auto *ptr = impl.allocator.Allocate<FloatType>();

  // Initialize the memory using placement new.
  new (ptr) FloatType(kind, context);

  // Cache and return it.
  return entry = ptr;
}

OtherType *OtherType::get(Kind kind, MLIRContext *context) {
  assert(kind >= Kind::FIRST_OTHER_TYPE && kind <= Kind::LAST_OTHER_TYPE &&
         "Not an 'other' type kind");
  auto &impl = context->getImpl();

  // We normally have these types.
  auto *&entry = impl.otherTypes[(int)kind - int(Kind::FIRST_OTHER_TYPE)];
  if (entry)
    return entry;

  // On the first use, we allocate them into the bump pointer.
  auto *ptr = impl.allocator.Allocate<OtherType>();

  // Initialize the memory using placement new.
  new (ptr) OtherType(kind, context);

  // Cache and return it.
  return entry = ptr;
}

FunctionType *FunctionType::get(ArrayRef<Type *> inputs,
                                ArrayRef<Type *> results,
                                MLIRContext *context) {
  auto &impl = context->getImpl();

  // Look to see if we already have this function type.
  FunctionTypeKeyInfo::KeyTy key(inputs, results);
  auto existing = impl.functions.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<FunctionType>();

  // Copy the inputs and results into the bump pointer.
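  // The inputs and results are stored in a single contiguous array, inputs
  // first and results second; FunctionType refers to slices of that array.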
  SmallVector<Type *, 16> types;
  types.reserve(inputs.size() + results.size());
  types.append(inputs.begin(), inputs.end());
  types.append(results.begin(), results.end());
  auto typesList = impl.copyInto(ArrayRef<Type *>(types));

  // Initialize the memory using placement new.
  new (result)
      FunctionType(typesList.data(), inputs.size(), results.size(), context);

  // Cache and return it.
  return *existing.first = result;
}

VectorType *VectorType::get(ArrayRef<unsigned> shape, Type *elementType) {
  assert(!shape.empty() && "vector types must have at least one dimension");
  assert((isa<FloatType>(elementType) || isa<IntegerType>(elementType)) &&
         "vector elements must be primitives");

  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this vector type.
  VectorTypeKeyInfo::KeyTy key(elementType, shape);
  auto existing = impl.vectors.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<VectorType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Initialize the memory using placement new.
  new (result) VectorType(shape, elementType, context);

  // Cache and return it.
  return *existing.first = result;
}

RankedTensorType *RankedTensorType::get(ArrayRef<int> shape,
                                        Type *elementType) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this ranked tensor type.
  RankedTensorTypeKeyInfo::KeyTy key(elementType, shape);
  auto existing = impl.rankedTensors.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<RankedTensorType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Initialize the memory using placement new.
  new (result) RankedTensorType(shape, elementType, context);

  // Cache and return it.
  return *existing.first = result;
}

UnrankedTensorType *UnrankedTensorType::get(Type *elementType) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this unranked tensor type.
  auto *&result = impl.unrankedTensors[elementType];

  // If we already have it, return that value.
  if (result)
    return result;

  // On the first use, we allocate them into the bump pointer.
  result = impl.allocator.Allocate<UnrankedTensorType>();

  // Initialize the memory using placement new.
  new (result) UnrankedTensorType(elementType, context);
  return result;
}

MemRefType *MemRefType::get(ArrayRef<int> shape, Type *elementType,
                            ArrayRef<AffineMap *> affineMapComposition,
                            unsigned memorySpace) {
  auto *context = elementType->getContext();
  auto &impl = context->getImpl();

  // Look to see if we already have this memref type.
  auto key =
      std::make_tuple(elementType, shape, affineMapComposition, memorySpace);
  auto existing = impl.memrefs.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<MemRefType>();

  // Copy the shape into the bump pointer.
  shape = impl.copyInto(shape);

  // Copy the affine map composition into the bump pointer.
  // TODO(andydavis) Assert that the structure of the composition is valid.
  affineMapComposition =
      impl.copyInto(ArrayRef<AffineMap *>(affineMapComposition));

  // Initialize the memory using placement new.
  new (result) MemRefType(shape, elementType, affineMapComposition, memorySpace,
                          context);

  // Cache and return it.
  return *existing.first = result;
}

//===----------------------------------------------------------------------===//
// Attribute uniquing
//===----------------------------------------------------------------------===//

BoolAttr *BoolAttr::get(bool value, MLIRContext *context) {
  auto *&result = context->getImpl().boolAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<BoolAttr>();
  new (result) BoolAttr(value);
  return result;
}

IntegerAttr *IntegerAttr::get(int64_t value, MLIRContext *context) {
  auto *&result = context->getImpl().integerAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<IntegerAttr>();
  new (result) IntegerAttr(value);
  return result;
}

FloatAttr *FloatAttr::get(double value, MLIRContext *context) {
  // We hash based on the bit representation of the double to ensure we don't
  // merge things like -0.0 and 0.0 in the hash comparison.
  union {
    double floatValue;
    int64_t intValue;
  };
  floatValue = value;

  auto *&result = context->getImpl().floatAttrs[intValue];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<FloatAttr>();
  new (result) FloatAttr(value);
  return result;
}

StringAttr *StringAttr::get(StringRef bytes, MLIRContext *context) {
  auto it = context->getImpl().stringAttrs.insert({bytes, nullptr}).first;

  if (it->second)
    return it->second;

  auto result = context->getImpl().allocator.Allocate<StringAttr>();
  new (result) StringAttr(it->first());
  it->second = result;
  return result;
}

ArrayAttr *ArrayAttr::get(ArrayRef<Attribute *> value, MLIRContext *context) {
  auto &impl = context->getImpl();

  // Look to see if we already have this.
  auto existing = impl.arrayAttrs.insert_as(nullptr, value);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *result = impl.allocator.Allocate<ArrayAttr>();

  // Copy the elements into the bump pointer.
  value = impl.copyInto(value);

  // Check to see if any of the elements have a function attr.
  bool hasFunctionAttr = false;
  for (auto *elt : value)
    if (elt->isOrContainsFunction()) {
      hasFunctionAttr = true;
      break;
    }

  // Initialize the memory using placement new.
  new (result) ArrayAttr(value, hasFunctionAttr);

  // Cache and return it.
  return *existing.first = result;
}

AffineMapAttr *AffineMapAttr::get(AffineMap *value, MLIRContext *context) {
  auto *&result = context->getImpl().affineMapAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<AffineMapAttr>();
  new (result) AffineMapAttr(value);
  return result;
}

TypeAttr *TypeAttr::get(Type *type, MLIRContext *context) {
  auto *&result = context->getImpl().typeAttrs[type];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<TypeAttr>();
  new (result) TypeAttr(type);
  return result;
}

FunctionAttr *FunctionAttr::get(const Function *value, MLIRContext *context) {
  assert(value && "Cannot get FunctionAttr for a null function");

  auto *&result = context->getImpl().functionAttrs[value];
  if (result)
    return result;

  result = context->getImpl().allocator.Allocate<FunctionAttr>();
  new (result) FunctionAttr(const_cast<Function *>(value));
  return result;
}

FunctionType *FunctionAttr::getType() const { return getValue()->getType(); }

/// This function is used by the internals of the Function class to null out
/// attributes referring to functions that are about to be deleted.
void FunctionAttr::dropFunctionReference(Function *value) {
  // Check to see if there was an attribute referring to this function.
  auto &functionAttrs = value->getContext()->getImpl().functionAttrs;

  // If not, then we're done.
  auto it = functionAttrs.find(value);
  if (it == functionAttrs.end())
    return;

  // If so, null out the function reference in the attribute (to avoid dangling
  // pointers) and remove the entry from the map so the map doesn't contain
  // dangling keys.
  it->second->value = nullptr;
  functionAttrs.erase(it);
}

/// Perform a three-way comparison between the names of the specified
/// NamedAttributes.
static int compareNamedAttributes(const NamedAttribute *lhs,
                                  const NamedAttribute *rhs) {
  return lhs->first.str().compare(rhs->first.str());
}

/// Given a list of NamedAttributes, canonicalize the list (sorting by name)
/// and return the uniqued result. Note that the empty list is represented
/// with a null pointer.
AttributeListStorage *AttributeListStorage::get(ArrayRef<NamedAttribute> attrs,
                                                MLIRContext *context) {
  // We need to sort the element list to canonicalize it, but we also don't
  // want to do a ton of work in the super common case where the element list
  // is already sorted.
  SmallVector<NamedAttribute, 8> storage;
  switch (attrs.size()) {
  case 0:
    // An empty list is represented with a null pointer.
    return nullptr;
  case 1:
    // A single element is already sorted.
    break;
  case 2:
    // Don't invoke a general sort for the two-element case.
    if (attrs[0].first.str() > attrs[1].first.str()) {
      storage.push_back(attrs[1]);
      storage.push_back(attrs[0]);
      attrs = storage;
    }
    break;
  default:
    // Check to see if they are sorted already.
    bool isSorted = true;
    for (unsigned i = 0, e = attrs.size() - 1; i != e; ++i) {
      if (attrs[i].first.str() > attrs[i + 1].first.str()) {
        isSorted = false;
        break;
      }
    }
    // If not, do a general sort.
    if (!isSorted) {
      storage.append(attrs.begin(), attrs.end());
      llvm::array_pod_sort(storage.begin(), storage.end(),
                           compareNamedAttributes);
      attrs = storage;
    }
  }

  // Ok, now that we've canonicalized our attributes, unique them.
  auto &impl = context->getImpl();

  // Look to see if we already have this.
  auto existing = impl.attributeLists.insert_as(nullptr, attrs);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // Otherwise, allocate a new AttributeListStorage, unique it and return it.
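  // The NamedAttribute elements are stored inline after the object itself via
  // llvm::TrailingObjects, so the total allocation size must account for them.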
  auto byteSize =
      AttributeListStorage::totalSizeToAlloc<NamedAttribute>(attrs.size());
  auto rawMem = impl.allocator.Allocate(byteSize, alignof(NamedAttribute));

  // Placement initialize the AttributeListStorage.
  auto result = ::new (rawMem) AttributeListStorage(attrs.size());
  std::uninitialized_copy(attrs.begin(), attrs.end(),
                          result->getTrailingObjects<NamedAttribute>());
  return *existing.first = result;
}

//===----------------------------------------------------------------------===//
// AffineMap and AffineExpr uniquing
//===----------------------------------------------------------------------===//

AffineMap *AffineMap::get(unsigned dimCount, unsigned symbolCount,
                          ArrayRef<AffineExpr *> results,
                          ArrayRef<AffineExpr *> rangeSizes,
                          MLIRContext *context) {
  // The number of results can't be zero.
  assert(!results.empty());

  assert(rangeSizes.empty() || results.size() == rangeSizes.size());

  auto &impl = context->getImpl();

  // Check if we already have this affine map.
  auto key = std::make_tuple(dimCount, symbolCount, results, rangeSizes);
  auto existing = impl.affineMaps.insert_as(nullptr, key);

  // If we already have it, return that value.
  if (!existing.second)
    return *existing.first;

  // On the first use, we allocate them into the bump pointer.
  auto *res = impl.allocator.Allocate<AffineMap>();

  // Copy the results and range sizes into the bump pointer.
  results = impl.copyInto(ArrayRef<AffineExpr *>(results));
  rangeSizes = impl.copyInto(ArrayRef<AffineExpr *>(rangeSizes));

  // Initialize the memory using placement new.
  new (res) AffineMap(dimCount, symbolCount, results.size(), results.data(),
                      rangeSizes.empty() ? nullptr : rangeSizes.data());

  // Cache and return it.
  return *existing.first = res;
}

/// Return a binary affine op expression with the specified op type and
/// operands: if it doesn't exist, create it and store it; if it is already
/// present, return the stored one. The stored expressions are unique: they are
/// constructed and stored in a simplified/canonicalized form. The result after
/// simplification could be any kind of affine expression.
AffineExpr *AffineBinaryOpExpr::get(AffineExpr::Kind kind, AffineExpr *lhs,
                                    AffineExpr *rhs, MLIRContext *context) {
  auto &impl = context->getImpl();

  // Check if we already have this affine expression, and return it if we do.
  auto keyValue = std::make_tuple((unsigned)kind, lhs, rhs);
  auto cached = impl.affineExprs.find(keyValue);
  if (cached != impl.affineExprs.end())
    return cached->second;

  // Simplify the expression if possible.
  AffineExpr *simplified;
  switch (kind) {
  case Kind::Add:
    simplified = AffineBinaryOpExpr::simplifyAdd(lhs, rhs, context);
    break;
  case Kind::Mul:
    simplified = AffineBinaryOpExpr::simplifyMul(lhs, rhs, context);
    break;
  case Kind::FloorDiv:
    simplified = AffineBinaryOpExpr::simplifyFloorDiv(lhs, rhs, context);
    break;
  case Kind::CeilDiv:
    simplified = AffineBinaryOpExpr::simplifyCeilDiv(lhs, rhs, context);
    break;
  case Kind::Mod:
    simplified = AffineBinaryOpExpr::simplifyMod(lhs, rhs, context);
    break;
  default:
    llvm_unreachable("unexpected binary affine expr");
  }

  // The simplified one would have already been cached; just return it.
  if (simplified)
    return simplified;

  // An expression with these operands will already be in the
  // simplified/canonical form. Create and store it.
  auto *result = impl.allocator.Allocate<AffineBinaryOpExpr>();
  // Initialize the memory using placement new.
  new (result) AffineBinaryOpExpr(kind, lhs, rhs);
  bool inserted = impl.affineExprs.insert({keyValue, result}).second;
  assert(inserted && "the expression shouldn't already exist in the map");
  (void)inserted;
  return result;
}

AffineDimExpr *AffineDimExpr::get(unsigned position, MLIRContext *context) {
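  // Dim expressions are uniqued in a vector indexed by position, grown lazily
  // as larger positions are requested.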
  auto &impl = context->getImpl();

  // Check if we need to resize.
  if (position >= impl.dimExprs.size())
    impl.dimExprs.resize(position + 1, nullptr);

  auto *&result = impl.dimExprs[position];
  if (result)
    return result;

  result = impl.allocator.Allocate<AffineDimExpr>();
  // Initialize the memory using placement new.
  new (result) AffineDimExpr(position);
  return result;
}

AffineSymbolExpr *AffineSymbolExpr::get(unsigned position,
                                        MLIRContext *context) {
  auto &impl = context->getImpl();

  // Check if we need to resize.
  if (position >= impl.symbolExprs.size())
    impl.symbolExprs.resize(position + 1, nullptr);

  auto *&result = impl.symbolExprs[position];
  if (result)
    return result;

  result = impl.allocator.Allocate<AffineSymbolExpr>();
  // Initialize the memory using placement new.
  new (result) AffineSymbolExpr(position);
  return result;
}

AffineConstantExpr *AffineConstantExpr::get(int64_t constant,
                                            MLIRContext *context) {
  auto &impl = context->getImpl();
  auto *&result = impl.constExprs[constant];

  if (result)
    return result;

  result = impl.allocator.Allocate<AffineConstantExpr>();
  // Initialize the memory using placement new.
  new (result) AffineConstantExpr(constant);
  return result;
}

//===----------------------------------------------------------------------===//
// Integer Sets: these are allocated into the bump pointer, and are immutable.
// But they aren't uniqued like AffineMaps; there isn't an advantage to doing
// so.
//===----------------------------------------------------------------------===//

IntegerSet *IntegerSet::get(unsigned dimCount, unsigned symbolCount,
                            ArrayRef<AffineExpr *> constraints,
                            ArrayRef<bool> eqFlags, MLIRContext *context) {
  assert(eqFlags.size() == constraints.size());

  auto &impl = context->getImpl();

  // Allocate them into the bump pointer.
  auto *res = impl.allocator.Allocate<IntegerSet>();

  // Copy the equalities and inequalities into the bump pointer.
  constraints = impl.copyInto(ArrayRef<AffineExpr *>(constraints));
  eqFlags = impl.copyInto(ArrayRef<bool>(eqFlags));

  // Initialize the memory using placement new.
  return new (res) IntegerSet(dimCount, symbolCount, constraints.size(),
                              constraints.data(), eqFlags.data());
}