Introduce IR support for MLIRContext, primitive types, function types, and
vector types.
Tensor and memref types are still TODO and would be a good starter project
for someone.
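
For a rough sense of the new API, here is an illustrative sketch (the
standalone main() harness is hypothetical, but the calls follow the headers
added below):

  #include "mlir/IR/MLIRContext.h"
  #include "mlir/IR/Types.h"
  #include "llvm/Support/raw_ostream.h"
  #include <cassert>
  using namespace mlir;

  int main() {
    MLIRContext context;

    // Primitive types are uniqued and owned by the context, so repeated
    // requests return the same immortal pointer.
    PrimitiveType *f32 = Type::getF32(&context);
    assert(f32 == Type::getF32(&context));

    // A vector type with a fixed 2x4 shape of f32 elements.
    VectorType *vec = VectorType::get({2, 4}, f32);

    // A function type mapping (i32, vector<2x4xf32>) to f32.
    FunctionType *fn =
        FunctionType::get({Type::getI32(&context), vec}, {f32}, &context);

    // Prints "(i32, vector<2x4xf32>) -> f32".
    fn->print(llvm::errs());
  }
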
PiperOrigin-RevId: 201782748
diff --git a/include/mlir/IR/Function.h b/include/mlir/IR/Function.h
index 72f077c..7eaf406 100644
--- a/include/mlir/IR/Function.h
+++ b/include/mlir/IR/Function.h
@@ -27,12 +27,14 @@
#include "mlir/Support/LLVM.h"
namespace mlir {
+ class FunctionType;
+
/// This is the base class for all of the MLIR function types
class Function {
std::string name;
- // TODO: type and lots of other stuff.
+ FunctionType *const type;
public:
- explicit Function(StringRef name);
+ explicit Function(StringRef name, FunctionType *type);
void print(raw_ostream &os);
void dump();
diff --git a/include/mlir/IR/MLIRContext.h b/include/mlir/IR/MLIRContext.h
new file mode 100644
index 0000000..2bd019a
--- /dev/null
+++ b/include/mlir/IR/MLIRContext.h
@@ -0,0 +1,48 @@
+//===- MLIRContext.h - MLIR Global Context Class ----------------*- C++ -*-===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef MLIR_IR_MLIRCONTEXT_H
+#define MLIR_IR_MLIRCONTEXT_H
+
+#include <memory>
+
+namespace mlir {
+ class MLIRContextImpl;
+
+/// MLIRContext is the top-level object for a collection of MLIR modules. It
+/// holds immortal uniqued objects like types, and the tables used to unique
+/// them.
+///
+/// MLIRContext gets a redundant "MLIR" prefix because otherwise it ends up with
+/// a very generic name ("Context") and because it is uncommon for clients to
+/// interact with it.
+///
+class MLIRContext {
+ const std::unique_ptr<MLIRContextImpl> impl;
+ MLIRContext(const MLIRContext&) = delete;
+ void operator=(const MLIRContext&) = delete;
+public:
+ explicit MLIRContext();
+ ~MLIRContext();
+
+ // This is effectively private given that only MLIRContext.cpp can see the
+ // MLIRContextImpl type.
+ MLIRContextImpl &getImpl() const { return *impl.get(); }
+};
+} // end namespace mlir
+
+#endif // MLIR_IR_MLIRCONTEXT_H
diff --git a/include/mlir/IR/Types.h b/include/mlir/IR/Types.h
new file mode 100644
index 0000000..f29cf82
--- /dev/null
+++ b/include/mlir/IR/Types.h
@@ -0,0 +1,229 @@
+//===- Types.h - MLIR Type Classes ------------------------------*- C++ -*-===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#ifndef MLIR_IR_TYPES_H
+#define MLIR_IR_TYPES_H
+
+#include "mlir/Support/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace mlir {
+ class MLIRContext;
+ class PrimitiveType;
+
+/// Integer identifier for all the concrete type kinds.
+enum class TypeKind {
+ // Integer.
+ I1,
+ I8,
+ I16,
+ I32,
+ I64,
+
+ // Target pointer-sized integer.
+ Int,
+
+ // Floating point.
+ BF16,
+ F16,
+ F32,
+ F64,
+
+ LAST_PRIMITIVE_TYPE = F64,
+
+ // Derived types.
+ Function,
+ Vector,
+
+ // TODO: Tensor / MemRef types.
+};
+
+/// Instances of the Type class are immutable, uniqued, immortal, and owned by
+/// MLIRContext. As such, they are passed around by raw non-const pointer.
+///
+class Type {
+public:
+
+ /// Return the classification for this type.
+ TypeKind getKind() const {
+ return kind;
+ }
+
+ /// Return true if this type is the specified kind.
+ bool is(TypeKind k) const {
+ return kind == k;
+ }
+
+ /// Return the MLIRContext in which this type was uniqued.
+ MLIRContext *getContext() const { return context; }
+
+ /// Print the current type.
+ void print(raw_ostream &os) const;
+ void dump() const;
+
+ // Convenience factories.
+ static PrimitiveType *getI1(MLIRContext *ctx);
+ static PrimitiveType *getI8(MLIRContext *ctx);
+ static PrimitiveType *getI16(MLIRContext *ctx);
+ static PrimitiveType *getI32(MLIRContext *ctx);
+ static PrimitiveType *getI64(MLIRContext *ctx);
+ static PrimitiveType *getInt(MLIRContext *ctx);
+ static PrimitiveType *getBF16(MLIRContext *ctx);
+ static PrimitiveType *getF16(MLIRContext *ctx);
+ static PrimitiveType *getF32(MLIRContext *ctx);
+ static PrimitiveType *getF64(MLIRContext *ctx);
+
+protected:
+ explicit Type(TypeKind kind, MLIRContext *context)
+ : context(context), kind(kind), subclassData(0) {
+ }
+ explicit Type(TypeKind kind, MLIRContext *context, unsigned subClassData)
+ : Type(kind, context) {
+ setSubclassData(subClassData);
+ }
+
+ ~Type() = default;
+
+ unsigned getSubclassData() const { return subclassData; }
+
+ void setSubclassData(unsigned val) {
+ subclassData = val;
+ // Ensure we don't have any accidental truncation.
+ assert(getSubclassData() == val && "Subclass data too large for field");
+ }
+
+private:
+ /// This refers to the MLIRContext in which this type was uniqued.
+ MLIRContext *const context;
+
+ /// Classification of the subclass, used for type checking.
+ TypeKind kind : 8;
+
+ // Space for subclasses to store data.
+ unsigned subclassData : 24;
+};
+
+inline raw_ostream &operator<<(raw_ostream &os, const Type &type) {
+ type.print(os);
+ return os;
+}
+
+/// Primitive types are the atomic base of the type system, including integer
+/// and floating point values.
+class PrimitiveType : public Type {
+public:
+ static PrimitiveType *get(TypeKind kind, MLIRContext *context);
+
+ /// Methods to support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Type *T) {
+ return T->getKind() <= TypeKind::LAST_PRIMITIVE_TYPE;
+ }
+private:
+ PrimitiveType(TypeKind kind, MLIRContext *context);
+};
+
+inline PrimitiveType *Type::getI1(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::I1, ctx);
+}
+inline PrimitiveType *Type::getI8(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::I8, ctx);
+}
+inline PrimitiveType *Type::getI16(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::I16, ctx);
+}
+inline PrimitiveType *Type::getI32(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::I32, ctx);
+}
+inline PrimitiveType *Type::getI64(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::I64, ctx);
+}
+inline PrimitiveType *Type::getInt(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::Int, ctx);
+}
+inline PrimitiveType *Type::getBF16(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::BF16, ctx);
+}
+inline PrimitiveType *Type::getF16(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::F16, ctx);
+}
+inline PrimitiveType *Type::getF32(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::F32, ctx);
+}
+inline PrimitiveType *Type::getF64(MLIRContext *ctx) {
+ return PrimitiveType::get(TypeKind::F64, ctx);
+}
+
+
+/// Function types map from a list of inputs to a list of results.
+class FunctionType : public Type {
+public:
+ static FunctionType *get(ArrayRef<Type*> inputs, ArrayRef<Type*> results,
+ MLIRContext *context);
+
+ /// Methods to support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Type *T) {
+ return T->getKind() == TypeKind::Function;
+ }
+
+ ArrayRef<Type*> getInputs() const {
+ return ArrayRef<Type*>(inputsAndResults, getSubclassData());
+ }
+
+ ArrayRef<Type*> getResults() const {
+ return ArrayRef<Type*>(inputsAndResults+getSubclassData(), numResults);
+ }
+
+private:
+ unsigned numResults;
+ Type *const *inputsAndResults;
+
+ FunctionType(Type *const *inputsAndResults, unsigned numInputs,
+ unsigned numResults, MLIRContext *context);
+};
+
+
+/// Vector types represent multi-dimensional SIMD vectors, and have a fixed,
+/// known constant shape with one or more dimensions.
+class VectorType : public Type {
+public:
+ static VectorType *get(ArrayRef<unsigned> shape, Type *elementType);
+
+ ArrayRef<unsigned> getShape() const {
+ return ArrayRef<unsigned>(shapeElements, getSubclassData());
+ }
+
+ PrimitiveType *getElementType() const {
+ return elementType;
+ }
+
+ /// Methods to support type inquiry through isa, cast, and dyn_cast.
+ static bool classof(const Type *T) {
+ return T->getKind() == TypeKind::Vector;
+ }
+
+private:
+ const unsigned *shapeElements;
+ PrimitiveType *elementType;
+
+ VectorType(ArrayRef<unsigned> shape, PrimitiveType *elementType,
+ MLIRContext *context);
+};
+
+
+} // end namespace mlir
+
+#endif // MLIR_IR_TYPES_H
diff --git a/include/mlir/Parser.h b/include/mlir/Parser.h
index cb5f1c0..42f25bb 100644
--- a/include/mlir/Parser.h
+++ b/include/mlir/Parser.h
@@ -28,10 +28,11 @@
namespace mlir {
class Module;
+class MLIRContext;
/// This parses the file specified by the indicated SourceMgr and returns an
/// MLIR module if it was valid. If not, it emits diagnostics and returns null.
-Module *parseSourceFile(llvm::SourceMgr &sourceMgr);
+Module *parseSourceFile(llvm::SourceMgr &sourceMgr, MLIRContext *context);
} // end namespace mlir
diff --git a/include/mlir/Support/STLExtras.h b/include/mlir/Support/STLExtras.h
new file mode 100644
index 0000000..da0b62e
--- /dev/null
+++ b/include/mlir/Support/STLExtras.h
@@ -0,0 +1,61 @@
+//===- STLExtras.h - STL-like extensions that are used by MLIR --*- C++ -*-===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+//
+// This file contains utilities that should arguably be sunk into LLVM's
+// Support/STLExtras.h over time.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_SUPPORT_STLEXTRAS_H
+#define MLIR_SUPPORT_STLEXTRAS_H
+
+namespace mlir {
+
+/// An STL-style algorithm similar to std::for_each that applies a second
+/// functor between every pair of elements.
+///
+/// This provides the control flow logic to, for example, print a
+/// comma-separated list:
+/// \code
+/// interleave(names.begin(), names.end(),
+/// [&](StringRef name) { os << name; },
+/// [&] { os << ", "; });
+/// \endcode
+template <typename ForwardIterator, typename UnaryFunctor,
+ typename NullaryFunctor>
+inline void interleave(ForwardIterator begin, ForwardIterator end,
+ UnaryFunctor each_fn,
+ NullaryFunctor between_fn) {
+ if (begin == end)
+ return;
+ each_fn(*begin);
+ ++begin;
+ for (; begin != end; ++begin) {
+ between_fn();
+ each_fn(*begin);
+ }
+}
+
+template <typename Container, typename UnaryFunctor, typename NullaryFunctor>
+inline void interleave(const Container &c, UnaryFunctor each_fn,
+ NullaryFunctor between_fn) {
+ interleave(c.begin(), c.end(), each_fn, between_fn);
+}
+
+} // end namespace mlir
+
+#endif // MLIR_SUPPORT_STLEXTRAS_H
diff --git a/lib/IR/Function.cpp b/lib/IR/Function.cpp
index 6407e9c..54c9166 100644
--- a/lib/IR/Function.cpp
+++ b/lib/IR/Function.cpp
@@ -16,14 +16,37 @@
// =============================================================================
#include "mlir/IR/Function.h"
+#include "mlir/IR/Types.h"
+#include "mlir/Support/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace mlir;
-Function::Function(StringRef name) : name(name.str()) {
+Function::Function(StringRef name, FunctionType *type)
+ : name(name.str()), type(type) {
}
void Function::print(raw_ostream &os) {
- os << "extfunc @" << name << "()\n";
+ os << "extfunc @" << name << '(';
+ interleave(type->getInputs(),
+ [&](Type *eltType) { os << *eltType; },
+ [&]() { os << ", "; });
+ os << ')';
+
+ switch (type->getResults().size()) {
+ case 0: break;
+ case 1:
+ os << " -> " << *type->getResults()[0];
+ break;
+ default:
+ os << " -> (";
+ interleave(type->getResults(),
+ [&](Type *eltType) { os << *eltType; },
+ [&]() { os << ", "; });
+ os << ')';
+ break;
+ }
+
+ os << "\n";
}
void Function::dump() {
diff --git a/lib/IR/MLIRContext.cpp b/lib/IR/MLIRContext.cpp
new file mode 100644
index 0000000..a2befc3
--- /dev/null
+++ b/lib/IR/MLIRContext.cpp
@@ -0,0 +1,200 @@
+//===- MLIRContext.cpp - MLIR Global Context Class ------------------------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#include "mlir/IR/MLIRContext.h"
+#include "mlir/IR/Types.h"
+#include "mlir/Support/LLVM.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/Allocator.h"
+using namespace mlir;
+using namespace llvm;
+
+namespace {
+struct FunctionTypeKeyInfo : DenseMapInfo<FunctionType*> {
+ // Functions are uniqued based on their inputs and results.
+ using KeyTy = std::pair<ArrayRef<Type*>, ArrayRef<Type*>>;
+ using DenseMapInfo<FunctionType*>::getHashValue;
+ using DenseMapInfo<FunctionType*>::isEqual;
+
+ static unsigned getHashValue(KeyTy key) {
+ return hash_combine(hash_combine_range(key.first.begin(), key.first.end()),
+ hash_combine_range(key.second.begin(),
+ key.second.end()));
+ }
+
+ static bool isEqual(const KeyTy &lhs, const FunctionType *rhs) {
+ if (rhs == getEmptyKey() || rhs == getTombstoneKey())
+ return false;
+ return lhs == KeyTy(rhs->getInputs(), rhs->getResults());
+ }
+};
+struct VectorTypeKeyInfo : DenseMapInfo<VectorType*> {
+ // Vectors are uniqued based on their element type and shape.
+ using KeyTy = std::pair<Type*, ArrayRef<unsigned>>;
+ using DenseMapInfo<VectorType*>::getHashValue;
+ using DenseMapInfo<VectorType*>::isEqual;
+
+ static unsigned getHashValue(KeyTy key) {
+ return hash_combine(DenseMapInfo<Type*>::getHashValue(key.first),
+ hash_combine_range(key.second.begin(),
+ key.second.end()));
+ }
+
+ static bool isEqual(const KeyTy &lhs, const VectorType *rhs) {
+ if (rhs == getEmptyKey() || rhs == getTombstoneKey())
+ return false;
+ return lhs == KeyTy(rhs->getElementType(), rhs->getShape());
+ }
+};
+} // end anonymous namespace.
+
+
+namespace mlir {
+/// This is the implementation of the MLIRContext class, using the pImpl idiom.
+/// This class is completely private to this file, so everything is public.
+class MLIRContextImpl {
+public:
+ /// We put immortal objects into this allocator.
+ llvm::BumpPtrAllocator allocator;
+
+ // Primitive type uniquing.
+ PrimitiveType *primitives[int(TypeKind::LAST_PRIMITIVE_TYPE)+1] = { nullptr };
+
+ /// Function type uniquing.
+ using FunctionTypeSet = DenseSet<FunctionType*, FunctionTypeKeyInfo>;
+ FunctionTypeSet functions;
+
+ /// Vector type uniquing.
+ using VectorTypeSet = DenseSet<VectorType*, VectorTypeKeyInfo>;
+ VectorTypeSet vectors;
+
+
+public:
+ /// Copy the specified array of elements into memory managed by our bump
+ /// pointer allocator. This assumes the elements are all PODs.
+ template<typename T>
+ ArrayRef<T> copyInto(ArrayRef<T> elements) {
+ auto result = allocator.Allocate<T>(elements.size());
+ std::uninitialized_copy(elements.begin(), elements.end(), result);
+ return ArrayRef<T>(result, elements.size());
+ }
+};
+} // end namespace mlir
+
+MLIRContext::MLIRContext() : impl(new MLIRContextImpl()) {
+}
+
+MLIRContext::~MLIRContext() {
+}
+
+
+PrimitiveType::PrimitiveType(TypeKind kind, MLIRContext *context)
+ : Type(kind, context) {
+
+}
+
+PrimitiveType *PrimitiveType::get(TypeKind kind, MLIRContext *context) {
+ assert(kind <= TypeKind::LAST_PRIMITIVE_TYPE && "Not a primitive type kind");
+ auto &impl = context->getImpl();
+
+ // We usually already have this type cached; return it if so.
+ if (impl.primitives[(int)kind])
+ return impl.primitives[(int)kind];
+
+ // On the first use, allocate it from the bump pointer allocator.
+ auto *ptr = impl.allocator.Allocate<PrimitiveType>();
+
+ // Initialize the memory using placement new.
+ new(ptr) PrimitiveType(kind, context);
+
+ // Cache and return it.
+ return impl.primitives[(int)kind] = ptr;
+}
+
+FunctionType::FunctionType(Type *const *inputsAndResults, unsigned numInputs,
+ unsigned numResults, MLIRContext *context)
+ : Type(TypeKind::Function, context, numInputs),
+ numResults(numResults), inputsAndResults(inputsAndResults) {
+}
+
+FunctionType *FunctionType::get(ArrayRef<Type*> inputs, ArrayRef<Type*> results,
+ MLIRContext *context) {
+ auto &impl = context->getImpl();
+
+ // Look to see if we already have this function type.
+ FunctionTypeKeyInfo::KeyTy key(inputs, results);
+ auto existing = impl.functions.insert_as(nullptr, key);
+
+ // If we already have it, return that value.
+ if (!existing.second)
+ return *existing.first;
+
+ // On the first use, allocate it from the bump pointer allocator.
+ auto *result = impl.allocator.Allocate<FunctionType>();
+
+ // Copy the inputs and results into the bump pointer.
+ SmallVector<Type*, 16> types;
+ types.reserve(inputs.size()+results.size());
+ types.append(inputs.begin(), inputs.end());
+ types.append(results.begin(), results.end());
+ auto typesList = impl.copyInto(ArrayRef<Type*>(types));
+
+ // Initialize the memory using placement new.
+ new (result) FunctionType(typesList.data(), inputs.size(), results.size(),
+ context);
+
+ // Cache and return it.
+ return *existing.first = result;
+}
+
+
+
+VectorType::VectorType(ArrayRef<unsigned> shape, PrimitiveType *elementType,
+ MLIRContext *context)
+ : Type(TypeKind::Vector, context, shape.size()),
+ shapeElements(shape.data()), elementType(elementType) {
+}
+
+
+VectorType *VectorType::get(ArrayRef<unsigned> shape, Type *elementType) {
+ assert(!shape.empty() && "vector types must have at least one dimension");
+ assert(isa<PrimitiveType>(elementType) &&
+ "vectors elements must be primitives");
+
+ auto *context = elementType->getContext();
+ auto &impl = context->getImpl();
+
+ // Look to see if we already have this vector type.
+ VectorTypeKeyInfo::KeyTy key(elementType, shape);
+ auto existing = impl.vectors.insert_as(nullptr, key);
+
+ // If we already have it, return that value.
+ if (!existing.second)
+ return *existing.first;
+
+ // On the first use, allocate it from the bump pointer allocator.
+ auto *result = impl.allocator.Allocate<VectorType>();
+
+ // Copy the shape into the bump pointer.
+ shape = impl.copyInto(shape);
+
+ // Initialize the memory using placement new.
+ new (result) VectorType(shape, cast<PrimitiveType>(elementType), context);
+
+ // Cache and return it.
+ return *existing.first = result;
+}
diff --git a/lib/IR/Types.cpp b/lib/IR/Types.cpp
new file mode 100644
index 0000000..5485995
--- /dev/null
+++ b/lib/IR/Types.cpp
@@ -0,0 +1,68 @@
+//===- Types.cpp - MLIR Type Classes --------------------------------------===//
+//
+// Copyright 2019 The MLIR Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// =============================================================================
+
+#include "mlir/IR/Types.h"
+#include "mlir/Support/LLVM.h"
+#include "llvm/Support/raw_ostream.h"
+#include "mlir/Support/STLExtras.h"
+using namespace mlir;
+
+void Type::print(raw_ostream &os) const {
+ switch (getKind()) {
+ case TypeKind::I1: os << "i1"; return;
+ case TypeKind::I8: os << "i8"; return;
+ case TypeKind::I16: os << "i16"; return;
+ case TypeKind::I32: os << "i32"; return;
+ case TypeKind::I64: os << "i64"; return;
+ case TypeKind::Int: os << "int"; return;
+ case TypeKind::BF16: os << "bf16"; return;
+ case TypeKind::F16: os << "f16"; return;
+ case TypeKind::F32: os << "f32"; return;
+ case TypeKind::F64: os << "f64"; return;
+ case TypeKind::Function: {
+ auto *func = cast<FunctionType>(this);
+ os << '(';
+ interleave(func->getInputs(),
+ [&](Type *type) { os << *type; },
+ [&]() { os << ", "; });
+ os << ") -> ";
+ auto results = func->getResults();
+ if (results.size() == 1)
+ os << *results[0];
+ else {
+ os << '(';
+ interleave(results,
+ [&](Type *type) { os << *type; },
+ [&]() { os << ", "; });
+ os << ")";
+ }
+ return;
+ }
+ case TypeKind::Vector: {
+ auto *v = cast<VectorType>(this);
+ os << "vector<";
+ for (auto dim : v->getShape())
+ os << dim << 'x';
+ os << *v->getElementType() << '>';
+ return;
+ }
+ }
+}
+
+void Type::dump() const {
+ print(llvm::errs());
+}
diff --git a/lib/Parser/Parser.cpp b/lib/Parser/Parser.cpp
index 6dde8c0..7e3076f 100644
--- a/lib/Parser/Parser.cpp
+++ b/lib/Parser/Parser.cpp
@@ -22,13 +22,14 @@
#include "mlir/Parser.h"
#include "Lexer.h"
#include "mlir/IR/Module.h"
+#include "mlir/IR/Types.h"
#include "llvm/Support/SourceMgr.h"
using namespace mlir;
using llvm::SourceMgr;
namespace {
-/// Simple enum to make code read better. Failure is "true" in a boolean
-/// context.
+/// Simple enum to make code read better in cases that would otherwise return a
+/// bool value. Failure is "true" in a boolean context.
enum ParseResult {
ParseSuccess,
ParseFailure
@@ -37,13 +38,17 @@
/// Main parser implementation.
class Parser {
public:
- Parser(llvm::SourceMgr &sourceMgr) : lex(sourceMgr), curToken(lex.lexToken()){
+ Parser(llvm::SourceMgr &sourceMgr, MLIRContext *context)
+ : context(context), lex(sourceMgr), curToken(lex.lexToken()){
module.reset(new Module());
}
Module *parseModule();
private:
// State.
+ MLIRContext *const context;
+
+ // The lexer for the source file we're parsing.
Lexer lex;
// This is the next token that hasn't been consumed yet.
@@ -86,19 +91,24 @@
const std::function<ParseResult()> &parseElement,
bool allowEmptyList = true);
+ // We have two forms of parsing methods: those that return a non-null
+ // pointer on success and nullptr on failure, and those that return a
+ // ParseResult to indicate success or failure. The latter fill in
+ // by-reference arguments with the results of their action.
+
// Type parsing.
- ParseResult parsePrimitiveType();
- ParseResult parseElementType();
- ParseResult parseVectorType();
+ PrimitiveType *parsePrimitiveType();
+ Type *parseElementType();
+ VectorType *parseVectorType();
ParseResult parseDimensionListRanked(SmallVectorImpl<int> &dimensions);
- ParseResult parseTensorType();
- ParseResult parseMemRefType();
- ParseResult parseFunctionType();
- ParseResult parseType();
- ParseResult parseTypeList();
+ Type *parseTensorType();
+ Type *parseMemRefType();
+ Type *parseFunctionType();
+ Type *parseType();
+ ParseResult parseTypeList(SmallVectorImpl<Type*> &elements);
// Top level entity parsing.
- ParseResult parseFunctionSignature(StringRef &name);
+ ParseResult parseFunctionSignature(StringRef &name, FunctionType *&type);
ParseResult parseExtFunc();
};
} // end anonymous namespace
@@ -166,40 +176,40 @@
/// | `i1` | `i8` | `i16` | `i32` | `i64` // Sized integers
/// | `int`
///
-ParseResult Parser::parsePrimitiveType() {
- // TODO: Build IR objects.
+PrimitiveType *Parser::parsePrimitiveType() {
switch (curToken.getKind()) {
- default: return emitError("expected type");
+ default:
+ return (emitError("expected type"), nullptr);
case Token::kw_bf16:
consumeToken(Token::kw_bf16);
- return ParseSuccess;
+ return Type::getBF16(context);
case Token::kw_f16:
consumeToken(Token::kw_f16);
- return ParseSuccess;
+ return Type::getF16(context);
case Token::kw_f32:
consumeToken(Token::kw_f32);
- return ParseSuccess;
+ return Type::getF32(context);
case Token::kw_f64:
consumeToken(Token::kw_f64);
- return ParseSuccess;
+ return Type::getF64(context);
case Token::kw_i1:
consumeToken(Token::kw_i1);
- return ParseSuccess;
- case Token::kw_i16:
- consumeToken(Token::kw_i16);
- return ParseSuccess;
- case Token::kw_i32:
- consumeToken(Token::kw_i32);
- return ParseSuccess;
- case Token::kw_i64:
- consumeToken(Token::kw_i64);
- return ParseSuccess;
+ return Type::getI1(context);
case Token::kw_i8:
consumeToken(Token::kw_i8);
- return ParseSuccess;
+ return Type::getI8(context);
+ case Token::kw_i16:
+ consumeToken(Token::kw_i16);
+ return Type::getI16(context);
+ case Token::kw_i32:
+ consumeToken(Token::kw_i32);
+ return Type::getI32(context);
+ case Token::kw_i64:
+ consumeToken(Token::kw_i64);
+ return Type::getI64(context);
case Token::kw_int:
consumeToken(Token::kw_int);
- return ParseSuccess;
+ return Type::getInt(context);
}
}
@@ -207,7 +217,7 @@
///
/// element-type ::= primitive-type | vector-type
///
-ParseResult Parser::parseElementType() {
+Type *Parser::parseElementType() {
if (curToken.is(Token::kw_vector))
return parseVectorType();
@@ -219,21 +229,21 @@
/// vector-type ::= `vector` `<` const-dimension-list primitive-type `>`
/// const-dimension-list ::= (integer-literal `x`)+
///
-ParseResult Parser::parseVectorType() {
+VectorType *Parser::parseVectorType() {
consumeToken(Token::kw_vector);
if (!consumeIf(Token::less))
- return emitError("expected '<' in vector type");
+ return (emitError("expected '<' in vector type"), nullptr);
if (curToken.isNot(Token::integer))
- return emitError("expected dimension size in vector type");
+ return (emitError("expected dimension size in vector type"), nullptr);
SmallVector<unsigned, 4> dimensions;
while (curToken.is(Token::integer)) {
// Make sure this integer value is in bound and valid.
auto dimension = curToken.getUnsignedIntegerValue();
if (!dimension.hasValue())
- return emitError("invalid dimension in vector type");
+ return (emitError("invalid dimension in vector type"), nullptr);
dimensions.push_back(dimension.getValue());
consumeToken(Token::integer);
@@ -241,7 +251,7 @@
// Make sure we have an 'x' or something like 'xbf32'.
if (curToken.isNot(Token::bare_identifier) ||
curToken.getSpelling()[0] != 'x')
- return emitError("expected 'x' in vector dimension list");
+ return (emitError("expected 'x' in vector dimension list"), nullptr);
// If we had a prefix of 'x', lex the next token immediately after the 'x'.
if (curToken.getSpelling().size() != 1)
@@ -252,15 +262,14 @@
}
// Parse the element type.
- if (parsePrimitiveType())
- return ParseFailure;
+ auto *elementType = parsePrimitiveType();
+ if (!elementType)
+ return nullptr;
if (!consumeIf(Token::greater))
- return emitError("expected '>' in vector type");
+ return (emitError("expected '>' in vector type"), nullptr);
- // TODO: Form IR object.
-
- return ParseSuccess;
+ return VectorType::get(dimensions, elementType);
}
/// Parse a dimension list of a tensor or memref type. This populates the
@@ -303,11 +312,11 @@
/// tensor-type ::= `tensor` `<` dimension-list element-type `>`
/// dimension-list ::= dimension-list-ranked | `??`
///
-ParseResult Parser::parseTensorType() {
+Type *Parser::parseTensorType() {
consumeToken(Token::kw_tensor);
if (!consumeIf(Token::less))
- return emitError("expected '<' in tensor type");
+ return (emitError("expected '<' in tensor type"), nullptr);
bool isUnranked;
SmallVector<int, 4> dimensions;
@@ -317,19 +326,19 @@
} else {
isUnranked = false;
if (parseDimensionListRanked(dimensions))
- return ParseFailure;
+ return nullptr;
}
// Parse the element type.
- if (parseElementType())
- return ParseFailure;
+ auto elementType = parseElementType();
+ if (!elementType)
+ return nullptr;
if (!consumeIf(Token::greater))
- return emitError("expected '>' in tensor type");
+ return (emitError("expected '>' in tensor type"), nullptr);
- // TODO: Form IR object.
-
- return ParseSuccess;
+ // FIXME: Add an IR representation for tensor types.
+ return Type::getI1(context);
}
/// Parse a memref type.
@@ -340,29 +349,29 @@
/// semi-affine-map-composition ::= (semi-affine-map `,` )* semi-affine-map
/// memory-space ::= integer-literal /* | TODO: address-space-id */
///
-ParseResult Parser::parseMemRefType() {
+Type *Parser::parseMemRefType() {
consumeToken(Token::kw_memref);
if (!consumeIf(Token::less))
- return emitError("expected '<' in memref type");
+ return (emitError("expected '<' in memref type"), nullptr);
SmallVector<int, 4> dimensions;
if (parseDimensionListRanked(dimensions))
- return ParseFailure;
+ return nullptr;
// Parse the element type.
- if (parseElementType())
- return ParseFailure;
+ auto elementType = parseElementType();
+ if (!elementType)
+ return nullptr;
// TODO: Parse semi-affine-map-composition.
// TODO: Parse memory-space.
if (!consumeIf(Token::greater))
- return emitError("expected '>' in memref type");
+ return (emitError("expected '>' in memref type"), nullptr);
- // TODO: Form IR object.
-
- return ParseSuccess;
+ // FIXME: Add an IR representation for memref types.
+ return Type::getI1(context);
}
@@ -371,20 +380,21 @@
///
/// function-type ::= type-list-parens `->` type-list
///
-ParseResult Parser::parseFunctionType() {
+Type *Parser::parseFunctionType() {
assert(curToken.is(Token::l_paren));
- if (parseTypeList())
- return ParseFailure;
+ SmallVector<Type*, 4> arguments;
+ if (parseTypeList(arguments))
+ return nullptr;
if (!consumeIf(Token::arrow))
- return emitError("expected '->' in function type");
+ return (emitError("expected '->' in function type"), nullptr);
- if (parseTypeList())
- return ParseFailure;
+ SmallVector<Type*, 4> results;
+ if (parseTypeList(results))
+ return nullptr;
- // TODO: Build IR object.
- return ParseSuccess;
+ return FunctionType::get(arguments, results, context);
}
@@ -397,7 +407,7 @@
/// | function-type
/// element-type ::= primitive-type | vector-type
///
-ParseResult Parser::parseType() {
+Type *Parser::parseType() {
switch (curToken.getKind()) {
case Token::kw_memref: return parseMemRefType();
case Token::kw_tensor: return parseTensorType();
@@ -415,20 +425,20 @@
/// type-list-parens ::= `(` `)`
/// | `(` type (`,` type)* `)`
///
-ParseResult Parser::parseTypeList() {
+ParseResult Parser::parseTypeList(SmallVectorImpl<Type*> &elements) {
+ auto parseElt = [&]() -> ParseResult {
+ auto elt = parseType();
+ elements.push_back(elt);
+ return elt ? ParseSuccess : ParseFailure;
+ };
+
// If there is no parens, then it must be a singular type.
if (!consumeIf(Token::l_paren))
- return parseType();
+ return parseElt();
- if (parseCommaSeparatedList(Token::r_paren,
- [&]() -> ParseResult {
- // TODO: Add to list of IR values we're parsing.
- return parseType();
- })) {
+ if (parseCommaSeparatedList(Token::r_paren, parseElt))
return ParseFailure;
- }
- // TODO: Build IR objects.
return ParseSuccess;
}
@@ -443,7 +453,8 @@
/// argument-list ::= type (`,` type)* | /*empty*/
/// function-signature ::= function-id `(` argument-list `)` (`->` type-list)?
///
-ParseResult Parser::parseFunctionSignature(StringRef &name) {
+ParseResult Parser::parseFunctionSignature(StringRef &name,
+ FunctionType *&type) {
if (curToken.isNot(Token::at_identifier))
return emitError("expected a function identifier like '@foo'");
@@ -453,17 +464,17 @@
if (curToken.isNot(Token::l_paren))
return emitError("expected '(' in function signature");
- if (parseTypeList())
+ SmallVector<Type*, 4> arguments;
+ if (parseTypeList(arguments))
return ParseFailure;
// Parse the return type if present.
+ SmallVector<Type*, 4> results;
if (consumeIf(Token::arrow)) {
- if (parseTypeList())
+ if (parseTypeList(results))
return ParseFailure;
-
- // TODO: Build IR object.
}
-
+ type = FunctionType::get(arguments, results, context);
return ParseSuccess;
}
@@ -476,12 +487,13 @@
consumeToken(Token::kw_extfunc);
StringRef name;
- if (parseFunctionSignature(name))
+ FunctionType *type = nullptr;
+ if (parseFunctionSignature(name, type))
return ParseFailure;
// Okay, the external function definition was parsed correctly.
- module->functionList.push_back(new Function(name));
+ module->functionList.push_back(new Function(name, type));
return ParseSuccess;
}
@@ -518,6 +530,6 @@
/// This parses the file specified by the indicated SourceMgr and returns an
/// MLIR module if it was valid. If not, it emits diagnostics and returns null.
-Module *mlir::parseSourceFile(llvm::SourceMgr &sourceMgr) {
- return Parser(sourceMgr).parseModule();
+Module *mlir::parseSourceFile(llvm::SourceMgr &sourceMgr, MLIRContext *context){
+ return Parser(sourceMgr, context).parseModule();
}
diff --git a/test/IR/parser.mlir b/test/IR/parser.mlir
index a724363..a30f2e3 100644
--- a/test/IR/parser.mlir
+++ b/test/IR/parser.mlir
@@ -4,25 +4,28 @@
; RUN: %S/../../mlir-opt %s -o - | FileCheck %s
-; CHECK: extfunc @foo()
+; CHECK: extfunc @foo(i32, i64) -> f32
extfunc @foo(i32, i64) -> f32
; CHECK: extfunc @bar()
extfunc @bar() -> ()
-; CHECK: extfunc @baz()
+; CHECK: extfunc @baz() -> (i1, int, f32)
extfunc @baz() -> (i1, int, f32)
; CHECK: extfunc @missingReturn()
extfunc @missingReturn()
-; CHECK: extfunc @vectors()
+; CHECK: extfunc @vectors(vector<1xf32>, vector<2x4xf32>)
extfunc @vectors(vector<1 x f32>, vector<2x4xf32>)
-; CHECK: extfunc @tensors()
+; CHECK: extfunc @tensors(i1, i1, i1, i1)
extfunc @tensors(tensor<?? f32>, tensor<?? vector<2x4xf32>>,
tensor<1x?x4x?x?xint>, tensor<i8>)
-; CHECK: extfunc @memrefs()
+; CHECK: extfunc @memrefs(i1, i1)
extfunc @memrefs(memref<1x?x4x?x?xint>, memref<i8>)
+
+; CHECK: extfunc @functions((i1, i1) -> (), () -> ())
+extfunc @functions((memref<1x?x4x?x?xint>, memref<i8>) -> (), ()->())
diff --git a/tools/mlir-opt/mlir-opt.cpp b/tools/mlir-opt/mlir-opt.cpp
index b5a548d..a75ba76 100644
--- a/tools/mlir-opt/mlir-opt.cpp
+++ b/tools/mlir-opt/mlir-opt.cpp
@@ -21,6 +21,7 @@
//
//===----------------------------------------------------------------------===//
+#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/Parser.h"
#include "llvm/Support/CommandLine.h"
@@ -56,6 +57,8 @@
int main(int argc, char **argv) {
InitLLVM x(argc, argv);
+ MLIRContext context;
+
cl::ParseCommandLineOptions(argc, argv, "MLIR modular optimizer driver\n");
// Set up the input file.
@@ -71,7 +74,7 @@
sourceMgr.AddNewSourceBuffer(std::move(*fileOrErr), SMLoc());
// Parse the input file and emit any errors.
- std::unique_ptr<Module> module(parseSourceFile(sourceMgr));
+ std::unique_ptr<Module> module(parseSourceFile(sourceMgr, &context));
if (!module) return 1;
// Print the output.