Check in LLVM r95781.
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..1ab2f55
--- /dev/null
+++ b/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,153 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_ABIINFO_H
+#define CLANG_CODEGEN_ABIINFO_H
+
+#include "clang/AST/Type.h"
+
+#include <cassert>
+
+namespace llvm {
+  class Type;
+  class Value;
+  class LLVMContext;
+}
+
+namespace clang {
+  class ASTContext;
+
+  // FIXME: This is a layering issue if we want to move ABIInfo
+  // down. Fortunately CGFunctionInfo has no real tie to CodeGen.
+  namespace CodeGen {
+    class CGFunctionInfo;
+    class CodeGenFunction;
+  }
+
+  /* FIXME: All of this stuff should be part of the target interface
+     somehow. It is currently here because it is not clear how to factor
+     the targets to support this, since the Targets currently live in a
+     layer below the type system.
+  */
+
+  /// ABIArgInfo - Helper class to encapsulate information about how a
+  /// specific C type should be passed to or returned from a function.
+  class ABIArgInfo {
+  public:
+    enum Kind {
+      Direct,    ///< Pass the argument directly using the normal
+                 ///< converted LLVM type. Complex and structure types
+                 ///< are passed using first class aggregates.
+
+      Extend,    ///< Valid only for integer argument types. Same as 'direct'
+                 ///< but also emit a zero/sign extension attribute.
+
+      Indirect,  ///< Pass the argument indirectly via a hidden pointer
+                 ///< with the specified alignment (0 indicates default
+                 ///< alignment).
+
+      Ignore,    ///< Ignore the argument (treat as void). Useful for
+                 ///< void and empty structs.
+
+      Coerce,    ///< Only valid for aggregate return types, the argument
+                 ///< should be accessed by coercion to a provided type.
+
+      Expand,    ///< Only valid for aggregate argument types. The
+                 ///< structure should be expanded into consecutive
+                 ///< arguments for its constituent fields. Currently
+                 ///< expand is only allowed on structures whose fields
+                 ///< are all scalar types or are themselves expandable
+                 ///< types.
+
+      KindFirst=Direct, KindLast=Expand
+    };
+
+  private:
+    Kind TheKind;
+    const llvm::Type *TypeData;
+    unsigned UIntData;
+    bool BoolData;
+
+    ABIArgInfo(Kind K, const llvm::Type *TD=0,
+               unsigned UI=0, bool B = false) 
+      : TheKind(K), TypeData(TD), UIntData(UI), BoolData(B) {}
+
+  public:
+    ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0), BoolData(false) {}
+
+    static ABIArgInfo getDirect() {
+      return ABIArgInfo(Direct);
+    }
+    static ABIArgInfo getExtend() {
+      return ABIArgInfo(Extend);
+    }
+    static ABIArgInfo getIgnore() {
+      return ABIArgInfo(Ignore);
+    }
+    static ABIArgInfo getCoerce(const llvm::Type *T) {
+      return ABIArgInfo(Coerce, T);
+    }
+    static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true) {
+      return ABIArgInfo(Indirect, 0, Alignment, ByVal);
+    }
+    static ABIArgInfo getExpand() {
+      return ABIArgInfo(Expand);
+    }
+
+    Kind getKind() const { return TheKind; }
+    bool isDirect() const { return TheKind == Direct; }
+    bool isExtend() const { return TheKind == Extend; }
+    bool isIgnore() const { return TheKind == Ignore; }
+    bool isCoerce() const { return TheKind == Coerce; }
+    bool isIndirect() const { return TheKind == Indirect; }
+    bool isExpand() const { return TheKind == Expand; }
+
+    // Coerce accessors
+    const llvm::Type *getCoerceToType() const {
+      assert(TheKind == Coerce && "Invalid kind!");
+      return TypeData;
+    }
+
+    // Indirect accessors
+    unsigned getIndirectAlign() const {
+      assert(TheKind == Indirect && "Invalid kind!");
+      return UIntData;
+    }
+
+    bool getIndirectByVal() const {
+      assert(TheKind == Indirect && "Invalid kind!");
+      return BoolData;
+    }
+    
+    void dump() const;
+  };
+
+  /// ABIInfo - Target specific hooks for defining how a type should be
+  /// passed or returned from functions.
+  class ABIInfo {
+  public:
+    virtual ~ABIInfo();
+
+    virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
+                             ASTContext &Ctx,
+                             llvm::LLVMContext &VMContext) const = 0;
+
+    /// EmitVAArg - Emit the target dependent code to load a value of
+    /// \p Ty from the va_list pointed to by \p VAListAddr.
+
+    // FIXME: This is a gaping layering violation if we wanted to drop
+    // the ABI information any lower than CodeGen. Of course, for
+    // VAArg handling it has to be at this level; there is no way to
+    // abstract this out.
+    virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                   CodeGen::CodeGenFunction &CGF) const = 0;
+  };
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..05d138b
--- /dev/null
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,1219 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include <algorithm>
+
+using namespace clang;
+using namespace CodeGen;
+
+llvm::Constant *CodeGenFunction::
+BuildDescriptorBlockDecl(bool BlockHasCopyDispose, CharUnits Size,
+                         const llvm::StructType* Ty,
+                         std::vector<HelperInfo> *NoteForHelper) {
+  const llvm::Type *UnsignedLongTy
+    = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
+  llvm::Constant *C;
+  std::vector<llvm::Constant*> Elts;
+
+  // reserved
+  C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+  Elts.push_back(C);
+
+  // Size
+  // FIXME: What is the right way to say this doesn't fit?  We should give
+  // a user diagnostic in that case.  Better fix would be to change the
+  // API to size_t.
+  C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
+  Elts.push_back(C);
+
+  if (BlockHasCopyDispose) {
+    // copy_func_helper_decl
+    Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
+
+    // destroy_func_decl
+    Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
+  }
+
+  C = llvm::ConstantStruct::get(VMContext, Elts, false);
+
+  C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
+                               llvm::GlobalValue::InternalLinkage,
+                               C, "__block_descriptor_tmp");
+  return C;
+}
+
+llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
+  if (NSConcreteGlobalBlock == 0)
+    NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+                                                      "_NSConcreteGlobalBlock");
+  return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *BlockModule::getNSConcreteStackBlock() {
+  if (NSConcreteStackBlock == 0)
+    NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+                                                     "_NSConcreteStackBlock");
+  return NSConcreteStackBlock;
+}
+
+static void CollectBlockDeclRefInfo(
+  const Stmt *S, CodeGenFunction::BlockInfo &Info,
+  llvm::SmallSet<const DeclContext *, 16> &InnerContexts) {
+  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+       I != E; ++I)
+    if (*I)
+      CollectBlockDeclRefInfo(*I, Info, InnerContexts);
+
+  // We want to ensure we walk down into block literals so we can find
+  // all nested BlockDeclRefExprs.
+  if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+    InnerContexts.insert(cast<DeclContext>(BE->getBlockDecl()));
+    CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);
+  }
+
+  if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
+    // FIXME: Handle enums.
+    if (isa<FunctionDecl>(BDRE->getDecl()))
+      return;
+
+    // Only Decls that escape are added.
+    if (!InnerContexts.count(BDRE->getDecl()->getDeclContext()))
+      Info.DeclRefs.push_back(BDRE);
+  }
+}
+
+/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
+/// declared as a global variable instead of on the stack.
+static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
+  return Info.DeclRefs.empty();
+}
+
+/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
+/// ensure we can generate the debug information for the parameter for the block
+/// invoke function.
+static void AllocateAllBlockDeclRefs(const CodeGenFunction::BlockInfo &Info,
+                                     CodeGenFunction *CGF) {
+  // Always allocate self, as it is often handy in the debugger, even if there
+  // is no codegen in the block that uses it.  This is also useful to always do
+  // this as if we didn't, we'd have to figure out all code that uses a self
+  // pointer, including implicit uses.
+  if (const ObjCMethodDecl *OMD
+      = dyn_cast_or_null<ObjCMethodDecl>(CGF->CurFuncDecl)) {
+    ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
+    BlockDeclRefExpr *BDRE = new (CGF->getContext())
+      BlockDeclRefExpr(SelfDecl,
+                       SelfDecl->getType(), SourceLocation(), false);
+    CGF->AllocateBlockDecl(BDRE);
+  }
+
+  // FIXME: Also always forward the this pointer in C++ as well.
+
+  for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
+    CGF->AllocateBlockDecl(Info.DeclRefs[i]);
+}
+
+// FIXME: Push most into CGM, passing down a few bits, like current function
+// name.
+llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
+
+  std::string Name = CurFn->getName();
+  CodeGenFunction::BlockInfo Info(0, Name.c_str());
+  llvm::SmallSet<const DeclContext *, 16> InnerContexts;
+  InnerContexts.insert(BE->getBlockDecl());
+  CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);
+
+  // Check if the block can be global.
+  // FIXME: This test doesn't work for nested blocks yet.  Longer term, I'd like
+  // to just have one code path.  We should move this function into CGM and pass
+  // CGF, then we can just check to see if CGF is 0.
+  if (0 && CanBlockBeGlobal(Info))
+    return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
+
+  size_t BlockFields = 5;
+
+  bool hasIntrospection  = CGM.getContext().getLangOptions().BlockIntrospection;
+
+  if (hasIntrospection) {
+    BlockFields++;
+  }
+  std::vector<llvm::Constant*> Elts(BlockFields);
+
+  if (hasIntrospection) {
+    std::string BlockTypeEncoding;
+    CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+
+    Elts[5] = llvm::ConstantExpr::getBitCast(
+            CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
+  }
+
+  llvm::Constant *C;
+  llvm::Value *V;
+
+  {
+    // C = BuildBlockStructInitlist();
+    unsigned int flags = BLOCK_HAS_DESCRIPTOR;
+
+    if (hasIntrospection)
+      flags |= BLOCK_HAS_OBJC_TYPE;
+
+    // We run this first so that we set BlockHasCopyDispose from the entire
+    // block literal.
+    // __invoke
+    CharUnits subBlockSize; 
+    CharUnits subBlockAlign;
+    llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
+    bool subBlockHasCopyDispose = false;
+    llvm::Function *Fn
+      = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
+                                                   LocalDeclMap,
+                                                   subBlockSize,
+                                                   subBlockAlign,
+                                                   subBlockDeclRefDecls,
+                                                   subBlockHasCopyDispose);
+    BlockHasCopyDispose |= subBlockHasCopyDispose;
+    Elts[3] = Fn;
+
+    // FIXME: Don't use BlockHasCopyDispose, it is set more often then
+    // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
+    if (subBlockHasCopyDispose)
+      flags |= BLOCK_HAS_COPY_DISPOSE;
+
+    // __isa
+    C = CGM.getNSConcreteStackBlock();
+    C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
+    Elts[0] = C;
+
+    // __flags
+    const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+    C = llvm::ConstantInt::get(IntTy, flags);
+    Elts[1] = C;
+
+    // __reserved
+    C = llvm::ConstantInt::get(IntTy, 0);
+    Elts[2] = C;
+
+    if (subBlockDeclRefDecls.size() == 0) {
+      // __descriptor
+      Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize,
+                                         0, 0);
+
+      // Optimize to being a global block.
+      Elts[0] = CGM.getNSConcreteGlobalBlock();
+      Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
+
+      C = llvm::ConstantStruct::get(VMContext, Elts, false);
+
+      C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
+                                   llvm::GlobalValue::InternalLinkage, C,
+                                   "__block_holder_tmp_" +
+                                   llvm::Twine(CGM.getGlobalUniqueCount()));
+      QualType BPT = BE->getType();
+      C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
+      return C;
+    }
+
+    std::vector<const llvm::Type *> Types(BlockFields+subBlockDeclRefDecls.size());
+    for (int i=0; i<4; ++i)
+      Types[i] = Elts[i]->getType();
+    Types[4] = PtrToInt8Ty;
+    if (hasIntrospection)
+      Types[5] = PtrToInt8Ty;
+
+    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
+      const Expr *E = subBlockDeclRefDecls[i];
+      const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+      QualType Ty = E->getType();
+      if (BDRE && BDRE->isByRef()) {
+        Types[i+BlockFields] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
+      } else
+        Types[i+BlockFields] = ConvertType(Ty);
+    }
+
+    llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
+
+    llvm::AllocaInst *A = CreateTempAlloca(Ty);
+    A->setAlignment(subBlockAlign.getQuantity());
+    V = A;
+
+    std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
+    int helpersize = 0;
+
+    for (unsigned i=0; i<4; ++i)
+      Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
+    if (hasIntrospection)
+      Builder.CreateStore(Elts[5], Builder.CreateStructGEP(V, 5, "block.tmp"));
+
+    for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
+      {
+        // FIXME: Push const down.
+        Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
+        DeclRefExpr *DR;
+        ValueDecl *VD;
+
+        DR = dyn_cast<DeclRefExpr>(E);
+        // Skip padding.
+        if (DR) continue;
+
+        BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
+        VD = BDRE->getDecl();
+
+        llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
+        NoteForHelper[helpersize].index = i+5;
+        NoteForHelper[helpersize].RequiresCopying
+          = BlockRequiresCopying(VD->getType());
+        NoteForHelper[helpersize].flag
+          = (VD->getType()->isBlockPointerType()
+             ? BLOCK_FIELD_IS_BLOCK
+             : BLOCK_FIELD_IS_OBJECT);
+
+        if (LocalDeclMap[VD]) {
+          if (BDRE->isByRef()) {
+            NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
+              // FIXME: Someone double check this.
+              (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
+            llvm::Value *Loc = LocalDeclMap[VD];
+            Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
+            Loc = Builder.CreateLoad(Loc);
+            Builder.CreateStore(Loc, Addr);
+            ++helpersize;
+            continue;
+          } else
+            E = new (getContext()) DeclRefExpr (VD,
+                                                VD->getType(), 
+                                                SourceLocation());
+        }
+        if (BDRE->isByRef()) {
+          NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
+            // FIXME: Someone double check this.
+            (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
+          E = new (getContext())
+            UnaryOperator(E, UnaryOperator::AddrOf,
+                          getContext().getPointerType(E->getType()),
+                          SourceLocation());
+        }
+        ++helpersize;
+
+        RValue r = EmitAnyExpr(E, Addr, false);
+        if (r.isScalar()) {
+          llvm::Value *Loc = r.getScalarVal();
+          const llvm::Type *Ty = Types[i+BlockFields];
+          if  (BDRE->isByRef()) {
+            // E is now the address of the value field, instead, we want the
+            // address of the actual ByRef struct.  We optimize this slightly
+            // compared to gcc by not grabbing the forwarding slot as this must
+            // be done during Block_copy for us, and we can postpone the work
+            // until then.
+            CharUnits offset = BlockDecls[BDRE->getDecl()];
+
+            llvm::Value *BlockLiteral = LoadBlockStruct();
+
+            Loc = Builder.CreateGEP(BlockLiteral,
+                       llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+                                                           offset.getQuantity()),
+                                    "block.literal");
+            Ty = llvm::PointerType::get(Ty, 0);
+            Loc = Builder.CreateBitCast(Loc, Ty);
+            Loc = Builder.CreateLoad(Loc);
+            // Loc = Builder.CreateBitCast(Loc, Ty);
+          }
+          Builder.CreateStore(Loc, Addr);
+        } else if (r.isComplex())
+          // FIXME: implement
+          ErrorUnsupported(BE, "complex in block literal");
+        else if (r.isAggregate())
+          ; // Already created into the destination
+        else
+          assert (0 && "bad block variable");
+        // FIXME: Ensure that the offset created by the backend for
+        // the struct matches the previously computed offset in BlockDecls.
+      }
+    NoteForHelper.resize(helpersize);
+
+    // __descriptor
+    llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
+                                                       subBlockSize, Ty,
+                                                       &NoteForHelper);
+    Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
+    Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
+  }
+
+  QualType BPT = BE->getType();
+  return Builder.CreateBitCast(V, ConvertType(BPT));
+}
+
+
+const llvm::Type *BlockModule::getBlockDescriptorType() {
+  if (BlockDescriptorType)
+    return BlockDescriptorType;
+
+  const llvm::Type *UnsignedLongTy =
+    getTypes().ConvertType(getContext().UnsignedLongTy);
+
+  // struct __block_descriptor {
+  //   unsigned long reserved;
+  //   unsigned long block_size;
+  // };
+  BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
+                                              UnsignedLongTy,
+                                              UnsignedLongTy,
+                                              NULL);
+
+  getModule().addTypeName("struct.__block_descriptor",
+                          BlockDescriptorType);
+
+  return BlockDescriptorType;
+}
+
+const llvm::Type *BlockModule::getGenericBlockLiteralType() {
+  if (GenericBlockLiteralType)
+    return GenericBlockLiteralType;
+
+  const llvm::Type *BlockDescPtrTy =
+    llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+    getTypes().ConvertType(getContext().IntTy));
+
+  // struct __block_literal_generic {
+  //   void *__isa;
+  //   int __flags;
+  //   int __reserved;
+  //   void (*__invoke)(void *);
+  //   struct __block_descriptor *__descriptor;
+  //   // GNU runtime only:
+  //   const char *types;
+  // };
+  if (CGM.getContext().getLangOptions().BlockIntrospection)
+    GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+                                                  PtrToInt8Ty,
+                                                  IntTy,
+                                                  IntTy,
+                                                  PtrToInt8Ty,
+                                                  BlockDescPtrTy,
+                                                  PtrToInt8Ty,
+                                                  NULL);
+  else
+    GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+                                                  PtrToInt8Ty,
+                                                  IntTy,
+                                                  IntTy,
+                                                  PtrToInt8Ty,
+                                                  BlockDescPtrTy,
+                                                  NULL);
+
+  getModule().addTypeName("struct.__block_literal_generic",
+                          GenericBlockLiteralType);
+
+  return GenericBlockLiteralType;
+}
+
+const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
+  if (GenericExtendedBlockLiteralType)
+    return GenericExtendedBlockLiteralType;
+
+  const llvm::Type *BlockDescPtrTy =
+    llvm::PointerType::getUnqual(getBlockDescriptorType());
+
+  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+    getTypes().ConvertType(getContext().IntTy));
+
+  // struct __block_literal_generic {
+  //   void *__isa;
+  //   int __flags;
+  //   int __reserved;
+  //   void (*__invoke)(void *);
+  //   struct __block_descriptor *__descriptor;
+  //   void *__copy_func_helper_decl;
+  //   void *__destroy_func_decl;
+  // };
+  GenericExtendedBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+                                                          PtrToInt8Ty,
+                                                          IntTy,
+                                                          IntTy,
+                                                          PtrToInt8Ty,
+                                                          BlockDescPtrTy,
+                                                          PtrToInt8Ty,
+                                                          PtrToInt8Ty,
+                                                          NULL);
+
+  getModule().addTypeName("struct.__block_literal_extended_generic",
+                          GenericExtendedBlockLiteralType);
+
+  return GenericExtendedBlockLiteralType;
+}
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E, 
+                                          ReturnValueSlot ReturnValue) {
+  const BlockPointerType *BPT =
+    E->getCallee()->getType()->getAs<BlockPointerType>();
+
+  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+  // Get a pointer to the generic block literal.
+  const llvm::Type *BlockLiteralTy =
+    llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+  // Bitcast the callee to a block literal.
+  llvm::Value *BlockLiteral =
+    Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+  // Get the function pointer from the literal.
+  llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
+
+  BlockLiteral =
+    Builder.CreateBitCast(BlockLiteral,
+                          llvm::Type::getInt8PtrTy(VMContext),
+                          "tmp");
+
+  // Add the block literal.
+  QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
+  CallArgList Args;
+  Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));
+
+  QualType FnType = BPT->getPointeeType();
+
+  // And the rest of the arguments.
+  EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
+               E->arg_begin(), E->arg_end());
+
+  // Load the function.
+  llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");
+
+  const FunctionType *FuncTy = FnType->getAs<FunctionType>();
+  QualType ResultType = FuncTy->getResultType();
+
+  const CGFunctionInfo &FnInfo =
+    CGM.getTypes().getFunctionInfo(ResultType, Args, FuncTy->getCallConv(),
+                                   FuncTy->getNoReturnAttr());
+
+  // Cast the function pointer to the right type.
+  const llvm::Type *BlockFTy =
+    CGM.getTypes().GetFunctionType(FnInfo, false);
+
+  const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+  Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+  // And call the block.
+  return EmitCall(FnInfo, Func, ReturnValue, Args);
+}
+
+CharUnits CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
+  const ValueDecl *VD = E->getDecl();
+  CharUnits &offset = BlockDecls[VD];
+
+  // See if we have already allocated an offset for this variable.
+  if (offset.isPositive())
+    return offset;
+
+  // Don't run the expensive check, unless we have to.
+  if (!BlockHasCopyDispose)
+    if (E->isByRef()
+        || BlockRequiresCopying(E->getType()))
+      BlockHasCopyDispose = true;
+
+  // if not, allocate one now.
+  offset = getBlockOffset(E);
+
+  return offset;
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
+  const ValueDecl *VD = E->getDecl();
+  CharUnits offset = AllocateBlockDecl(E);
+  
+
+  llvm::Value *BlockLiteral = LoadBlockStruct();
+  llvm::Value *V = Builder.CreateGEP(BlockLiteral,
+                       llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+                                                         offset.getQuantity()),
+                                     "block.literal");
+  if (E->isByRef()) {
+    const llvm::Type *PtrStructTy
+      = llvm::PointerType::get(BuildByRefType(VD), 0);
+    // The block literal will need a copy/destroy helper.
+    BlockHasCopyDispose = true;
+    
+    const llvm::Type *Ty = PtrStructTy;
+    Ty = llvm::PointerType::get(Ty, 0);
+    V = Builder.CreateBitCast(V, Ty);
+    V = Builder.CreateLoad(V);
+    V = Builder.CreateStructGEP(V, 1, "forwarding");
+    V = Builder.CreateLoad(V);
+    V = Builder.CreateBitCast(V, PtrStructTy);
+    V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD), 
+                                VD->getNameAsString());
+  } else {
+    const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
+
+    Ty = llvm::PointerType::get(Ty, 0);
+    V = Builder.CreateBitCast(V, Ty);
+  }
+  return V;
+}
+
+void CodeGenFunction::BlockForwardSelf() {
+  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+  ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
+  llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
+  if (DMEntry)
+    return;
+  // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care
+  BlockDeclRefExpr *BDRE = new (getContext())
+    BlockDeclRefExpr(SelfDecl,
+                     SelfDecl->getType(), SourceLocation(), false);
+  DMEntry = GetAddrOfBlockDecl(BDRE);
+}
+
+llvm::Constant *
+BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
+  // Generate the block descriptor.
+  const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
+  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+    getTypes().ConvertType(getContext().IntTy));
+
+  llvm::Constant *DescriptorFields[2];
+
+  // Reserved
+  DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
+
+  // Block literal size. For global blocks we just use the size of the generic
+  // block literal struct.
+  CharUnits BlockLiteralSize = 
+    CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
+  DescriptorFields[1] =
+    llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());
+
+  llvm::Constant *DescriptorStruct =
+    llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);
+
+  llvm::GlobalVariable *Descriptor =
+    new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
+                             llvm::GlobalVariable::InternalLinkage,
+                             DescriptorStruct, "__block_descriptor_global");
+
+  int FieldCount = 5;
+  // Generate the constants for the block literal.
+  if (CGM.getContext().getLangOptions().BlockIntrospection)
+    FieldCount = 6;
+
+  std::vector<llvm::Constant*> LiteralFields(FieldCount);
+
+  CodeGenFunction::BlockInfo Info(0, n);
+  CharUnits subBlockSize; 
+  CharUnits subBlockAlign;
+  llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
+  bool subBlockHasCopyDispose = false;
+  llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+  llvm::Function *Fn
+    = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
+                                                 subBlockSize,
+                                                 subBlockAlign,
+                                                 subBlockDeclRefDecls,
+                                                 subBlockHasCopyDispose);
+  assert(subBlockSize == BlockLiteralSize
+         && "no imports allowed for global block");
+
+  // isa
+  LiteralFields[0] = getNSConcreteGlobalBlock();
+
+  // Flags
+  LiteralFields[1] = CGM.getContext().getLangOptions().BlockIntrospection ?
+    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR |
+            BLOCK_HAS_OBJC_TYPE) :
+    llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);
+
+  // Reserved
+  LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
+
+  // Function
+  LiteralFields[3] = Fn;
+
+  // Descriptor
+  LiteralFields[4] = Descriptor;
+  
+  // Type encoding
+  if (CGM.getContext().getLangOptions().BlockIntrospection) {
+    std::string BlockTypeEncoding;
+    CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+
+    LiteralFields[5] = CGM.GetAddrOfConstantCString(BlockTypeEncoding);
+  }
+
+  llvm::Constant *BlockLiteralStruct =
+    llvm::ConstantStruct::get(VMContext, LiteralFields, false);
+
+  llvm::GlobalVariable *BlockLiteral =
+    new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
+                             llvm::GlobalVariable::InternalLinkage,
+                             BlockLiteralStruct, "__block_literal_global");
+
+  return BlockLiteral;
+}
+
+llvm::Value *CodeGenFunction::LoadBlockStruct() {
+  llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
+                                      "self");
+  // For now, we codegen based upon byte offsets.
+  return Builder.CreateBitCast(V, PtrToInt8Ty);
+}
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
+                                       const BlockInfo& Info,
+                                       const Decl *OuterFuncDecl,
+                                  llvm::DenseMap<const Decl*, llvm::Value*> ldm,
+                                       CharUnits &Size,
+                                       CharUnits &Align,
+                       llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
+                                       bool &subBlockHasCopyDispose) {
+
+  // Check if we should generate debug info for this block.
+  if (CGM.getDebugInfo())
+    DebugInfo = CGM.getDebugInfo();
+
+  // Arrange for local static and local extern declarations to appear
+  // to be local to this function as well, as they are directly referenced
+  // in a block.
+  for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
+       i != ldm.end();
+       ++i) {
+    const VarDecl *VD = dyn_cast<VarDecl>(i->first);
+
+    if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
+      LocalDeclMap[VD] = i->second;
+  }
+
+  BlockOffset = 
+      CGM.GetTargetTypeStoreSize(CGM.getGenericBlockLiteralType());
+  BlockAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
+
+  const FunctionType *BlockFunctionType = BExpr->getFunctionType();
+  QualType ResultType;
+  CallingConv CC = BlockFunctionType->getCallConv();
+  bool NoReturn = BlockFunctionType->getNoReturnAttr();
+  bool IsVariadic;
+  if (const FunctionProtoType *FTy =
+      dyn_cast<FunctionProtoType>(BlockFunctionType)) {
+    ResultType = FTy->getResultType();
+    IsVariadic = FTy->isVariadic();
+  } else {
+    // K&R style block.
+    ResultType = BlockFunctionType->getResultType();
+    IsVariadic = false;
+  }
+
+  FunctionArgList Args;
+
+  CurFuncDecl = OuterFuncDecl;
+
+  const BlockDecl *BD = BExpr->getBlockDecl();
+
+  IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
+
+  // Allocate all BlockDeclRefDecls, so we can calculate the right ParmTy below.
+  AllocateAllBlockDeclRefs(Info, this);
+
+  QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
+                                                  BlockDeclRefDecls);
+  // FIXME: This leaks
+  ImplicitParamDecl *SelfDecl =
+    ImplicitParamDecl::Create(getContext(), 0,
+                              SourceLocation(), II,
+                              ParmTy);
+
+  Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
+  BlockStructDecl = SelfDecl;
+
+  for (BlockDecl::param_const_iterator i = BD->param_begin(),
+       e = BD->param_end(); i != e; ++i)
+    Args.push_back(std::make_pair(*i, (*i)->getType()));
+
+  const CGFunctionInfo &FI =
+    CGM.getTypes().getFunctionInfo(ResultType, Args, CC, NoReturn);
+
+  CodeGenTypes &Types = CGM.getTypes();
+  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+
+  llvm::Function *Fn =
+    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+                           llvm::Twine("__") + Info.Name + "_block_invoke_",
+                           &CGM.getModule());
+
+  CGM.SetInternalFunctionAttributes(BD, Fn, FI);
+
+  StartFunction(BD, ResultType, Fn, Args,
+                BExpr->getBody()->getLocEnd());
+
+  CurFuncDecl = OuterFuncDecl;
+  CurCodeDecl = BD;
+
+  // Save a spot to insert the debug information for all the BlockDeclRefDecls.
+  llvm::BasicBlock *entry = Builder.GetInsertBlock();
+  llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
+  --entry_ptr;
+
+  EmitStmt(BExpr->getBody());
+
+  // Remember where we were...
+  llvm::BasicBlock *resume = Builder.GetInsertBlock();
+
+  // Go back to the entry.
+  ++entry_ptr;
+  Builder.SetInsertPoint(entry, entry_ptr);
+
+  if (CGDebugInfo *DI = getDebugInfo()) {
+    // Emit debug information for all the BlockDeclRefDecls.
+    for (unsigned i = 0, e = BlockDeclRefDecls.size(); i != e; ++i) {
+      if (const BlockDeclRefExpr *BDRE = 
+            dyn_cast<BlockDeclRefExpr>(BlockDeclRefDecls[i])) {
+        const ValueDecl *D = BDRE->getDecl();
+        DI->setLocation(D->getLocation());
+        DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
+                                             LocalDeclMap[getBlockStructDecl()],
+                                              Builder, this);
+      }
+    }
+  }
+  // And resume where we left off.
+  if (resume == 0)
+    Builder.ClearInsertionPoint();
+  else
+    Builder.SetInsertPoint(resume);
+
+  FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
+
+  // The runtime needs a minimum alignment of a void *.
+  CharUnits MinAlign = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
+  BlockOffset = CharUnits::fromQuantity(
+      llvm::RoundUpToAlignment(BlockOffset.getQuantity(), 
+                               MinAlign.getQuantity()));
+
+  Size = BlockOffset;
+  Align = BlockAlign;
+  subBlockDeclRefDecls = BlockDeclRefDecls;
+  subBlockHasCopyDispose |= BlockHasCopyDispose;
+  return Fn;
+}
+
+CharUnits BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
+  const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());
+
+  CharUnits Size = getContext().getTypeSizeInChars(D->getType());
+  CharUnits Align = getContext().getDeclAlign(D);
+
+  if (BDRE->isByRef()) {
+    Size = getContext().getTypeSizeInChars(getContext().VoidPtrTy);
+    Align = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
+  }
+
+  assert ((Align.isPositive()) && "alignment must be 1 byte or more");
+
+  CharUnits OldOffset = BlockOffset;
+
+  // Ensure proper alignment, even if it means we have to have a gap
+  BlockOffset = CharUnits::fromQuantity(
+      llvm::RoundUpToAlignment(BlockOffset.getQuantity(), Align.getQuantity()));
+  BlockAlign = std::max(Align, BlockAlign);
+
+  CharUnits Pad = BlockOffset - OldOffset;
+  if (Pad.isPositive()) {
+    llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad.getQuantity());
+    QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
+                                                       llvm::APInt(32, 
+                                                         Pad.getQuantity()),
+                                                       ArrayType::Normal, 0);
+    ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
+                                         0, QualType(PadTy), 0, VarDecl::None);
+    Expr *E;
+    E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
+                                       SourceLocation());
+    BlockDeclRefDecls.push_back(E);
+  }
+  BlockDeclRefDecls.push_back(BDRE);
+
+  BlockOffset += Size;
+  return BlockOffset-Size;
+}
+
+llvm::Constant *BlockFunction::
+GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
+                           std::vector<HelperInfo> *NoteForHelperp) {
+  QualType R = getContext().VoidTy;
+
+  FunctionArgList Args;
+  // FIXME: This leaks
+  ImplicitParamDecl *Dst =
+    ImplicitParamDecl::Create(getContext(), 0,
+                              SourceLocation(), 0,
+                              getContext().getPointerType(getContext().VoidTy));
+  Args.push_back(std::make_pair(Dst, Dst->getType()));
+  ImplicitParamDecl *Src =
+    ImplicitParamDecl::Create(getContext(), 0,
+                              SourceLocation(), 0,
+                              getContext().getPointerType(getContext().VoidTy));
+  Args.push_back(std::make_pair(Src, Src->getType()));
+
+  const CGFunctionInfo &FI =
+    CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);
+
+  // FIXME: We'd like to put these into a mergable by content, with
+  // internal linkage.
+  CodeGenTypes &Types = CGM.getTypes();
+  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+
+  llvm::Function *Fn =
+    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+                           "__copy_helper_block_", &CGM.getModule());
+
+  IdentifierInfo *II
+    = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+  FunctionDecl *FD = FunctionDecl::Create(getContext(),
+                                          getContext().getTranslationUnitDecl(),
+                                          SourceLocation(), II, R, 0,
+                                          FunctionDecl::Static, false,
+                                          true);
+  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
+
+  llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
+  llvm::Type *PtrPtrT;
+
+  if (NoteForHelperp) {
+    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
+
+    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
+    SrcObj = Builder.CreateLoad(SrcObj);
+
+    llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
+    llvm::Type *PtrPtrT;
+    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
+    DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
+    DstObj = Builder.CreateLoad(DstObj);
+
+    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
+      int flag = NoteForHelper[i].flag;
+      int index = NoteForHelper[i].index;
+
+      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
+          || NoteForHelper[i].RequiresCopying) {
+        llvm::Value *Srcv = SrcObj;
+        Srcv = Builder.CreateStructGEP(Srcv, index);
+        Srcv = Builder.CreateBitCast(Srcv,
+                                     llvm::PointerType::get(PtrToInt8Ty, 0));
+        Srcv = Builder.CreateLoad(Srcv);
+
+        llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
+        Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
+
+        llvm::Value *N = llvm::ConstantInt::get(
+              llvm::Type::getInt32Ty(T->getContext()), flag);
+        llvm::Value *F = getBlockObjectAssign();
+        Builder.CreateCall3(F, Dstv, Srcv, N);
+      }
+    }
+  }
+
+  CGF.FinishFunction();
+
+  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
+}
+
/// GenerateDestroyHelperFunction - Emit the "__destroy_helper_block_"
/// function the runtime calls to tear down a block literal of struct
/// type T.  Its C signature is: void helper(void *src).  Every captured
/// field recorded in NoteForHelperp that is __block or requires copying
/// is released via BuildBlockRelease (_Block_object_dispose).
llvm::Constant *BlockFunction::
GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
                              const llvm::StructType* T,
                              std::vector<HelperInfo> *NoteForHelperp) {
  QualType R = getContext().VoidTy;

  // The single implicit parameter is the block being destroyed, as void*.
  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__destroy_helper_block_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__destroy_helper_block_");

  // Synthesize a FunctionDecl to hand to StartFunction.
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  if (NoteForHelperp) {
    std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;

    // The parameter slot holds a void*; cast it to T** and load to get
    // the block pointer.
    llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
    llvm::Type *PtrPtrT;
    PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
    SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
    SrcObj = Builder.CreateLoad(SrcObj);

    for (unsigned i=0; i < NoteForHelper.size(); ++i) {
      int flag = NoteForHelper[i].flag;
      int index = NoteForHelper[i].index;

      if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
          || NoteForHelper[i].RequiresCopying) {
        // Load the captured value (an i8*) out of field 'index' and
        // release it with the recorded flags.
        llvm::Value *Srcv = SrcObj;
        Srcv = Builder.CreateStructGEP(Srcv, index);
        Srcv = Builder.CreateBitCast(Srcv,
                                     llvm::PointerType::get(PtrToInt8Ty, 0));
        Srcv = Builder.CreateLoad(Srcv);

        BuildBlockRelease(Srcv, flag);
      }
    }
  }

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}
+
+llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
+                                       std::vector<HelperInfo> *NoteForHelper) {
+  return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
+                                                         T, NoteForHelper);
+}
+
+llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
+                                      std::vector<HelperInfo> *NoteForHelperp) {
+  return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
+                                                            T, NoteForHelperp);
+}
+
/// GeneratebyrefCopyHelperFunction - Emit the
/// "__Block_byref_id_object_copy_" helper the runtime calls to copy the
/// object held in a __block (byref) structure.  T is the LLVM pointer
/// type of the byref structure; field 6 ("x") holds the object.
llvm::Constant *BlockFunction::
GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
  QualType R = getContext().VoidTy;

  // C signature: void helper(void *dst, void *src).
  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));

  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_copy_", &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");

  // Synthesize a FunctionDecl to hand to StartFunction.
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // dst->x: the address of the destination object slot, as i8*.
  llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);

  // src->x: the source object itself, loaded out of its slot.
  // NOTE(review): unlike the dst path, this loads the parameter first and
  // then bitcasts the loaded value to T; both paths assume T is a pointer
  // type.
  V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateLoad(V);
  V = Builder.CreateBitCast(V, T);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  llvm::Value *SrcObj = Builder.CreateLoad(V);

  // Mark the call as coming from the byref copy/dispose support routines.
  flag |= BLOCK_BYREF_CALLER;

  llvm::Value *N = llvm::ConstantInt::get(
          llvm::Type::getInt32Ty(T->getContext()), flag);
  llvm::Value *F = getBlockObjectAssign();
  Builder.CreateCall3(F, DstObj, SrcObj, N);

  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}
+
/// GeneratebyrefDestroyHelperFunction - Emit the
/// "__Block_byref_id_object_dispose_" helper the runtime calls to
/// release the object held in a __block (byref) structure.  T is the
/// LLVM pointer type of the byref structure; field 6 ("x") holds the
/// object.
llvm::Constant *
BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
                                                  int flag) {
  QualType R = getContext().VoidTy;

  // C signature: void helper(void *src).
  FunctionArgList Args;
  // FIXME: This leaks
  ImplicitParamDecl *Src =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));

  Args.push_back(std::make_pair(Src, Src->getType()));

  const CGFunctionInfo &FI =
    CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);

  CodeGenTypes &Types = CGM.getTypes();
  const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);

  // FIXME: We'd like to put these into a mergable by content, with
  // internal linkage.
  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__Block_byref_id_object_dispose_",
                           &CGM.getModule());

  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");

  // Synthesize a FunctionDecl to hand to StartFunction.
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static, false,
                                          true);
  CGF.StartFunction(FD, R, Fn, Args, SourceLocation());

  // Load src->x, the captured object (an i8*).
  llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
  V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
  V = Builder.CreateLoad(V);
  V = Builder.CreateStructGEP(V, 6, "x");
  V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
  V = Builder.CreateLoad(V);

  // Mark the call as coming from the byref copy/dispose support routines.
  flag |= BLOCK_BYREF_CALLER;
  BuildBlockRelease(V, flag);
  CGF.FinishFunction();

  return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
}
+
+llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
+                                                    int Flag, unsigned Align) {
+  // All alignments below that of pointer alignment collapse down to just
+  // pointer alignment, as we always have at least that much alignment to begin
+  // with.
+  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+  
+  // As an optimization, we only generate a single function of each kind we
+  // might need.  We need a different one for each alignment and for each
+  // setting of flags.  We mix Align and flag to get the kind.
+  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
+  llvm::Constant *&Entry = CGM.AssignCache[Kind];
+  if (Entry)
+    return Entry;
+  return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
+}
+
+llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
+                                                       int Flag,
+                                                       unsigned Align) {
+  // All alignments below that of pointer alignment collpase down to just
+  // pointer alignment, as we always have at least that much alignment to begin
+  // with.
+  Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+  
+  // As an optimization, we only generate a single function of each kind we
+  // might need.  We need a different one for each alignment and for each
+  // setting of flags.  We mix Align and flag to get the kind.
+  uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
+  llvm::Constant *&Entry = CGM.DestroyCache[Kind];
+  if (Entry)
+    return Entry;
+  return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
+}
+
+llvm::Value *BlockFunction::getBlockObjectDispose() {
+  if (CGM.BlockObjectDispose == 0) {
+    const llvm::FunctionType *FTy;
+    std::vector<const llvm::Type*> ArgTys;
+    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+    ArgTys.push_back(PtrToInt8Ty);
+    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+    CGM.BlockObjectDispose
+      = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
+  }
+  return CGM.BlockObjectDispose;
+}
+
+llvm::Value *BlockFunction::getBlockObjectAssign() {
+  if (CGM.BlockObjectAssign == 0) {
+    const llvm::FunctionType *FTy;
+    std::vector<const llvm::Type*> ArgTys;
+    const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+    ArgTys.push_back(PtrToInt8Ty);
+    ArgTys.push_back(PtrToInt8Ty);
+    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+    CGM.BlockObjectAssign
+      = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
+  }
+  return CGM.BlockObjectAssign;
+}
+
+void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
+  llvm::Value *F = getBlockObjectDispose();
+  llvm::Value *N;
+  V = Builder.CreateBitCast(V, PtrToInt8Ty);
+  N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
+  Builder.CreateCall2(F, V, N);
+}
+
// The ASTContext is shared with (and fetched through) the CodeGenModule.
ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
+
+BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
+                             CGBuilderTy &B)
+  : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
+  PtrToInt8Ty = llvm::PointerType::getUnqual(
+            llvm::Type::getInt8Ty(VMContext));
+
+  BlockHasCopyDispose = false;
+}
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..a9f5ae0
--- /dev/null
+++ b/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,230 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for llvm translation for block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBLOCKS_H
+#define CLANG_CODEGEN_CGBLOCKS_H
+
+#include "CodeGenTypes.h"
+#include "clang/AST/Type.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+#include <vector>
+#include <map>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+  class Module;
+  class Constant;
+  class Function;
+  class GlobalValue;
+  class TargetData;
+  class FunctionType;
+  class Value;
+  class LLVMContext;
+}
+
+namespace clang {
+
+namespace CodeGen {
+class CodeGenModule;
+
/// BlockBase - Shared enumeration of the flag bits stored in a block
/// literal's flags word; used by both BlockModule and BlockFunction.
class BlockBase {
public:
  enum {
    BLOCK_NEEDS_FREE       = (1 << 24),
    BLOCK_HAS_COPY_DISPOSE = (1 << 25),
    BLOCK_HAS_CXX_OBJ      = (1 << 26),
    BLOCK_IS_GC            = (1 << 27),
    BLOCK_IS_GLOBAL        = (1 << 28),
    BLOCK_HAS_DESCRIPTOR   = (1 << 29),
    BLOCK_HAS_OBJC_TYPE    = (1 << 30)
  };
};
+
/// BlockModule - Per-module state for block code generation: cached
/// runtime declarations, cached LLVM types for block literals, and the
/// counter used to give global block literals unique names.
class BlockModule : public BlockBase {
  ASTContext &Context;
  llvm::Module &TheModule;
  const llvm::TargetData &TheTargetData;
  CodeGenTypes &Types;
  CodeGenModule &CGM;
  llvm::LLVMContext &VMContext;

  ASTContext &getContext() const { return Context; }
  llvm::Module &getModule() const { return TheModule; }
  CodeGenTypes &getTypes() { return Types; }
  const llvm::TargetData &getTargetData() const { return TheTargetData; }
public:
  llvm::Constant *getNSConcreteGlobalBlock();
  llvm::Constant *getNSConcreteStackBlock();
  // Note: despite the "get" name, this increments the counter each call.
  int getGlobalUniqueCount() { return ++Block.GlobalUniqueCount; }
  const llvm::Type *getBlockDescriptorType();

  const llvm::Type *getGenericBlockLiteralType();
  const llvm::Type *getGenericExtendedBlockLiteralType();

  llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);

  /// NSConcreteGlobalBlock - Cached reference to the class pointer for global
  /// blocks.
  llvm::Constant *NSConcreteGlobalBlock;

  /// NSConcreteStackBlock - Cached reference to the class pointer for stack
  /// blocks.
  llvm::Constant *NSConcreteStackBlock;

  // Lazily-built LLVM types for block descriptors and literals (see the
  // getters above); null until first requested.
  const llvm::Type *BlockDescriptorType;
  const llvm::Type *GenericBlockLiteralType;
  const llvm::Type *GenericExtendedBlockLiteralType;
  struct {
    int GlobalUniqueCount;
  } Block;

  // Cached declarations of the _Block_object_assign/_Block_object_dispose
  // runtime functions; null until first requested.
  llvm::Value *BlockObjectAssign;
  llvm::Value *BlockObjectDispose;
  const llvm::Type *PtrToInt8Ty;

  // Caches of byref copy/destroy helpers, keyed on a mix of alignment and
  // flags (see BlockFunction::BuildbyrefCopyHelper).
  std::map<uint64_t, llvm::Constant *> AssignCache;
  std::map<uint64_t, llvm::Constant *> DestroyCache;

  BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
              CodeGenTypes &T, CodeGenModule &CodeGen)
    : Context(C), TheModule(M), TheTargetData(TD), Types(T),
      CGM(CodeGen), VMContext(M.getContext()),
      NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
      GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0),
      BlockObjectAssign(0), BlockObjectDispose(0) {
    Block.GlobalUniqueCount = 0;
    PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
  }

  bool BlockRequiresCopying(QualType Ty)
    { return getContext().BlockRequiresCopying(Ty); }
};
+
/// BlockFunction - Per-block state used while emitting a single block
/// literal, its invoke function, and its copy/dispose helpers.
class BlockFunction : public BlockBase {
  CodeGenModule &CGM;
  CodeGenFunction &CGF;
  ASTContext &getContext() const;

protected:
  llvm::LLVMContext &VMContext;

public:
  const llvm::Type *PtrToInt8Ty;
  /// HelperInfo - Describes one captured field the copy/destroy helpers
  /// must process: its struct field index and its runtime flags.
  struct HelperInfo {
    int index;
    int flag;
    bool RequiresCopying;
  };

  enum {
    BLOCK_FIELD_IS_OBJECT   =  3,  /* id, NSObject, __attribute__((NSObject)),
                                      block, ... */
    BLOCK_FIELD_IS_BLOCK    =  7,  /* a block variable */
    BLOCK_FIELD_IS_BYREF    =  8,  /* the on stack structure holding the __block
                                      variable */
    BLOCK_FIELD_IS_WEAK     = 16,  /* declared __weak, only used in byref copy
                                      helpers */
    BLOCK_BYREF_CALLER      = 128,  /* called from __block (byref) copy/dispose
                                      support routines */
    BLOCK_BYREF_CURRENT_MAX = 256
  };

  /// BlockInfo - Information to generate a block literal.
  struct BlockInfo {
    /// BlockLiteralTy - The type of the block literal.
    const llvm::Type *BlockLiteralTy;

    /// Name - the name of the function this block was created for, if any.
    const char *Name;

    /// ByCopyDeclRefs - Variables from parent scopes that have been imported
    /// into this block.
    llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;

    BlockInfo(const llvm::Type *blt, const char *n)
      : BlockLiteralTy(blt), Name(n) {
      // Skip asm prefix, if any.
      if (Name && Name[0] == '\01')
        ++Name;
    }
  };

  CGBuilderTy &Builder;

  BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);

  /// BlockOffset - The offset in bytes for the next allocation of an
  /// imported block variable.
  CharUnits BlockOffset;
  /// BlockAlign - Maximal alignment needed for the Block expressed in 
  /// characters.
  CharUnits BlockAlign;

  /// getBlockOffset - Allocate an offset for the ValueDecl from a
  /// BlockDeclRefExpr in a block literal (BlockExpr).
  CharUnits getBlockOffset(const BlockDeclRefExpr *E);

  /// BlockHasCopyDispose - True iff the block uses copy/dispose.
  bool BlockHasCopyDispose;

  /// BlockDeclRefDecls - Decls from BlockDeclRefExprs in appearance order
  /// in a block literal.  Decls without names are used for padding.
  llvm::SmallVector<const Expr *, 8> BlockDeclRefDecls;

  /// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
  std::map<const Decl*, CharUnits> BlockDecls;

  // BlockStructDecl - The implicit ".block_descriptor" parameter of the
  // block invoke function, through which captures are addressed.
  ImplicitParamDecl *BlockStructDecl;
  ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }

  llvm::Constant *GenerateCopyHelperFunction(bool, const llvm::StructType *,
                                             std::vector<HelperInfo> *);
  llvm::Constant *GenerateDestroyHelperFunction(bool, const llvm::StructType *,
                                                std::vector<HelperInfo> *);

  llvm::Constant *BuildCopyHelper(const llvm::StructType *,
                                  std::vector<HelperInfo> *);
  llvm::Constant *BuildDestroyHelper(const llvm::StructType *,
                                     std::vector<HelperInfo> *);

  llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
  llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);

  llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag,
                                       unsigned Align);
  llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
                                          unsigned Align);

  llvm::Value *getBlockObjectAssign();
  llvm::Value *getBlockObjectDispose();
  void BuildBlockRelease(llvm::Value *DeclPtr, int flag = BLOCK_FIELD_IS_BYREF);

  bool BlockRequiresCopying(QualType Ty)
    { return getContext().BlockRequiresCopying(Ty); }
};
+
+}  // end namespace CodeGen
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..ed56bd9
--- /dev/null
+++ b/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,26 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation  ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBUILDER_H
+#define CLANG_CODEGEN_CGBUILDER_H
+
+#include "llvm/Support/IRBuilder.h"
+
namespace clang {
namespace CodeGen {
  // CGBuilderTy - The IRBuilder type used throughout CodeGen.
  // Don't preserve names on values in an optimized build: IRBuilder<false>
  // drops value names, while debug builds keep the default so the emitted
  // IR stays readable.
#ifdef NDEBUG
  typedef llvm::IRBuilder<false> CGBuilderTy;
#else
  typedef llvm::IRBuilder<> CGBuilderTy;
#endif
}  // end namespace CodeGen
}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..beaf7b8
--- /dev/null
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,889 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID
+/// and the expression node.  The intrinsic is overloaded on the result
+/// type and the pointer argument type; the call returns the value the
+/// memory held before the operation (the __sync_fetch_and_* form).
+static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
+                               Intrinsic::ID Id, const CallExpr *E) {
+  const llvm::Type *ResType[2];
+  ResType[0] = CGF.ConvertType(E->getType());
+  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+  return RValue::get(CGF.Builder.CreateCall2(AtomF,
+                                             CGF.EmitScalarExpr(E->getArg(0)),
+                                             CGF.EmitScalarExpr(E->getArg(1))));
+}
+
+/// Utility to insert an atomic instruction based on Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation (the __sync_*_and_fetch form): the intrinsic yields the old
+/// value, so Op is re-applied to the old value to recompute the new one.
+static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
+                                   Intrinsic::ID Id, const CallExpr *E,
+                                   Instruction::BinaryOps Op) {
+  const llvm::Type *ResType[2];
+  ResType[0] = CGF.ConvertType(E->getType());
+  ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
+  Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
+  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
+  Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
+  Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);
+
+  // NOTE(review): for nand the old value is negated before Op is applied,
+  // i.e. new = ~old & val; this assumes that matches the intended nand
+  // recomputation for the targeted atomic_load_nand semantics -- verify
+  // against LLVM's llvm.atomic.load.nand definition.
+  if (Id == Intrinsic::atomic_load_nand)
+    Result = CGF.Builder.CreateNot(Result);
+
+
+  return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
+}
+
+/// EmitBuiltinExpr - Emit the call expression E to builtin 'BuiltinID',
+/// declared as FD.  Constant-foldable calls are folded away; otherwise the
+/// builtin is lowered directly to IR, forwarded to a libm function, matched
+/// against a target intrinsic, or handed to the target-specific emitter.
+/// Unsupported builtins are diagnosed and lowered to undef.
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+                                        unsigned BuiltinID, const CallExpr *E) {
+  // See if we can constant fold this builtin.  If so, don't emit it at all.
+  Expr::EvalResult Result;
+  if (E->Evaluate(Result, CGM.getContext())) {
+    if (Result.Val.isInt())
+      return RValue::get(llvm::ConstantInt::get(VMContext,
+                                                Result.Val.getInt()));
+    else if (Result.Val.isFloat())
+      return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
+  }
+
+  switch (BuiltinID) {
+  default: break;  // Handle intrinsics and libm functions below.
+  case Builtin::BI__builtin___CFStringMakeConstantString:
+  case Builtin::BI__builtin___NSStringMakeConstantString:
+    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+  case Builtin::BI__builtin_stdarg_start:
+  case Builtin::BI__builtin_va_start:
+  case Builtin::BI__builtin_va_end: {
+    // The va_list is passed to llvm.va_start/llvm.va_end as an i8*.
+    Value *ArgValue = EmitVAListRef(E->getArg(0));
+    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+    if (ArgValue->getType() != DestType)
+      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+                                       ArgValue->getName().data());
+
+    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+      Intrinsic::vaend : Intrinsic::vastart;
+    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
+  }
+  case Builtin::BI__builtin_va_copy: {
+    Value *DstPtr = EmitVAListRef(E->getArg(0));
+    Value *SrcPtr = EmitVAListRef(E->getArg(1));
+
+    const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+
+    DstPtr = Builder.CreateBitCast(DstPtr, Type);
+    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+                                           DstPtr, SrcPtr));
+  }
+  case Builtin::BI__builtin_abs: {
+    // abs(x) -> x >= 0 ? x : -x, via a select.
+    Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+    Value *CmpResult =
+    Builder.CreateICmpSGE(ArgValue,
+                          llvm::Constant::getNullValue(ArgValue->getType()),
+                                                            "abscond");
+    Value *Result =
+      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+    return RValue::get(Result);
+  }
+  case Builtin::BI__builtin_ctz:
+  case Builtin::BI__builtin_ctzl:
+  case Builtin::BI__builtin_ctzll: {
+    Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+    const llvm::Type *ArgType = ArgValue->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+    // The intrinsic returns the argument's width; cast to the builtin's
+    // declared return type if they differ.
+    const llvm::Type *ResultType = ConvertType(E->getType());
+    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+    if (Result->getType() != ResultType)
+      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+                                     "cast");
+    return RValue::get(Result);
+  }
+  case Builtin::BI__builtin_clz:
+  case Builtin::BI__builtin_clzl:
+  case Builtin::BI__builtin_clzll: {
+    Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+    const llvm::Type *ArgType = ArgValue->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
+
+    const llvm::Type *ResultType = ConvertType(E->getType());
+    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+    if (Result->getType() != ResultType)
+      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+                                     "cast");
+    return RValue::get(Result);
+  }
+  case Builtin::BI__builtin_ffs:
+  case Builtin::BI__builtin_ffsl:
+  case Builtin::BI__builtin_ffsll: {
+    // ffs(x) -> x ? cttz(x) + 1 : 0
+    Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+    const llvm::Type *ArgType = ArgValue->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
+
+    const llvm::Type *ResultType = ConvertType(E->getType());
+    Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
+                                   llvm::ConstantInt::get(ArgType, 1), "tmp");
+    Value *Zero = llvm::Constant::getNullValue(ArgType);
+    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+    if (Result->getType() != ResultType)
+      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+                                     "cast");
+    return RValue::get(Result);
+  }
+  case Builtin::BI__builtin_parity:
+  case Builtin::BI__builtin_parityl:
+  case Builtin::BI__builtin_parityll: {
+    // parity(x) -> ctpop(x) & 1
+    Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+    const llvm::Type *ArgType = ArgValue->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+    const llvm::Type *ResultType = ConvertType(E->getType());
+    Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
+    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
+                                      "tmp");
+    if (Result->getType() != ResultType)
+      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+                                     "cast");
+    return RValue::get(Result);
+  }
+  case Builtin::BI__builtin_popcount:
+  case Builtin::BI__builtin_popcountl:
+  case Builtin::BI__builtin_popcountll: {
+    Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+    const llvm::Type *ArgType = ArgValue->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
+
+    const llvm::Type *ResultType = ConvertType(E->getType());
+    Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
+    if (Result->getType() != ResultType)
+      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+                                     "cast");
+    return RValue::get(Result);
+  }
+  case Builtin::BI__builtin_expect:
+    // FIXME: pass expect through to LLVM
+    return RValue::get(EmitScalarExpr(E->getArg(0)));
+  case Builtin::BI__builtin_bswap32:
+  case Builtin::BI__builtin_bswap64: {
+    Value *ArgValue = EmitScalarExpr(E->getArg(0));
+    const llvm::Type *ArgType = ArgValue->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
+    return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
+  }
+  case Builtin::BI__builtin_object_size: {
+    // We pass this builtin onto the optimizer so that it can
+    // figure out the object size in more complex cases.
+    const llvm::Type *ResType[] = {
+      ConvertType(E->getType())
+    };
+    
+    // LLVM only supports 0 and 2, make sure that we pass along that
+    // as a boolean.
+    Value *Ty = EmitScalarExpr(E->getArg(1));
+    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
+    assert(CI);
+    uint64_t val = CI->getZExtValue();
+    // Bit 1 of the type argument selects min (0) vs. max (1) semantics for
+    // llvm.objectsize's i1 operand.
+    CI = ConstantInt::get(llvm::Type::getInt1Ty(VMContext), (val & 0x2) >> 1);    
+    
+    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType, 1);
+    return RValue::get(Builder.CreateCall2(F,
+                                           EmitScalarExpr(E->getArg(0)),
+                                           CI));
+  }
+  case Builtin::BI__builtin_prefetch: {
+    // The read/write and locality operands are optional in the source
+    // builtin; default to read (0) and maximal locality (3).
+    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+    // FIXME: Technically these constants should be of type 'int', yes?
+    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
+    Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
+    return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
+  }
+  case Builtin::BI__builtin_trap: {
+    Value *F = CGM.getIntrinsic(Intrinsic::trap, 0, 0);
+    return RValue::get(Builder.CreateCall(F));
+  }
+  case Builtin::BI__builtin_unreachable: {
+    // With CatchUndefined set, branch to the trap block first so reaching
+    // this point aborts instead of being undefined.
+    if (CatchUndefined && HaveInsertPoint())
+      EmitBranch(getTrapBB());
+    Value *V = Builder.CreateUnreachable();
+    Builder.ClearInsertionPoint();
+    return RValue::get(V);
+  }
+      
+  case Builtin::BI__builtin_powi:
+  case Builtin::BI__builtin_powif:
+  case Builtin::BI__builtin_powil: {
+    Value *Base = EmitScalarExpr(E->getArg(0));
+    Value *Exponent = EmitScalarExpr(E->getArg(1));
+    const llvm::Type *ArgType = Base->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::powi, &ArgType, 1);
+    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+  }
+
+  case Builtin::BI__builtin_isgreater:
+  case Builtin::BI__builtin_isgreaterequal:
+  case Builtin::BI__builtin_isless:
+  case Builtin::BI__builtin_islessequal:
+  case Builtin::BI__builtin_islessgreater:
+  case Builtin::BI__builtin_isunordered: {
+    // Ordered comparisons: we know the arguments to these are matching scalar
+    // floating point values.
+    Value *LHS = EmitScalarExpr(E->getArg(0));
+    Value *RHS = EmitScalarExpr(E->getArg(1));
+
+    switch (BuiltinID) {
+    default: assert(0 && "Unknown ordered comparison");
+    case Builtin::BI__builtin_isgreater:
+      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+      break;
+    case Builtin::BI__builtin_isgreaterequal:
+      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+      break;
+    case Builtin::BI__builtin_isless:
+      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+      break;
+    case Builtin::BI__builtin_islessequal:
+      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+      break;
+    case Builtin::BI__builtin_islessgreater:
+      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+      break;
+    case Builtin::BI__builtin_isunordered:
+      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+      break;
+    }
+    // ZExt bool to int type.
+    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
+                                          "tmp"));
+  }
+  case Builtin::BI__builtin_isnan: {
+    // isnan(x) -> x unordered with itself, which holds only for NaN.
+    Value *V = EmitScalarExpr(E->getArg(0));
+    V = Builder.CreateFCmpUNO(V, V, "cmp");
+    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+  }
+  case Builtin::BIalloca:
+  case Builtin::BI__builtin_alloca: {
+    // FIXME: LLVM IR Should allow alloca with an i64 size!
+    Value *Size = EmitScalarExpr(E->getArg(0));
+    Size = Builder.CreateIntCast(Size, llvm::Type::getInt32Ty(VMContext), false, "tmp");
+    return RValue::get(Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), Size, "tmp"));
+  }
+  case Builtin::BIbzero:
+  case Builtin::BI__builtin_bzero: {
+    // bzero(p, n) -> memset(p, 0, n) with alignment 1.
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Builder.CreateCall4(CGM.getMemSetFn(), Address,
+                        llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
+                        EmitScalarExpr(E->getArg(1)),
+                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
+    return RValue::get(Address);
+  }
+  case Builtin::BImemcpy:
+  case Builtin::BI__builtin_memcpy: {
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Builder.CreateCall4(CGM.getMemCpyFn(), Address,
+                        EmitScalarExpr(E->getArg(1)),
+                        EmitScalarExpr(E->getArg(2)),
+                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
+    return RValue::get(Address);
+  }
+  case Builtin::BImemmove:
+  case Builtin::BI__builtin_memmove: {
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Builder.CreateCall4(CGM.getMemMoveFn(), Address,
+                        EmitScalarExpr(E->getArg(1)),
+                        EmitScalarExpr(E->getArg(2)),
+                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
+    return RValue::get(Address);
+  }
+  case Builtin::BImemset:
+  case Builtin::BI__builtin_memset: {
+    // The fill value arrives as an int; truncate it to i8 for the intrinsic.
+    Value *Address = EmitScalarExpr(E->getArg(0));
+    Builder.CreateCall4(CGM.getMemSetFn(), Address,
+                        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+                                            llvm::Type::getInt8Ty(VMContext)),
+                        EmitScalarExpr(E->getArg(2)),
+                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
+    return RValue::get(Address);
+  }
+  case Builtin::BI__builtin_return_address: {
+    Value *Depth = EmitScalarExpr(E->getArg(0));
+    Depth = Builder.CreateIntCast(Depth,
+                                  llvm::Type::getInt32Ty(VMContext),
+                                  false, "tmp");
+    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
+    return RValue::get(Builder.CreateCall(F, Depth));
+  }
+  case Builtin::BI__builtin_frame_address: {
+    Value *Depth = EmitScalarExpr(E->getArg(0));
+    Depth = Builder.CreateIntCast(Depth,
+                                  llvm::Type::getInt32Ty(VMContext),
+                                  false, "tmp");
+    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+    return RValue::get(Builder.CreateCall(F, Depth));
+  }
+  case Builtin::BI__builtin_extract_return_addr: {
+    // FIXME: There should be a target hook for this
+    return RValue::get(EmitScalarExpr(E->getArg(0)));
+  }
+  case Builtin::BI__builtin_unwind_init: {
+    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
+    return RValue::get(Builder.CreateCall(F));
+  }
+#if 0
+  // FIXME: Finish/enable when LLVM backend support stabilizes
+  case Builtin::BI__builtin_setjmp: {
+    Value *Buf = EmitScalarExpr(E->getArg(0));
+    // Store the frame pointer to the buffer
+    Value *FrameAddrF = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
+    Value *FrameAddr =
+        Builder.CreateCall(FrameAddrF,
+                           Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)));
+    Builder.CreateStore(FrameAddr, Buf);
+    // Call the setjmp intrinsic
+    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp, 0, 0);
+    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+    Buf = Builder.CreateBitCast(Buf, DestType);
+    return RValue::get(Builder.CreateCall(F, Buf));
+  }
+  case Builtin::BI__builtin_longjmp: {
+    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
+    Value *Buf = EmitScalarExpr(E->getArg(0));
+    const llvm::Type *DestType = llvm::Type::getInt8PtrTy(VMContext);
+    Buf = Builder.CreateBitCast(Buf, DestType);
+    return RValue::get(Builder.CreateCall(F, Buf));
+  }
+#endif
+  // The un-suffixed __sync_* forms are overloaded; Sema rewrites them to the
+  // size-suffixed variants below before codegen ever sees them.
+  case Builtin::BI__sync_fetch_and_add:
+  case Builtin::BI__sync_fetch_and_sub:
+  case Builtin::BI__sync_fetch_and_or:
+  case Builtin::BI__sync_fetch_and_and:
+  case Builtin::BI__sync_fetch_and_xor:
+  case Builtin::BI__sync_add_and_fetch:
+  case Builtin::BI__sync_sub_and_fetch:
+  case Builtin::BI__sync_and_and_fetch:
+  case Builtin::BI__sync_or_and_fetch:
+  case Builtin::BI__sync_xor_and_fetch:
+  case Builtin::BI__sync_val_compare_and_swap:
+  case Builtin::BI__sync_bool_compare_and_swap:
+  case Builtin::BI__sync_lock_test_and_set:
+  case Builtin::BI__sync_lock_release:
+    assert(0 && "Shouldn't make it through sema");
+  case Builtin::BI__sync_fetch_and_add_1:
+  case Builtin::BI__sync_fetch_and_add_2:
+  case Builtin::BI__sync_fetch_and_add_4:
+  case Builtin::BI__sync_fetch_and_add_8:
+  case Builtin::BI__sync_fetch_and_add_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_add, E);
+  case Builtin::BI__sync_fetch_and_sub_1:
+  case Builtin::BI__sync_fetch_and_sub_2:
+  case Builtin::BI__sync_fetch_and_sub_4:
+  case Builtin::BI__sync_fetch_and_sub_8:
+  case Builtin::BI__sync_fetch_and_sub_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_sub, E);
+  case Builtin::BI__sync_fetch_and_or_1:
+  case Builtin::BI__sync_fetch_and_or_2:
+  case Builtin::BI__sync_fetch_and_or_4:
+  case Builtin::BI__sync_fetch_and_or_8:
+  case Builtin::BI__sync_fetch_and_or_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_or, E);
+  case Builtin::BI__sync_fetch_and_and_1:
+  case Builtin::BI__sync_fetch_and_and_2:
+  case Builtin::BI__sync_fetch_and_and_4:
+  case Builtin::BI__sync_fetch_and_and_8:
+  case Builtin::BI__sync_fetch_and_and_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_and, E);
+  case Builtin::BI__sync_fetch_and_xor_1:
+  case Builtin::BI__sync_fetch_and_xor_2:
+  case Builtin::BI__sync_fetch_and_xor_4:
+  case Builtin::BI__sync_fetch_and_xor_8:
+  case Builtin::BI__sync_fetch_and_xor_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
+  case Builtin::BI__sync_fetch_and_nand_1:
+  case Builtin::BI__sync_fetch_and_nand_2:
+  case Builtin::BI__sync_fetch_and_nand_4:
+  case Builtin::BI__sync_fetch_and_nand_8:
+  case Builtin::BI__sync_fetch_and_nand_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);
+
+  // Clang extensions: not overloaded yet.
+  case Builtin::BI__sync_fetch_and_min:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
+  case Builtin::BI__sync_fetch_and_max:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_max, E);
+  case Builtin::BI__sync_fetch_and_umin:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umin, E);
+  case Builtin::BI__sync_fetch_and_umax:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_load_umax, E);
+
+  case Builtin::BI__sync_add_and_fetch_1:
+  case Builtin::BI__sync_add_and_fetch_2:
+  case Builtin::BI__sync_add_and_fetch_4:
+  case Builtin::BI__sync_add_and_fetch_8:
+  case Builtin::BI__sync_add_and_fetch_16:
+    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+                                llvm::Instruction::Add);
+  case Builtin::BI__sync_sub_and_fetch_1:
+  case Builtin::BI__sync_sub_and_fetch_2:
+  case Builtin::BI__sync_sub_and_fetch_4:
+  case Builtin::BI__sync_sub_and_fetch_8:
+  case Builtin::BI__sync_sub_and_fetch_16:
+    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_sub, E,
+                                llvm::Instruction::Sub);
+  case Builtin::BI__sync_and_and_fetch_1:
+  case Builtin::BI__sync_and_and_fetch_2:
+  case Builtin::BI__sync_and_and_fetch_4:
+  case Builtin::BI__sync_and_and_fetch_8:
+  case Builtin::BI__sync_and_and_fetch_16:
+    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_and, E,
+                                llvm::Instruction::And);
+  case Builtin::BI__sync_or_and_fetch_1:
+  case Builtin::BI__sync_or_and_fetch_2:
+  case Builtin::BI__sync_or_and_fetch_4:
+  case Builtin::BI__sync_or_and_fetch_8:
+  case Builtin::BI__sync_or_and_fetch_16:
+    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_or, E,
+                                llvm::Instruction::Or);
+  case Builtin::BI__sync_xor_and_fetch_1:
+  case Builtin::BI__sync_xor_and_fetch_2:
+  case Builtin::BI__sync_xor_and_fetch_4:
+  case Builtin::BI__sync_xor_and_fetch_8:
+  case Builtin::BI__sync_xor_and_fetch_16:
+    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
+                                llvm::Instruction::Xor);
+  case Builtin::BI__sync_nand_and_fetch_1:
+  case Builtin::BI__sync_nand_and_fetch_2:
+  case Builtin::BI__sync_nand_and_fetch_4:
+  case Builtin::BI__sync_nand_and_fetch_8:
+  case Builtin::BI__sync_nand_and_fetch_16:
+    // Passes And here; EmitBinaryAtomicPost special-cases nand by negating
+    // the fetched value before re-applying the operation.
+    return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
+                                llvm::Instruction::And);
+
+  case Builtin::BI__sync_val_compare_and_swap_1:
+  case Builtin::BI__sync_val_compare_and_swap_2:
+  case Builtin::BI__sync_val_compare_and_swap_4:
+  case Builtin::BI__sync_val_compare_and_swap_8:
+  case Builtin::BI__sync_val_compare_and_swap_16:
+  {
+    const llvm::Type *ResType[2];
+    ResType[0]= ConvertType(E->getType());
+    ResType[1] = ConvertType(E->getArg(0)->getType());
+    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+    return RValue::get(Builder.CreateCall3(AtomF,
+                                           EmitScalarExpr(E->getArg(0)),
+                                           EmitScalarExpr(E->getArg(1)),
+                                           EmitScalarExpr(E->getArg(2))));
+  }
+
+  case Builtin::BI__sync_bool_compare_and_swap_1:
+  case Builtin::BI__sync_bool_compare_and_swap_2:
+  case Builtin::BI__sync_bool_compare_and_swap_4:
+  case Builtin::BI__sync_bool_compare_and_swap_8:
+  case Builtin::BI__sync_bool_compare_and_swap_16:
+  {
+    // The bool form succeeds iff the value previously in memory equalled
+    // the expected value, i.e. the cmpxchg result == the 'old' operand.
+    const llvm::Type *ResType[2];
+    ResType[0]= ConvertType(E->getArg(1)->getType());
+    ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
+    Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
+    Value *OldVal = EmitScalarExpr(E->getArg(1));
+    Value *PrevVal = Builder.CreateCall3(AtomF,
+                                        EmitScalarExpr(E->getArg(0)),
+                                        OldVal,
+                                        EmitScalarExpr(E->getArg(2)));
+    Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+    // zext bool to int.
+    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+  }
+
+  case Builtin::BI__sync_lock_test_and_set_1:
+  case Builtin::BI__sync_lock_test_and_set_2:
+  case Builtin::BI__sync_lock_test_and_set_4:
+  case Builtin::BI__sync_lock_test_and_set_8:
+  case Builtin::BI__sync_lock_test_and_set_16:
+    return EmitBinaryAtomic(*this, Intrinsic::atomic_swap, E);
+  case Builtin::BI__sync_lock_release_1:
+  case Builtin::BI__sync_lock_release_2:
+  case Builtin::BI__sync_lock_release_4:
+  case Builtin::BI__sync_lock_release_8:
+  case Builtin::BI__sync_lock_release_16: {
+    // __sync_lock_release is lowered as a volatile store of zero.
+    Value *Ptr = EmitScalarExpr(E->getArg(0));
+    const llvm::Type *ElTy =
+      cast<llvm::PointerType>(Ptr->getType())->getElementType();
+    llvm::StoreInst *Store = 
+      Builder.CreateStore(llvm::Constant::getNullValue(ElTy), Ptr);
+    Store->setVolatile(true);
+    return RValue::get(0);
+  }
+
+  case Builtin::BI__sync_synchronize: {
+    // Full fence: the first four i1 flags request every ordering
+    // constraint; the last (device) flag is false.
+    Value *C[5];
+    C[0] = C[1] = C[2] = C[3] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 1);
+    C[4] = llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0);
+    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+    return RValue::get(0);
+  }
+
+  case Builtin::BI__builtin_llvm_memory_barrier: {
+    // All five barrier flags are supplied explicitly by the caller.
+    Value *C[5] = {
+      EmitScalarExpr(E->getArg(0)),
+      EmitScalarExpr(E->getArg(1)),
+      EmitScalarExpr(E->getArg(2)),
+      EmitScalarExpr(E->getArg(3)),
+      EmitScalarExpr(E->getArg(4))
+    };
+    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
+    return RValue::get(0);
+  }
+      
+    // Library functions with special handling.
+  case Builtin::BIsqrt:
+  case Builtin::BIsqrtf:
+  case Builtin::BIsqrtl: {
+    // Rewrite sqrt to intrinsic if allowed.
+    if (!FD->hasAttr<ConstAttr>())
+      break;
+    Value *Arg0 = EmitScalarExpr(E->getArg(0));
+    const llvm::Type *ArgType = Arg0->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, &ArgType, 1);
+    return RValue::get(Builder.CreateCall(F, Arg0, "tmp"));
+  }
+
+  case Builtin::BIpow:
+  case Builtin::BIpowf:
+  case Builtin::BIpowl: {
+    // Rewrite pow to intrinsic if allowed.
+    if (!FD->hasAttr<ConstAttr>())
+      break;
+    Value *Base = EmitScalarExpr(E->getArg(0));
+    Value *Exponent = EmitScalarExpr(E->getArg(1));
+    const llvm::Type *ArgType = Base->getType();
+    Value *F = CGM.getIntrinsic(Intrinsic::pow, &ArgType, 1);
+    return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
+  }
+  }
+
+  // If this is an alias for a libm function (e.g. __builtin_sin) turn it into
+  // that function.
+  if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
+      getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+    return EmitCall(E->getCallee()->getType(),
+                    CGM.getBuiltinLibFunction(FD, BuiltinID),
+                    ReturnValueSlot(),
+                    E->arg_begin(), E->arg_end());
+
+  // See if we have a target specific intrinsic.
+  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
+  if (const char *Prefix =
+      llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+
+  if (IntrinsicID != Intrinsic::not_intrinsic) {
+    SmallVector<Value*, 16> Args;
+
+    Function *F = CGM.getIntrinsic(IntrinsicID);
+    const llvm::FunctionType *FTy = F->getFunctionType();
+
+    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+      Value *ArgValue = EmitScalarExpr(E->getArg(i));
+
+      // If the intrinsic arg type is different from the builtin arg type
+      // we need to do a bit cast.
+      const llvm::Type *PTy = FTy->getParamType(i);
+      if (PTy != ArgValue->getType()) {
+        assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
+               "Must be able to losslessly bit cast to param");
+        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+      }
+
+      Args.push_back(ArgValue);
+    }
+
+    Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
+    QualType BuiltinRetType = E->getType();
+
+    const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
+    if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
+
+    if (RetTy != V->getType()) {
+      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+             "Must be able to losslessly bit cast result type");
+      V = Builder.CreateBitCast(V, RetTy);
+    }
+
+    return RValue::get(V);
+  }
+
+  // See if we have a target specific builtin that needs to be lowered.
+  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+    return RValue::get(V);
+
+  ErrorUnsupported(E, "builtin function");
+
+  // Unknown builtin, for now just dump it out and return undef.
+  if (hasAggregateLLVMType(E->getType()))
+    return RValue::getAggregate(CreateMemTemp(E->getType()));
+  return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+}
+
+/// EmitTargetBuiltinExpr - Dispatch a target-specific builtin to the
+/// per-architecture emitter based on the target triple.  Returns null for
+/// architectures with no target-builtin support here, which the caller
+/// treats as "unsupported builtin".
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+                                              const CallExpr *E) {
+  switch (Target.getTriple().getArch()) {
+  case llvm::Triple::x86:
+  case llvm::Triple::x86_64:
+    return EmitX86BuiltinExpr(BuiltinID, E);
+  case llvm::Triple::ppc:
+  case llvm::Triple::ppc64:
+    return EmitPPCBuiltinExpr(BuiltinID, E);
+  default:
+    return 0;
+  }
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+                                           const CallExpr *E) {
+
+  llvm::SmallVector<Value*, 4> Ops;
+
+  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+    Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+  switch (BuiltinID) {
+  default: return 0;
+  case X86::BI__builtin_ia32_pslldi128:
+  case X86::BI__builtin_ia32_psllqi128:
+  case X86::BI__builtin_ia32_psllwi128:
+  case X86::BI__builtin_ia32_psradi128:
+  case X86::BI__builtin_ia32_psrawi128:
+  case X86::BI__builtin_ia32_psrldi128:
+  case X86::BI__builtin_ia32_psrlqi128:
+  case X86::BI__builtin_ia32_psrlwi128: {
+    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
+    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
+    llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
+                                         Ops[1], Zero, "insert");
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
+    const char *name = 0;
+    Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+    switch (BuiltinID) {
+    default: assert(0 && "Unsupported shift intrinsic!");
+    case X86::BI__builtin_ia32_pslldi128:
+      name = "pslldi";
+      ID = Intrinsic::x86_sse2_psll_d;
+      break;
+    case X86::BI__builtin_ia32_psllqi128:
+      name = "psllqi";
+      ID = Intrinsic::x86_sse2_psll_q;
+      break;
+    case X86::BI__builtin_ia32_psllwi128:
+      name = "psllwi";
+      ID = Intrinsic::x86_sse2_psll_w;
+      break;
+    case X86::BI__builtin_ia32_psradi128:
+      name = "psradi";
+      ID = Intrinsic::x86_sse2_psra_d;
+      break;
+    case X86::BI__builtin_ia32_psrawi128:
+      name = "psrawi";
+      ID = Intrinsic::x86_sse2_psra_w;
+      break;
+    case X86::BI__builtin_ia32_psrldi128:
+      name = "psrldi";
+      ID = Intrinsic::x86_sse2_psrl_d;
+      break;
+    case X86::BI__builtin_ia32_psrlqi128:
+      name = "psrlqi";
+      ID = Intrinsic::x86_sse2_psrl_q;
+      break;
+    case X86::BI__builtin_ia32_psrlwi128:
+      name = "psrlwi";
+      ID = Intrinsic::x86_sse2_psrl_w;
+      break;
+    }
+    llvm::Function *F = CGM.getIntrinsic(ID);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+  }
+  case X86::BI__builtin_ia32_pslldi:
+  case X86::BI__builtin_ia32_psllqi:
+  case X86::BI__builtin_ia32_psllwi:
+  case X86::BI__builtin_ia32_psradi:
+  case X86::BI__builtin_ia32_psrawi:
+  case X86::BI__builtin_ia32_psrldi:
+  case X86::BI__builtin_ia32_psrlqi:
+  case X86::BI__builtin_ia32_psrlwi: {
+    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
+    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
+    const char *name = 0;
+    Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+    switch (BuiltinID) {
+    default: assert(0 && "Unsupported shift intrinsic!");
+    case X86::BI__builtin_ia32_pslldi:
+      name = "pslldi";
+      ID = Intrinsic::x86_mmx_psll_d;
+      break;
+    case X86::BI__builtin_ia32_psllqi:
+      name = "psllqi";
+      ID = Intrinsic::x86_mmx_psll_q;
+      break;
+    case X86::BI__builtin_ia32_psllwi:
+      name = "psllwi";
+      ID = Intrinsic::x86_mmx_psll_w;
+      break;
+    case X86::BI__builtin_ia32_psradi:
+      name = "psradi";
+      ID = Intrinsic::x86_mmx_psra_d;
+      break;
+    case X86::BI__builtin_ia32_psrawi:
+      name = "psrawi";
+      ID = Intrinsic::x86_mmx_psra_w;
+      break;
+    case X86::BI__builtin_ia32_psrldi:
+      name = "psrldi";
+      ID = Intrinsic::x86_mmx_psrl_d;
+      break;
+    case X86::BI__builtin_ia32_psrlqi:
+      name = "psrlqi";
+      ID = Intrinsic::x86_mmx_psrl_q;
+      break;
+    case X86::BI__builtin_ia32_psrlwi:
+      name = "psrlwi";
+      ID = Intrinsic::x86_mmx_psrl_w;
+      break;
+    }
+    llvm::Function *F = CGM.getIntrinsic(ID);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+  }
+  case X86::BI__builtin_ia32_cmpps: {
+    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpps");
+  }
+  case X86::BI__builtin_ia32_cmpss: {
+    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpss");
+  }
+  case X86::BI__builtin_ia32_ldmxcsr: {
+    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
+    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+    Builder.CreateStore(Ops[0], Tmp);
+    return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+                              Builder.CreateBitCast(Tmp, PtrTy));
+  }
+  case X86::BI__builtin_ia32_stmxcsr: {
+    const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
+    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+    One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+                             Builder.CreateBitCast(Tmp, PtrTy));
+    return Builder.CreateLoad(Tmp, "stmxcsr");
+  }
+  case X86::BI__builtin_ia32_cmppd: {
+    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_pd);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmppd");
+  }
+  case X86::BI__builtin_ia32_cmpsd: {
+    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_cmp_sd);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), "cmpsd");
+  }
+  case X86::BI__builtin_ia32_storehps:
+  case X86::BI__builtin_ia32_storelps: {
+    const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
+    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+
+    // cast val v2i64
+    Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+    // extract (0, 1)
+    unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
+    Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+    // cast pointer to i64 & store
+    Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+    return Builder.CreateStore(Ops[1], Ops[0]);
+  }
+  case X86::BI__builtin_ia32_palignr: {
+    Function *F = CGM.getIntrinsic(Intrinsic::x86_ssse3_palign_r);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size());
+  }
+  case X86::BI__builtin_ia32_palignr128: {
+    unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+    
+    // If palignr is shifting the pair of input vectors less than 17 bytes,
+    // emit a shuffle instruction.
+    if (shiftVal <= 16) {
+      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+
+      llvm::SmallVector<llvm::Constant*, 16> Indices;
+      for (unsigned i = 0; i != 16; ++i)
+        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+      
+      Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
+      return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+    }
+    
+    // If palignr is shifting the pair of input vectors more than 16 but less
+    // than 32 bytes, emit a logical right shift of the destination.
+    if (shiftVal < 32) {
+      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
+      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+      
+      Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+      Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8);
+      
+      // create i32 constant
+      llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
+      return Builder.CreateCall(F, &Ops[0], &Ops[0] + 2, "palignr");
+    }
+    
+    // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
+  }
+  }
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+                                           const CallExpr *E) {
+  // PPC target builtins are not implemented yet; returning 0 signals that
+  // no special lowering was produced for this builtin.
+  return 0;
+}
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..28c4c6b
--- /dev/null
+++ b/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,459 @@
+//===--- CGCXX.cpp - Emit LLVM Code for C++ code --------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+
+/// LoadCXXThis - Load the 'this' pointer of the current C++ instance
+/// method from its slot in the local declaration map.
+llvm::Value *CodeGenFunction::LoadCXXThis() {
+  assert(isa<CXXMethodDecl>(CurFuncDecl) &&
+         "Must be in a C++ member function decl to load 'this'");
+  assert(cast<CXXMethodDecl>(CurFuncDecl)->isInstance() &&
+         "Must be in a C++ member function decl to load 'this'");
+
+  // FIXME: What if we're inside a block?
+  // ans: See how CodeGenFunction::LoadObjCSelf() uses
+  // CodeGenFunction::BlockForwardSelf() for how to do this.
+  llvm::Value *ThisAddr = LocalDeclMap[CXXThisDecl];
+  return Builder.CreateLoad(ThisAddr, "this");
+}
+
+/// EmitCXXConstructors - Emit the complete-object and base-object variants
+/// of constructor D. (The allocating variant is not emitted here.)
+void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+  EmitGlobal(GlobalDecl(D, Ctor_Complete));
+  EmitGlobal(GlobalDecl(D, Ctor_Base));
+}
+
+/// EmitCXXConstructor - Emit the body of one constructor variant and apply
+/// the standard definition attributes to the resulting function.
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
+                                       CXXCtorType Type) {
+  GlobalDecl GD(D, Type);
+
+  llvm::Function *Fn = GetAddrOfCXXConstructor(D, Type);
+  CodeGenFunction(*this).GenerateCode(GD, Fn);
+
+  SetFunctionDefinitionAttributes(D, Fn);
+  SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+/// GetAddrOfCXXConstructor - Return (creating if necessary) the LLVM
+/// function for the given constructor variant.
+llvm::Function *
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+                                       CXXCtorType Type) {
+  // Compute the LLVM type of this ctor variant.
+  const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+  const CGFunctionInfo &FnInfo = getTypes().getFunctionInfo(D, Type);
+  const llvm::FunctionType *FTy =
+    getTypes().GetFunctionType(FnInfo, FPT->isVariadic());
+
+  const char *Name = getMangledCXXCtorName(D, Type);
+  llvm::Constant *Fn = GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type));
+  return cast<llvm::Function>(Fn);
+}
+
+/// getMangledCXXCtorName - Return the uniqued mangled name for the given
+/// constructor variant.
+const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
+                                                 CXXCtorType Type) {
+  llvm::SmallString<256> Name;
+  getMangleContext().mangleCXXCtor(D, Type, Name);
+
+  // UniqueMangledName expects an explicitly NUL-terminated range.
+  Name += '\0';
+  return UniqueMangledName(Name.begin(), Name.end());
+}
+
+/// EmitCXXDestructors - Emit the destructor variants required for D: the
+/// deleting dtor only when D is virtual, plus the complete-object and
+/// base-object dtors.
+void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+  if (D->isVirtual())
+    EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+  EmitGlobal(GlobalDecl(D, Dtor_Complete));
+  EmitGlobal(GlobalDecl(D, Dtor_Base));
+}
+
+/// EmitCXXDestructor - Emit the body of one destructor variant and apply
+/// the standard definition attributes to the resulting function.
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
+                                      CXXDtorType Type) {
+  GlobalDecl GD(D, Type);
+
+  llvm::Function *Fn = GetAddrOfCXXDestructor(D, Type);
+  CodeGenFunction(*this).GenerateCode(GD, Fn);
+
+  SetFunctionDefinitionAttributes(D, Fn);
+  SetLLVMFunctionAttributesForDefinition(D, Fn);
+}
+
+/// GetAddrOfCXXDestructor - Return (creating if necessary) the LLVM
+/// function for the given destructor variant.
+llvm::Function *
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+                                      CXXDtorType Type) {
+  // Destructors are never variadic.
+  const CGFunctionInfo &FnInfo = getTypes().getFunctionInfo(D, Type);
+  const llvm::FunctionType *FTy = getTypes().GetFunctionType(FnInfo, false);
+
+  const char *Name = getMangledCXXDtorName(D, Type);
+  llvm::Constant *Fn = GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type));
+  return cast<llvm::Function>(Fn);
+}
+
+/// getMangledCXXDtorName - Return the uniqued mangled name for the given
+/// destructor variant.
+const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D,
+                                                 CXXDtorType Type) {
+  llvm::SmallString<256> Name;
+  getMangleContext().mangleCXXDtor(D, Type, Name);
+
+  // UniqueMangledName expects an explicitly NUL-terminated range.
+  Name += '\0';
+  return UniqueMangledName(Name.begin(), Name.end());
+}
+
+/// GenerateThunk - Emit a thunk for GD that applies only a this-pointer
+/// adjustment, by delegating to GenerateCovariantThunk with an empty
+/// return adjustment.
+llvm::Constant *
+CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
+                               bool Extern, 
+                               const ThunkAdjustment &ThisAdjustment) {
+  return GenerateCovariantThunk(Fn, GD, Extern,
+                                CovariantThunkAdjustment(ThisAdjustment,
+                                                         ThunkAdjustment()));
+}
+
+/// DynamicTypeAdjust - Apply a this-pointer adjustment to V: first the
+/// constant non-virtual byte offset, then (if present) a virtual offset
+/// loaded out of the object's vtable. The result is returned with V's
+/// original type.
+llvm::Value *
+CodeGenFunction::DynamicTypeAdjust(llvm::Value *V, 
+                                   const ThunkAdjustment &Adjustment) {
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+  const llvm::Type *OrigTy = V->getType();
+  if (Adjustment.NonVirtual) {
+    // Do the non-virtual adjustment: GEP by bytes through an i8*.
+    V = Builder.CreateBitCast(V, Int8PtrTy);
+    V = Builder.CreateConstInBoundsGEP1_64(V, Adjustment.NonVirtual);
+    V = Builder.CreateBitCast(V, OrigTy);
+  }
+  
+  if (!Adjustment.Virtual)
+    return V;
+
+  assert(Adjustment.Virtual % (LLVMPointerWidth / 8) == 0 && 
+         "vtable entry unaligned");
+
+  // Do the virtual this adjustment
+  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+  const llvm::Type *PtrDiffPtrTy = PtrDiffTy->getPointerTo();
+  
+  // Load the vtable pointer out of the (already non-virtually adjusted)
+  // object pointer.
+  llvm::Value *ThisVal = Builder.CreateBitCast(V, Int8PtrTy);
+  V = Builder.CreateBitCast(V, PtrDiffPtrTy->getPointerTo());
+  V = Builder.CreateLoad(V, "vtable");
+  
+  // Index the vtable by the byte offset converted to slots, load the
+  // stored ptrdiff_t, and add it to the byte-typed this pointer.
+  llvm::Value *VTablePtr = V;
+  uint64_t VirtualAdjustment = Adjustment.Virtual / (LLVMPointerWidth / 8);
+  V = Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+  V = Builder.CreateLoad(V);
+  V = Builder.CreateGEP(ThisVal, V);
+  
+  return Builder.CreateBitCast(V, OrigTy);
+}
+
+/// GenerateCovariantThunk - Emit the body of a (possibly covariant) thunk
+/// for GD into Fn: the incoming 'this' pointer is adjusted by
+/// Adjustment.ThisAdjustment before forwarding the call, and the returned
+/// pointer is adjusted by Adjustment.ReturnAdjustment afterwards (with a
+/// null check, since a null result must not be adjusted).
+llvm::Constant *
+CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
+                                   GlobalDecl GD, bool Extern,
+                                   const CovariantThunkAdjustment &Adjustment) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+  QualType ResultType = FPT->getResultType();
+
+  // Build an argument list mirroring the target method: implicit 'this'
+  // followed by the declared parameters.
+  FunctionArgList Args;
+  ImplicitParamDecl *ThisDecl =
+    ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
+                              MD->getThisType(getContext()));
+  Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
+  for (FunctionDecl::param_const_iterator i = MD->param_begin(),
+         e = MD->param_end();
+       i != e; ++i) {
+    ParmVarDecl *D = *i;
+    Args.push_back(std::make_pair(D, D->getType()));
+  }
+  // Synthesize a FunctionDecl to emit the thunk body through the normal
+  // StartFunction/FinishFunction machinery.
+  IdentifierInfo *II
+    = &CGM.getContext().Idents.get("__thunk_named_foo_");
+  FunctionDecl *FD = FunctionDecl::Create(getContext(),
+                                          getContext().getTranslationUnitDecl(),
+                                          SourceLocation(), II, ResultType, 0,
+                                          Extern
+                                            ? FunctionDecl::Extern
+                                            : FunctionDecl::Static,
+                                          false, true);
+  StartFunction(FD, ResultType, Fn, Args, SourceLocation());
+
+  // generate body
+  const llvm::Type *Ty =
+    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+                                   FPT->isVariadic());
+  llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+  CallArgList CallArgs;
+
+  bool ShouldAdjustReturnPointer = true;
+  QualType ArgType = MD->getThisType(getContext());
+  llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this");
+  if (!Adjustment.ThisAdjustment.isEmpty()) {
+    // Do the this adjustment.
+    const llvm::Type *OrigTy = Callee->getType();
+    Arg = DynamicTypeAdjust(Arg, Adjustment.ThisAdjustment);
+    
+    if (!Adjustment.ReturnAdjustment.isEmpty()) {
+      // Both adjustments are needed: chain through a return-adjusting
+      // covariant thunk and let it perform the return fixup.
+      const CovariantThunkAdjustment &ReturnAdjustment = 
+        CovariantThunkAdjustment(ThunkAdjustment(),
+                                 Adjustment.ReturnAdjustment);
+      
+      Callee = CGM.BuildCovariantThunk(GD, Extern, ReturnAdjustment);
+      
+      Callee = Builder.CreateBitCast(Callee, OrigTy);
+      ShouldAdjustReturnPointer = false;
+    }
+  }    
+
+  CallArgs.push_back(std::make_pair(RValue::get(Arg), ArgType));
+
+  // Forward the remaining arguments unchanged.
+  for (FunctionDecl::param_const_iterator i = MD->param_begin(),
+         e = MD->param_end();
+       i != e; ++i) {
+    ParmVarDecl *D = *i;
+    QualType ArgType = D->getType();
+
+    // llvm::Value *Arg = CGF.GetAddrOfLocalVar(Dst);
+    Expr *Arg = new (getContext()) DeclRefExpr(D, ArgType.getNonReferenceType(),
+                                               SourceLocation());
+    CallArgs.push_back(std::make_pair(EmitCallArg(Arg, ArgType), ArgType));
+  }
+
+  RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
+                                                      FPT->getCallConv(),
+                                                      FPT->getNoReturnAttr()),
+                       Callee, ReturnValueSlot(), CallArgs, MD);
+  if (ShouldAdjustReturnPointer && !Adjustment.ReturnAdjustment.isEmpty()) {
+    bool CanBeZero = !(ResultType->isReferenceType()
+    // FIXME: attr nonnull can't be zero either
+                       /* || ResultType->hasAttr<NonNullAttr>() */ );
+    // Do the return result adjustment.
+    if (CanBeZero) {
+      // Branch on the result: only a non-null pointer is adjusted, a null
+      // result is passed through unchanged; merge with a phi.
+      llvm::BasicBlock *NonZeroBlock = createBasicBlock();
+      llvm::BasicBlock *ZeroBlock = createBasicBlock();
+      llvm::BasicBlock *ContBlock = createBasicBlock();
+
+      const llvm::Type *Ty = RV.getScalarVal()->getType();
+      llvm::Value *Zero = llvm::Constant::getNullValue(Ty);
+      Builder.CreateCondBr(Builder.CreateICmpNE(RV.getScalarVal(), Zero),
+                           NonZeroBlock, ZeroBlock);
+      EmitBlock(NonZeroBlock);
+      llvm::Value *NZ = 
+        DynamicTypeAdjust(RV.getScalarVal(), Adjustment.ReturnAdjustment);
+      EmitBranch(ContBlock);
+      EmitBlock(ZeroBlock);
+      llvm::Value *Z = RV.getScalarVal();
+      EmitBlock(ContBlock);
+      llvm::PHINode *RVOrZero = Builder.CreatePHI(Ty);
+      RVOrZero->reserveOperandSpace(2);
+      RVOrZero->addIncoming(NZ, NonZeroBlock);
+      RVOrZero->addIncoming(Z, ZeroBlock);
+      RV = RValue::get(RVOrZero);
+    } else
+      // References can never be null, so adjust unconditionally.
+      RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(), 
+                                         Adjustment.ReturnAdjustment));
+  }
+
+  if (!ResultType->isVoidType())
+    EmitReturnOfRValue(RV, ResultType);
+
+  FinishFunction();
+  return Fn;
+}
+
+/// GetAddrOfThunk - Return (creating a declaration if necessary) the
+/// this-adjusting thunk for GD with the given adjustment.
+llvm::Constant *
+CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+                              const ThunkAdjustment &ThisAdjustment) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // Mangle the thunk's name; destructor thunks encode the dtor variant.
+  llvm::SmallString<256> Buffer;
+  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
+    getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), ThisAdjustment,
+                                          Buffer);
+  else
+    getMangleContext().mangleThunk(MD, ThisAdjustment, Buffer);
+
+  // Uniquing expects an explicitly NUL-terminated range.
+  Buffer += '\0';
+  const char *Name = UniqueMangledName(Buffer.begin(), Buffer.end());
+
+  // Declare (or find) the function with the vtable-slot type.
+  const llvm::Type *FnTy = getTypes().GetFunctionTypeForVtable(MD);
+  return GetOrCreateLLVMFunction(Name, FnTy, GlobalDecl());
+}
+
+/// GetAddrOfCovariantThunk - Return (creating a declaration if necessary)
+/// the covariant thunk for GD with the given adjustment.
+llvm::Constant *
+CodeGenModule::GetAddrOfCovariantThunk(GlobalDecl GD,
+                                   const CovariantThunkAdjustment &Adjustment) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // Mangle the covariant thunk's name.
+  llvm::SmallString<256> Buffer;
+  getMangleContext().mangleCovariantThunk(MD, Adjustment, Buffer);
+
+  // Uniquing expects an explicitly NUL-terminated range.
+  Buffer += '\0';
+  const char *Name = UniqueMangledName(Buffer.begin(), Buffer.end());
+
+  // Declare (or find) the function with the vtable-slot type.
+  const llvm::Type *FnTy = getTypes().GetFunctionTypeForVtable(MD);
+  return GetOrCreateLLVMFunction(Name, FnTy, GlobalDecl());
+}
+
+/// BuildThunksForVirtual - Emit every thunk the vtable builder recorded
+/// for virtual method GD, computing a covariant return adjustment when the
+/// overridden method's return type differs from GD's.
+void CodeGenModule::BuildThunksForVirtual(GlobalDecl GD) {
+  CGVtableInfo::AdjustmentVectorTy *AdjPtr = getVtableInfo().getAdjustments(GD);
+  if (!AdjPtr)
+    return;
+  CGVtableInfo::AdjustmentVectorTy &Adj = *AdjPtr;
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+  for (unsigned i = 0; i < Adj.size(); i++) {
+    GlobalDecl OGD = Adj[i].first;
+    const CXXMethodDecl *OMD = cast<CXXMethodDecl>(OGD.getDecl());
+    // Differing canonical return types mean this is a covariant override:
+    // compute the derived-to-base adjustment for the returned pointer.
+    QualType nc_oret = OMD->getType()->getAs<FunctionType>()->getResultType();
+    CanQualType oret = getContext().getCanonicalType(nc_oret);
+    QualType nc_ret = MD->getType()->getAs<FunctionType>()->getResultType();
+    CanQualType ret = getContext().getCanonicalType(nc_ret);
+    ThunkAdjustment ReturnAdjustment;
+    if (oret != ret) {
+      QualType qD = nc_ret->getPointeeType();
+      QualType qB = nc_oret->getPointeeType();
+      CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
+      CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
+      ReturnAdjustment = ComputeThunkAdjustment(D, B);
+    }
+    ThunkAdjustment ThisAdjustment = Adj[i].second;
+    bool Extern = !cast<CXXRecordDecl>(OMD->getDeclContext())->isInAnonymousNamespace();
+    if (!ReturnAdjustment.isEmpty() || !ThisAdjustment.isEmpty()) {
+      CovariantThunkAdjustment CoAdj(ThisAdjustment, ReturnAdjustment);
+      llvm::Constant *FnConst;
+      if (!ReturnAdjustment.isEmpty())
+        FnConst = GetAddrOfCovariantThunk(GD, CoAdj);
+      else
+        FnConst = GetAddrOfThunk(GD, ThisAdjustment);
+      // If the mangled name resolved to a bitcast of some differently-typed
+      // global, replace that global: erase its map entry, recreate the
+      // thunk, steal the name, and redirect all old uses to the new decl.
+      if (!isa<llvm::Function>(FnConst)) {
+        llvm::Constant *SubExpr =
+            cast<llvm::ConstantExpr>(FnConst)->getOperand(0);
+        llvm::Function *OldFn = cast<llvm::Function>(SubExpr);
+        std::string Name = OldFn->getNameStr();
+        GlobalDeclMap.erase(UniqueMangledName(Name.data(),
+                                              Name.data() + Name.size() + 1));
+        llvm::Constant *NewFnConst;
+        if (!ReturnAdjustment.isEmpty())
+          NewFnConst = GetAddrOfCovariantThunk(GD, CoAdj);
+        else
+          NewFnConst = GetAddrOfThunk(GD, ThisAdjustment);
+        llvm::Function *NewFn = cast<llvm::Function>(NewFnConst);
+        NewFn->takeName(OldFn);
+        llvm::Constant *NewPtrForOldDecl =
+            llvm::ConstantExpr::getBitCast(NewFn, OldFn->getType());
+        OldFn->replaceAllUsesWith(NewPtrForOldDecl);
+        OldFn->eraseFromParent();
+        FnConst = NewFn;
+      }
+      llvm::Function *Fn = cast<llvm::Function>(FnConst);
+      // Only emit the body if the function is still just a declaration.
+      if (Fn->isDeclaration()) {
+        llvm::GlobalVariable::LinkageTypes linktype;
+        linktype = llvm::GlobalValue::WeakAnyLinkage;
+        if (!Extern)
+          linktype = llvm::GlobalValue::InternalLinkage;
+        Fn->setLinkage(linktype);
+        if (!Features.Exceptions && !Features.ObjCNonFragileABI)
+          Fn->addFnAttr(llvm::Attribute::NoUnwind);
+        Fn->setAlignment(2);
+        CodeGenFunction(*this).GenerateCovariantThunk(Fn, GD, Extern, CoAdj);
+      }
+    }
+  }
+}
+
+/// BuildThunk - Create and emit a this-adjusting thunk for GD, returning
+/// it bitcast to i8* (the form used in vtable initializers).
+llvm::Constant *
+CodeGenModule::BuildThunk(GlobalDecl GD, bool Extern,
+                          const ThunkAdjustment &ThisAdjustment) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+  llvm::SmallString<256> OutName;
+  if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(MD)) {
+    getMangleContext().mangleCXXDtorThunk(D, GD.getDtorType(), ThisAdjustment,
+                                          OutName);
+  } else 
+    getMangleContext().mangleThunk(MD, ThisAdjustment, OutName);
+  
+  // Anonymous-namespace methods get internal linkage; everything else is
+  // weak so duplicate thunk emissions merge at link time.
+  llvm::GlobalVariable::LinkageTypes linktype;
+  linktype = llvm::GlobalValue::WeakAnyLinkage;
+  if (!Extern)
+    linktype = llvm::GlobalValue::InternalLinkage;
+  llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0);
+  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+  const llvm::FunctionType *FTy =
+    getTypes().GetFunctionType(getTypes().getFunctionInfo(MD),
+                               FPT->isVariadic());
+
+  llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(),
+                                              &getModule());
+  CodeGenFunction(*this).GenerateThunk(Fn, GD, Extern, ThisAdjustment);
+  llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
+  return m;
+}
+
+/// BuildCovariantThunk - Create and emit a covariant thunk for GD that
+/// applies both the this-pointer and return-value adjustments in
+/// Adjustment. Returns the thunk bitcast to i8* (the form used in vtable
+/// initializers).
+llvm::Constant *
+CodeGenModule::BuildCovariantThunk(const GlobalDecl &GD, bool Extern,
+                                   const CovariantThunkAdjustment &Adjustment) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+  llvm::SmallString<256> OutName;
+  getMangleContext().mangleCovariantThunk(MD, Adjustment, OutName);
+
+  // Anonymous-namespace methods get internal linkage; everything else is
+  // weak so duplicate thunk emissions merge at link time.
+  llvm::GlobalVariable::LinkageTypes linktype;
+  linktype = llvm::GlobalValue::WeakAnyLinkage;
+  if (!Extern)
+    linktype = llvm::GlobalValue::InternalLinkage;
+  llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0);
+  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+  const llvm::FunctionType *FTy =
+    getTypes().GetFunctionType(getTypes().getFunctionInfo(MD),
+                               FPT->isVariadic());
+
+  llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(),
+                                              &getModule());
+  // Pass GD itself rather than MD: implicitly converting MD to GlobalDecl
+  // would drop the ctor/dtor variant GD carries. This matches BuildThunk.
+  CodeGenFunction(*this).GenerateCovariantThunk(Fn, GD, Extern, Adjustment);
+  llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
+  return m;
+}
+
+// Load the vtable out of *This and fetch the function pointer stored at
+// slot VtableIndex, returning it as a value of function-pointer type Ty.
+static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VtableIndex, 
+                                     llvm::Value *This, const llvm::Type *Ty) {
+  // View This as a pointer to a vtable (an array of Ty*) so a single load
+  // produces the vtable pointer.
+  const llvm::Type *VtablePtrTy =
+    Ty->getPointerTo()->getPointerTo()->getPointerTo();
+  llvm::Value *Vtable =
+    CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(This, VtablePtrTy));
+
+  llvm::Value *Slot =
+    CGF.Builder.CreateConstInBoundsGEP1_64(Vtable, VtableIndex, "vfn");
+  return CGF.Builder.CreateLoad(Slot);
+}
+
+/// BuildVirtualCall - Load the virtual function pointer for MD out of the
+/// vtable of the object This, with function-pointer type Ty.
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+                                  const llvm::Type *Ty) {
+  // Vtable slots are recorded against canonical declarations.
+  const CXXMethodDecl *CanonMD = MD->getCanonicalDecl();
+  uint64_t Index = CGM.getVtableInfo().getMethodVtableIndex(CanonMD);
+  return ::BuildVirtualCall(*this, Index, This, Ty);
+}
+
+/// BuildVirtualCall - Load the virtual function pointer for the given
+/// destructor variant out of the vtable of the object This.
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type, 
+                                  llvm::Value *&This, const llvm::Type *Ty) {
+  // Vtable slots are recorded against the canonical declaration plus the
+  // dtor variant.
+  const CXXDestructorDecl *Canon =
+    cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+  uint64_t Index =
+    CGM.getVtableInfo().getMethodVtableIndex(GlobalDecl(Canon, Type));
+  return ::BuildVirtualCall(*this, Index, This, Ty);
+}
diff --git a/lib/CodeGen/CGCXX.h b/lib/CodeGen/CGCXX.h
new file mode 100644
index 0000000..1e6adb0
--- /dev/null
+++ b/lib/CodeGen/CGCXX.h
@@ -0,0 +1,36 @@
+//===----- CGCXX.h - C++ related code CodeGen declarations ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the constructor and destructor variant kinds used by
+// C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCXX_H
+#define CLANG_CODEGEN_CGCXX_H
+
+namespace clang {
+
+/// CXXCtorType - C++ constructor types. Combined with the constructor
+/// decl in a GlobalDecl to select which variant to mangle and emit.
+enum CXXCtorType {
+    Ctor_Complete,          // Complete object ctor
+    Ctor_Base,              // Base object ctor
+    Ctor_CompleteAllocating // Complete object allocating ctor
+};
+
+/// CXXDtorType - C++ destructor types. Combined with the destructor decl
+/// in a GlobalDecl to select which variant to mangle and emit; the
+/// deleting variant is only emitted for virtual destructors.
+enum CXXDtorType {
+    Dtor_Deleting, // Deleting dtor
+    Dtor_Complete, // Complete object dtor
+    Dtor_Base      // Base object dtor
+};
+
+} // end namespace clang
+
+#endif // CLANG_CODEGEN_CGCXX_H
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..b064c12
--- /dev/null
+++ b/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,1057 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Target/TargetData.h"
+
+#include "ABIInfo.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+// FIXME: Use iterator and sidestep silly type array creation.
+
+/// ClangCallConvToLLVMCallConv - Map a clang CallingConv enumerator to the
+/// corresponding LLVM calling convention ID. Any convention not explicitly
+/// handled lowers to the default C calling convention.
+static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
+  switch (CC) {
+  default: return llvm::CallingConv::C;
+  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
+  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+  }
+}
+
+/// getFunctionInfo - Compute the ABI function info for an unprototyped
+/// (K&R-style) function type. No formal parameters are known, so an empty
+/// argument list is used.
+const
+CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
+  return getFunctionInfo(FTNP->getResultType(),
+                         llvm::SmallVector<QualType, 16>(),
+                         FTNP->getCallConv(), FTNP->getNoReturnAttr());
+}
+
+/// getFunctionInfo - Compute the ABI function info for a prototyped function
+/// type, using its declared parameter types.
+const
+CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
+  llvm::SmallVector<QualType, 16> ArgTys;
+  // FIXME: Kill copy.
+  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+    ArgTys.push_back(FTP->getArgType(i));
+  return getFunctionInfo(FTP->getResultType(), ArgTys,
+                         FTP->getCallConv(), FTP->getNoReturnAttr());
+}
+
+/// getCallingConventionForDecl - Derive the calling convention for a
+/// declaration from its stdcall/fastcall attributes, defaulting to the C
+/// convention when neither attribute is present.
+static CallingConv getCallingConventionForDecl(const Decl *D) {
+  // Set the appropriate calling convention for the Function.
+  if (D->hasAttr<StdCallAttr>())
+    return CC_X86StdCall;
+
+  if (D->hasAttr<FastCallAttr>())
+    return CC_X86FastCall;
+
+  return CC_C;
+}
+
+/// getFunctionInfo - Compute the ABI function info for a member function of
+/// record RD with prototype FTP, prepending the implicit 'this' pointer
+/// (built here as a plain pointer-to-RD) to the argument list.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
+                                                 const FunctionProtoType *FTP) {
+  llvm::SmallVector<QualType, 16> ArgTys;
+  
+  // Add the 'this' pointer.
+  ArgTys.push_back(Context.getPointerType(Context.getTagDeclType(RD)));
+  
+  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+    ArgTys.push_back(FTP->getArgType(i));
+  
+  // FIXME: Set calling convention correctly, it needs to be associated with the
+  // type somehow.
+  return getFunctionInfo(FTP->getResultType(), ArgTys,
+                         FTP->getCallConv(), FTP->getNoReturnAttr());
+}
+
+/// getFunctionInfo - Compute the ABI function info for C++ method MD.
+/// Instance methods get the implicit 'this' pointer prepended; static member
+/// functions do not.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
+  llvm::SmallVector<QualType, 16> ArgTys;
+  // Add the 'this' pointer unless this is a static method.
+  if (MD->isInstance())
+    ArgTys.push_back(MD->getThisType(Context));
+
+  const FunctionProtoType *FTP = MD->getType()->getAs<FunctionProtoType>();
+  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+    ArgTys.push_back(FTP->getArgType(i));
+  return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
+                         FTP->getNoReturnAttr());
+}
+
+/// getFunctionInfo - Compute the ABI function info for the 'Type' variant of
+/// constructor D. The implicit argument order is: 'this', then (for base
+/// ctors of classes with virtual bases) the VTT pointer, then the declared
+/// parameters.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D, 
+                                                    CXXCtorType Type) {
+  llvm::SmallVector<QualType, 16> ArgTys;
+
+  // Add the 'this' pointer.
+  ArgTys.push_back(D->getThisType(Context));
+
+  // Check if we need to add a VTT parameter (which has type void **).
+  if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
+    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+  
+  const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
+  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+    ArgTys.push_back(FTP->getArgType(i));
+  return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
+                         FTP->getNoReturnAttr());
+}
+
+/// getFunctionInfo - Compute the ABI function info for the 'Type' variant of
+/// destructor D. Mirrors the constructor overload above: base-object dtors of
+/// classes with virtual bases take a VTT pointer after 'this'.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
+                                                    CXXDtorType Type) {
+  llvm::SmallVector<QualType, 16> ArgTys;
+  
+  // Add the 'this' pointer.
+  ArgTys.push_back(D->getThisType(Context));
+  
+  // Check if we need to add a VTT parameter (which has type void **).
+  if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
+    ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+  
+  const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
+  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+    ArgTys.push_back(FTP->getArgType(i));
+  return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
+                         FTP->getNoReturnAttr());
+}
+
+/// getFunctionInfo - Compute the ABI function info for function decl FD.
+/// Instance methods are forwarded to the CXXMethodDecl overload; otherwise
+/// both unprototyped and prototyped function types are handled here.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
+  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+    if (MD->isInstance())
+      return getFunctionInfo(MD);
+
+  const FunctionType *FTy = FD->getType()->getAs<FunctionType>();
+  if (const FunctionNoProtoType *FNTP = dyn_cast<FunctionNoProtoType>(FTy))
+    return getFunctionInfo(FNTP->getResultType(), 
+                           llvm::SmallVector<QualType, 16>(),
+                           FNTP->getCallConv(), FNTP->getNoReturnAttr());
+  
+  const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
+  llvm::SmallVector<QualType, 16> ArgTys;
+  // FIXME: Kill copy.
+  for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
+    ArgTys.push_back(FPT->getArgType(i));
+  return getFunctionInfo(FPT->getResultType(), ArgTys,
+                         FPT->getCallConv(), FPT->getNoReturnAttr());
+}
+
+/// getFunctionInfo - Compute the ABI function info for Objective-C method MD.
+/// The implicit 'self' and selector ('_cmd') arguments are prepended before
+/// the declared parameters.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
+  llvm::SmallVector<QualType, 16> ArgTys;
+  ArgTys.push_back(MD->getSelfDecl()->getType());
+  ArgTys.push_back(Context.getObjCSelType());
+  // FIXME: Kill copy?
+  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
+         e = MD->param_end(); i != e; ++i)
+    ArgTys.push_back((*i)->getType());
+  // ObjC methods carry no noreturn bit here; calling convention comes from
+  // the declaration's attributes.
+  return getFunctionInfo(MD->getResultType(), ArgTys,
+                         getCallingConventionForDecl(MD),
+                         /*NoReturn*/ false);
+}
+
+/// getFunctionInfo - Compute the ABI function info for GD, dispatching on the
+/// constructor/destructor variant the GlobalDecl carries.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
+  // FIXME: Do we need to handle ObjCMethodDecl?
+  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+                                              
+  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+    return getFunctionInfo(CD, GD.getCtorType());
+
+  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
+    return getFunctionInfo(DD, GD.getDtorType());
+  
+  return getFunctionInfo(FD);
+}
+
+/// getFunctionInfo - Compute the ABI function info for a call with result
+/// type ResTy and the given actual arguments; only the QualType half of each
+/// (value, type) pair is used here.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+                                                    const CallArgList &Args,
+                                                    CallingConv CC,
+                                                    bool NoReturn) {
+  // FIXME: Kill copy.
+  llvm::SmallVector<QualType, 16> ArgTys;
+  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+       i != e; ++i)
+    ArgTys.push_back(i->second);
+  return getFunctionInfo(ResTy, ArgTys, CC, NoReturn);
+}
+
+/// getFunctionInfo - Compute the ABI function info for a function definition
+/// with result type ResTy and the given formal parameter list; only the
+/// QualType half of each (decl, type) pair is used here.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+                                                    const FunctionArgList &Args,
+                                                    CallingConv CC,
+                                                    bool NoReturn) {
+  // FIXME: Kill copy.
+  llvm::SmallVector<QualType, 16> ArgTys;
+  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+       i != e; ++i)
+    ArgTys.push_back(i->second);
+  return getFunctionInfo(ResTy, ArgTys, CC, NoReturn);
+}
+
+/// getFunctionInfo - The core overload: look up (or create and memoize) the
+/// CGFunctionInfo for the given signature. Results are uniqued in a
+/// FoldingSet keyed on calling convention, noreturn-ness, and the full list
+/// of types, so repeated queries for the same signature are cheap.
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+                                  const llvm::SmallVector<QualType, 16> &ArgTys,
+                                                    CallingConv CallConv,
+                                                    bool NoReturn) {
+  unsigned CC = ClangCallConvToLLVMCallConv(CallConv);
+
+  // Lookup or create unique function info.
+  llvm::FoldingSetNodeID ID;
+  CGFunctionInfo::Profile(ID, CC, NoReturn, ResTy,
+                          ArgTys.begin(), ArgTys.end());
+
+  void *InsertPos = 0;
+  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
+  if (FI)
+    return *FI;
+
+  // Construct the function info.
+  FI = new CGFunctionInfo(CC, NoReturn, ResTy, ArgTys);
+  FunctionInfos.InsertNode(FI, InsertPos);
+
+  // Compute ABI information.
+  getABIInfo().computeInfo(*FI, getContext(), TheModule.getContext());
+
+  return *FI;
+}
+
+/// CGFunctionInfo constructor. The return type and argument types are stored
+/// in one contiguous ArgInfo array: slot 0 holds the return type, arguments
+/// occupy slots 1..NumArgs. ABI info for each slot is filled in later by
+/// ABIInfo::computeInfo (see getFunctionInfo above).
+CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
+                               bool _NoReturn,
+                               QualType ResTy,
+                               const llvm::SmallVector<QualType, 16> &ArgTys) 
+  : CallingConvention(_CallingConvention),
+    EffectiveCallingConvention(_CallingConvention),
+    NoReturn(_NoReturn)
+{
+  NumArgs = ArgTys.size();
+  Args = new ArgInfo[1 + NumArgs];
+  Args[0].type = ResTy;
+  for (unsigned i = 0; i < NumArgs; ++i)
+    Args[1 + i].type = ArgTys[i];
+}
+
+/***/
+
+/// GetExpandedTypes - Flatten the structure type Ty into the list of scalar
+/// LLVM types its fields lower to, recursing into aggregate members. Used for
+/// arguments classified ABIArgInfo::Expand. Bit-fields and flexible array
+/// members cannot be expanded and are rejected by assertion.
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
+                                    std::vector<const llvm::Type*> &ArgTys) {
+  const RecordType *RT = Ty->getAsStructureType();
+  assert(RT && "Can only expand structure types.");
+  const RecordDecl *RD = RT->getDecl();
+  assert(!RD->hasFlexibleArrayMember() &&
+         "Cannot expand structure with flexible array.");
+
+  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i) {
+    const FieldDecl *FD = *i;
+    assert(!FD->isBitField() &&
+           "Cannot expand structure with bit-field members.");
+
+    QualType FT = FD->getType();
+    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+      GetExpandedTypes(FT, ArgTys);
+    } else {
+      ArgTys.push_back(ConvertType(FT));
+    }
+  }
+}
+
+/// ExpandTypeFromArgs - Reassemble a struct parameter of type Ty (passed as a
+/// sequence of expanded scalar LLVM arguments starting at AI) into the memory
+/// at LV, recursing into aggregate fields. Returns the iterator just past the
+/// last argument consumed.
+llvm::Function::arg_iterator
+CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+                                    llvm::Function::arg_iterator AI) {
+  const RecordType *RT = Ty->getAsStructureType();
+  assert(RT && "Can only expand structure types.");
+
+  RecordDecl *RD = RT->getDecl();
+  assert(LV.isSimple() &&
+         "Unexpected non-simple lvalue during struct expansion.");
+  llvm::Value *Addr = LV.getAddress();
+  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i) {
+    FieldDecl *FD = *i;
+    QualType FT = FD->getType();
+
+    // FIXME: What are the right qualifiers here?
+    LValue LV = EmitLValueForField(Addr, FD, 0);
+    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+      AI = ExpandTypeFromArgs(FT, LV, AI);
+    } else {
+      // Scalar field: store the next LLVM argument into the field's slot.
+      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
+      ++AI;
+    }
+  }
+
+  return AI;
+}
+
+/// ExpandTypeToArgs - The inverse of ExpandTypeFromArgs: decompose the struct
+/// rvalue RV of type Ty into its scalar fields (recursing into aggregate
+/// members) and append each as a separate LLVM call argument.
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
+  const RecordType *RT = Ty->getAsStructureType();
+  assert(RT && "Can only expand structure types.");
+
+  RecordDecl *RD = RT->getDecl();
+  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+  llvm::Value *Addr = RV.getAggregateAddr();
+  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i) {
+    FieldDecl *FD = *i;
+    QualType FT = FD->getType();
+
+    // FIXME: What are the right qualifiers here?
+    LValue LV = EmitLValueForField(Addr, FD, 0);
+    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
+      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
+    } else {
+      RValue RV = EmitLoadOfLValue(LV, FT);
+      assert(RV.isScalar() &&
+             "Unexpected non-scalar rvalue during struct expansion.");
+      Args.push_back(RV.getScalarVal());
+    }
+  }
+}
+
+/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
+/// a pointer to an object of type \arg Ty.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits which not
+/// present in the src are undefined.
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
+                                      const llvm::Type *Ty,
+                                      CodeGenFunction &CGF) {
+  const llvm::Type *SrcTy =
+    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+  // Compare allocation sizes to decide whether a direct load of Ty through a
+  // bitcast pointer would read past the end of the source object.
+  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+  // If load is legal, just bitcast the src pointer.
+  if (SrcSize >= DstSize) {
+    // Generally SrcSize is never greater than DstSize, since this means we are
+    // losing bits. However, this can happen in cases where the structure has
+    // additional padding, for example due to a user specified alignment.
+    //
+    // FIXME: Assert that we aren't truncating non-padding bits when have access
+    // to that information.
+    llvm::Value *Casted =
+      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
+    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+    // FIXME: Use better alignment / avoid requiring aligned load.
+    Load->setAlignment(1);
+    return Load;
+  } else {
+    // Otherwise do coercion through memory. This is stupid, but
+    // simple.  Copy the source into a temporary of the destination type so
+    // the bits beyond SrcSize read as whatever the alloca holds (undefined),
+    // rather than faulting by reading past the source object.
+    llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+    llvm::Value *Casted =
+      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+    llvm::StoreInst *Store =
+      CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+    // FIXME: Use better alignment / avoid requiring aligned store.
+    Store->setAlignment(1);
+    return CGF.Builder.CreateLoad(Tmp);
+  }
+}
+
+/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
+/// where the source and destination may have different types.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+                               llvm::Value *DstPtr,
+                               bool DstIsVolatile,
+                               CodeGenFunction &CGF) {
+  const llvm::Type *SrcTy = Src->getType();
+  const llvm::Type *DstTy =
+    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+
+  // Compare allocation sizes to decide whether storing Src through a bitcast
+  // of DstPtr would write past the end of the destination object.
+  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+
+  // If store is legal, just bitcast the src pointer.
+  if (SrcSize <= DstSize) {
+    llvm::Value *Casted =
+      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
+    // FIXME: Use better alignment / avoid requiring aligned store.
+    CGF.Builder.CreateStore(Src, Casted, DstIsVolatile)->setAlignment(1);
+  } else {
+    // Otherwise do coercion through memory. This is stupid, but
+    // simple.
+
+    // Generally SrcSize is never greater than DstSize, since this means we are
+    // losing bits. However, this can happen in cases where the structure has
+    // additional padding, for example due to a user specified alignment.
+    //
+    // FIXME: Assert that we aren't truncating non-padding bits when have access
+    // to that information.
+    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
+    CGF.Builder.CreateStore(Src, Tmp);
+    llvm::Value *Casted =
+      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
+    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+    // FIXME: Use better alignment / avoid requiring aligned load.
+    Load->setAlignment(1);
+    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
+  }
+}
+
+/***/
+
+/// ReturnTypeUsesSret - Return true if the ABI returns this function's result
+/// indirectly, i.e. via a hidden sret pointer passed as the first argument.
+bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
+  return FI.getReturnInfo().isIndirect();
+}
+
+/// GetFunctionType - Lower the ABI-classified signature FI to a concrete LLVM
+/// function type: indirect returns become a leading pointer parameter with a
+/// void result, ignored arguments vanish, coerced values use their coercion
+/// type, and expanded structs contribute one parameter per flattened field.
+const llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
+  std::vector<const llvm::Type*> ArgTys;
+
+  const llvm::Type *ResultType = 0;
+
+  QualType RetTy = FI.getReturnType();
+  const ABIArgInfo &RetAI = FI.getReturnInfo();
+  switch (RetAI.getKind()) {
+  case ABIArgInfo::Expand:
+    assert(0 && "Invalid ABI kind for return argument");
+
+  case ABIArgInfo::Extend:
+  case ABIArgInfo::Direct:
+    ResultType = ConvertType(RetTy);
+    break;
+
+  case ABIArgInfo::Indirect: {
+    // Indirect return: the function returns void and writes its result
+    // through a hidden pointer argument placed first in the argument list.
+    assert(!RetAI.getIndirectAlign() && "Align unused on indirect return.");
+    ResultType = llvm::Type::getVoidTy(getLLVMContext());
+    const llvm::Type *STy = ConvertType(RetTy);
+    ArgTys.push_back(llvm::PointerType::get(STy, RetTy.getAddressSpace()));
+    break;
+  }
+
+  case ABIArgInfo::Ignore:
+    ResultType = llvm::Type::getVoidTy(getLLVMContext());
+    break;
+
+  case ABIArgInfo::Coerce:
+    ResultType = RetAI.getCoerceToType();
+    break;
+  }
+
+  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+         ie = FI.arg_end(); it != ie; ++it) {
+    const ABIArgInfo &AI = it->info;
+
+    switch (AI.getKind()) {
+    case ABIArgInfo::Ignore:
+      // No LLVM parameter at all for ignored arguments.
+      break;
+
+    case ABIArgInfo::Coerce:
+      ArgTys.push_back(AI.getCoerceToType());
+      break;
+
+    case ABIArgInfo::Indirect: {
+      // indirect arguments are always on the stack, which is addr space #0.
+      const llvm::Type *LTy = ConvertTypeForMem(it->type);
+      ArgTys.push_back(llvm::PointerType::getUnqual(LTy));
+      break;
+    }
+
+    case ABIArgInfo::Extend:
+    case ABIArgInfo::Direct:
+      ArgTys.push_back(ConvertType(it->type));
+      break;
+
+    case ABIArgInfo::Expand:
+      // One LLVM parameter per flattened scalar field of the struct.
+      GetExpandedTypes(it->type, ArgTys);
+      break;
+    }
+  }
+
+  return llvm::FunctionType::get(ResultType, ArgTys, IsVariadic);
+}
+
+/// HasIncompleteReturnTypeOrArgumentTypes - Return true if the prototype's
+/// return type or any parameter type is a tag type whose definition has not
+/// been seen, in which case the real LLVM type cannot be computed yet.
+static bool HasIncompleteReturnTypeOrArgumentTypes(const FunctionProtoType *T) {
+  if (const TagType *TT = T->getResultType()->getAs<TagType>()) {
+    if (!TT->getDecl()->isDefinition())
+      return true;
+  }
+
+  for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
+    if (const TagType *TT = T->getArgType(i)->getAs<TagType>()) {
+      if (!TT->getDecl()->isDefinition())
+        return true;
+    }
+  }
+
+  return false;
+}
+
+/// GetFunctionTypeForVtable - Compute the LLVM function type to use for MD in
+/// a vtable slot. When any involved tag type is still incomplete the real
+/// type cannot be built, so an opaque placeholder type is returned instead.
+const llvm::Type *
+CodeGenTypes::GetFunctionTypeForVtable(const CXXMethodDecl *MD) {
+  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+  
+  if (!HasIncompleteReturnTypeOrArgumentTypes(FPT))
+    return GetFunctionType(getFunctionInfo(MD), FPT->isVariadic());
+
+  return llvm::OpaqueType::get(getLLVMContext());
+}
+
+/// ConstructAttributeList - Build the LLVM attribute list (return, parameter,
+/// and function attributes) for a call or definition with signature FI.
+/// TargetDecl, if non-null, supplies declaration attributes (nothrow,
+/// noreturn, const/pure, malloc, regparm). The effective calling convention
+/// is returned through CallingConv. Attribute indices follow LLVM's
+/// convention: 0 = return value, 1..N = parameters, ~0 = the function itself.
+void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
+                                           const Decl *TargetDecl,
+                                           AttributeListType &PAL, 
+                                           unsigned &CallingConv) {
+  unsigned FuncAttrs = 0;
+  unsigned RetAttrs = 0;
+
+  CallingConv = FI.getEffectiveCallingConvention();
+
+  if (FI.isNoReturn())
+    FuncAttrs |= llvm::Attribute::NoReturn;
+
+  // FIXME: handle sseregparm someday...
+  if (TargetDecl) {
+    if (TargetDecl->hasAttr<NoThrowAttr>())
+      FuncAttrs |= llvm::Attribute::NoUnwind;
+    if (TargetDecl->hasAttr<NoReturnAttr>())
+      FuncAttrs |= llvm::Attribute::NoReturn;
+    if (TargetDecl->hasAttr<ConstAttr>())
+      FuncAttrs |= llvm::Attribute::ReadNone;
+    else if (TargetDecl->hasAttr<PureAttr>())
+      FuncAttrs |= llvm::Attribute::ReadOnly;
+    if (TargetDecl->hasAttr<MallocAttr>())
+      RetAttrs |= llvm::Attribute::NoAlias;
+  }
+
+  // Propagate relevant codegen options as function attributes.
+  if (CodeGenOpts.OptimizeSize)
+    FuncAttrs |= llvm::Attribute::OptimizeForSize;
+  if (CodeGenOpts.DisableRedZone)
+    FuncAttrs |= llvm::Attribute::NoRedZone;
+  if (CodeGenOpts.NoImplicitFloat)
+    FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+
+  QualType RetTy = FI.getReturnType();
+  // Index of the next LLVM parameter; starts at 1 (0 is the return value).
+  unsigned Index = 1;
+  const ABIArgInfo &RetAI = FI.getReturnInfo();
+  switch (RetAI.getKind()) {
+  case ABIArgInfo::Extend:
+   if (RetTy->isSignedIntegerType()) {
+     RetAttrs |= llvm::Attribute::SExt;
+   } else if (RetTy->isUnsignedIntegerType()) {
+     RetAttrs |= llvm::Attribute::ZExt;
+   }
+   // FALLTHROUGH
+  case ABIArgInfo::Direct:
+    break;
+
+  case ABIArgInfo::Indirect:
+    // The hidden return slot occupies parameter slot 1.
+    PAL.push_back(llvm::AttributeWithIndex::get(Index,
+                                                llvm::Attribute::StructRet |
+                                                llvm::Attribute::NoAlias));
+    ++Index;
+    // sret disables readnone and readonly
+    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+                   llvm::Attribute::ReadNone);
+    break;
+
+  case ABIArgInfo::Ignore:
+  case ABIArgInfo::Coerce:
+    break;
+
+  case ABIArgInfo::Expand:
+    assert(0 && "Invalid ABI kind for return argument");
+  }
+
+  if (RetAttrs)
+    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+
+  // FIXME: we need to honour command line settings also...
+  // FIXME: RegParm should be reduced in case of nested functions and/or global
+  // register variable.
+  signed RegParm = 0;
+  if (TargetDecl)
+    if (const RegparmAttr *RegParmAttr
+          = TargetDecl->getAttr<RegparmAttr>())
+      RegParm = RegParmAttr->getNumParams();
+
+  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
+  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+         ie = FI.arg_end(); it != ie; ++it) {
+    QualType ParamType = it->type;
+    const ABIArgInfo &AI = it->info;
+    unsigned Attributes = 0;
+
+    if (ParamType.isRestrictQualified())
+      Attributes |= llvm::Attribute::NoAlias;
+
+    switch (AI.getKind()) {
+    case ABIArgInfo::Coerce:
+      break;
+
+    case ABIArgInfo::Indirect:
+      if (AI.getIndirectByVal())
+        Attributes |= llvm::Attribute::ByVal;
+
+      Attributes |=
+        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+      // byval disables readnone and readonly.
+      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+                     llvm::Attribute::ReadNone);
+      break;
+
+    case ABIArgInfo::Extend:
+     if (ParamType->isSignedIntegerType()) {
+       Attributes |= llvm::Attribute::SExt;
+     } else if (ParamType->isUnsignedIntegerType()) {
+       Attributes |= llvm::Attribute::ZExt;
+     }
+     // FALLS THROUGH
+    case ABIArgInfo::Direct:
+      // Consume regparm slots in pointer-width units; a parameter only gets
+      // InReg if enough budget remains after subtracting its size.
+      if (RegParm > 0 &&
+          (ParamType->isIntegerType() || ParamType->isPointerType())) {
+        RegParm -=
+          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+        if (RegParm >= 0)
+          Attributes |= llvm::Attribute::InReg;
+      }
+      // FIXME: handle sseregparm someday...
+      break;
+
+    case ABIArgInfo::Ignore:
+      // Skip increment, no matching LLVM parameter.
+      continue;
+
+    case ABIArgInfo::Expand: {
+      std::vector<const llvm::Type*> Tys;
+      // FIXME: This is rather inefficient. Do we ever actually need to do
+      // anything here? The result should be just reconstructed on the other
+      // side, so extension should be a non-issue.
+      getTypes().GetExpandedTypes(ParamType, Tys);
+      Index += Tys.size();
+      continue;
+    }
+    }
+
+    if (Attributes)
+      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
+    ++Index;
+  }
+  if (FuncAttrs)
+    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+}
+
+/// EmitFunctionProlog - Emit the code that binds each LLVM argument of Fn to
+/// its corresponding formal parameter declaration, undoing whatever ABI
+/// transformation (indirect, coerce, expand, ignore) was applied to it, and
+/// naming the sret argument if present.
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+                                         llvm::Function *Fn,
+                                         const FunctionArgList &Args) {
+  // If this is an implicit-return-zero function, go ahead and
+  // initialize the return value.  TODO: it might be nice to have
+  // a more general mechanism for this that didn't require synthesized
+  // return statements.
+  if (const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+    if (FD->hasImplicitReturnZero()) {
+      QualType RetTy = FD->getResultType().getUnqualifiedType();
+      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+      Builder.CreateStore(Zero, ReturnValue);
+    }
+  }
+
+  // FIXME: We no longer need the types from FunctionArgList; lift up and
+  // simplify.
+
+  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
+  llvm::Function::arg_iterator AI = Fn->arg_begin();
+
+  // Name the struct return argument.
+  if (CGM.ReturnTypeUsesSret(FI)) {
+    AI->setName("agg.result");
+    ++AI;
+  }
+
+  assert(FI.arg_size() == Args.size() &&
+         "Mismatch between function signature & arguments.");
+  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+       i != e; ++i, ++info_it) {
+    const VarDecl *Arg = i->first;
+    QualType Ty = info_it->type;
+    const ABIArgInfo &ArgI = info_it->info;
+
+    switch (ArgI.getKind()) {
+    case ABIArgInfo::Indirect: {
+      llvm::Value* V = AI;
+      if (hasAggregateLLVMType(Ty)) {
+        // Do nothing, aggregates and complex variables are accessed by
+        // reference.
+      } else {
+        // Load scalar value from indirect argument.
+        V = EmitLoadOfScalar(V, false, Ty);
+        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+          // This must be a promotion, for something like
+          // "void a(x) short x; {..."
+          V = EmitScalarConversion(V, Ty, Arg->getType());
+        }
+      }
+      EmitParmDecl(*Arg, V);
+      break;
+    }
+
+    case ABIArgInfo::Extend:
+    case ABIArgInfo::Direct: {
+      assert(AI != Fn->arg_end() && "Argument mismatch!");
+      llvm::Value* V = AI;
+      if (hasAggregateLLVMType(Ty)) {
+        // Create a temporary alloca to hold the argument; the rest of
+        // codegen expects to access aggregates & complex values by
+        // reference.
+        V = CreateMemTemp(Ty);
+        Builder.CreateStore(AI, V);
+      } else {
+        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+          // This must be a promotion, for something like
+          // "void a(x) short x; {..."
+          V = EmitScalarConversion(V, Ty, Arg->getType());
+        }
+      }
+      EmitParmDecl(*Arg, V);
+      break;
+    }
+
+    case ABIArgInfo::Expand: {
+      // If this structure was expanded into multiple arguments then
+      // we need to create a temporary and reconstruct it from the
+      // arguments.
+      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
+      // FIXME: What are the right qualifiers here?
+      llvm::Function::arg_iterator End =
+        ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp, Qualifiers()), AI);
+      EmitParmDecl(*Arg, Temp);
+
+      // Name the arguments used in expansion and increment AI.
+      unsigned Index = 0;
+      for (; AI != End; ++AI, ++Index)
+        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
+      continue;
+    }
+
+    case ABIArgInfo::Ignore:
+      // Initialize the local variable appropriately: an empty temp for
+      // aggregates, undef for scalars, since there is no incoming value.
+      if (hasAggregateLLVMType(Ty)) {
+        EmitParmDecl(*Arg, CreateMemTemp(Ty));
+      } else {
+        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
+      }
+
+      // Skip increment, no matching LLVM parameter.
+      continue;
+
+    case ABIArgInfo::Coerce: {
+      assert(AI != Fn->arg_end() && "Argument mismatch!");
+      // FIXME: This is very wasteful; EmitParmDecl is just going to drop the
+      // result in a new alloca anyway, so we could just store into that
+      // directly if we broke the abstraction down more.
+      llvm::Value *V = CreateMemTemp(Ty, "coerce");
+      CreateCoercedStore(AI, V, /*DestIsVolatile=*/false, *this);
+      // Match to what EmitParmDecl is expecting for this type.
+      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+        V = EmitLoadOfScalar(V, false, Ty);
+        if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
+          // This must be a promotion, for something like
+          // "void a(x) short x; {..."
+          V = EmitScalarConversion(V, Ty, Arg->getType());
+        }
+      }
+      EmitParmDecl(*Arg, V);
+      break;
+    }
+    }
+
+    ++AI;
+  }
+  assert(AI == Fn->arg_end() && "Argument mismatch!");
+}
+
+/// EmitFunctionEpilog - Emit the function's return sequence: move the value
+/// accumulated in the ReturnValue slot into the ABI-mandated location
+/// (sret memory, a direct/extended register value, or a coerced value) and
+/// emit the ret instruction.
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
+                                         llvm::Value *ReturnValue) {
+  llvm::Value *RV = 0;
+
+  // Functions with no result always return void.
+  if (ReturnValue) {
+    QualType RetTy = FI.getReturnType();
+    const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+    switch (RetAI.getKind()) {
+    case ABIArgInfo::Indirect:
+      // Write the result into the hidden sret pointer (the function's first
+      // LLVM argument); the function itself returns void.
+      if (RetTy->isAnyComplexType()) {
+        ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+        StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+      } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+        // Do nothing; aggregrates get evaluated directly into the destination.
+      } else {
+        EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+                          false, RetTy);
+      }
+      break;
+
+    case ABIArgInfo::Extend:
+    case ABIArgInfo::Direct:
+      // The internal return value temp always will have
+      // pointer-to-return-type type.
+      RV = Builder.CreateLoad(ReturnValue);
+      break;
+
+    case ABIArgInfo::Ignore:
+      break;
+
+    case ABIArgInfo::Coerce:
+      RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
+      break;
+
+    case ABIArgInfo::Expand:
+      assert(0 && "Invalid ABI kind for return argument");
+    }
+  }
+
+  if (RV) {
+    Builder.CreateRet(RV);
+  } else {
+    Builder.CreateRetVoid();
+  }
+}
+
+/// EmitCallArg - Emit a single call argument. Reference parameters bind
+/// directly to the argument expression; everything else is evaluated into a
+/// temporary rvalue.
+RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
+  if (ArgType->isReferenceType())
+    return EmitReferenceBindingToExpr(E);
+
+  return EmitAnyExprToTemp(E);
+}
+
+/// EmitCall - Generate a call to Callee as described by CallInfo, lowering
+/// each argument in CallArgs according to its ABIArgInfo kind and translating
+/// the call result back into an RValue of the C-level return type.
+///
+/// \param CallInfo    - ABI lowering information for the callee's signature.
+/// \param Callee      - The function (or function pointer) to call.
+/// \param ReturnValue - Optional caller-provided slot for the return value.
+/// \param CallArgs    - Already-evaluated (RValue, QualType) argument pairs.
+/// \param TargetDecl  - The callee's declaration, if known (used when
+///                      constructing the attribute list).
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+                                 llvm::Value *Callee,
+                                 ReturnValueSlot ReturnValue,
+                                 const CallArgList &CallArgs,
+                                 const Decl *TargetDecl) {
+  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+  llvm::SmallVector<llvm::Value*, 16> Args;
+
+  // Handle struct-return functions by passing a pointer to the
+  // location that we would like to return into.
+  QualType RetTy = CallInfo.getReturnType();
+  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+
+
+  // If the call returns a temporary with struct return, create a temporary
+  // alloca to hold the result, unless one is given to us.
+  if (CGM.ReturnTypeUsesSret(CallInfo)) {
+    llvm::Value *Value = ReturnValue.getValue();
+    if (!Value)
+      Value = CreateMemTemp(RetTy);
+    // The sret pointer is always the first argument.
+    Args.push_back(Value);
+  }
+
+  assert(CallInfo.arg_size() == CallArgs.size() &&
+         "Mismatch between function signature & arguments.");
+  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+       I != E; ++I, ++info_it) {
+    const ABIArgInfo &ArgInfo = info_it->info;
+    RValue RV = I->first;
+
+    switch (ArgInfo.getKind()) {
+    case ABIArgInfo::Indirect:
+      // Pass via a hidden pointer: scalars/complexes are spilled to a
+      // temporary first; aggregates already live in memory.
+      if (RV.isScalar() || RV.isComplex()) {
+        // Make a temporary alloca to pass the argument.
+        Args.push_back(CreateMemTemp(I->second));
+        if (RV.isScalar())
+          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
+        else
+          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+      } else {
+        Args.push_back(RV.getAggregateAddr());
+      }
+      break;
+
+    case ABIArgInfo::Extend:
+    case ABIArgInfo::Direct:
+      if (RV.isScalar()) {
+        Args.push_back(RV.getScalarVal());
+      } else if (RV.isComplex()) {
+        // Pass a complex as a first-class two-element aggregate.
+        llvm::Value *Tmp = llvm::UndefValue::get(ConvertType(I->second));
+        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().first, 0);
+        Tmp = Builder.CreateInsertValue(Tmp, RV.getComplexVal().second, 1);
+        Args.push_back(Tmp);
+      } else {
+        Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
+      }
+      break;
+
+    case ABIArgInfo::Ignore:
+      break;
+
+    case ABIArgInfo::Coerce: {
+      // FIXME: Avoid the conversion through memory if possible.
+      llvm::Value *SrcPtr;
+      if (RV.isScalar()) {
+        SrcPtr = CreateMemTemp(I->second, "coerce");
+        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, I->second);
+      } else if (RV.isComplex()) {
+        SrcPtr = CreateMemTemp(I->second, "coerce");
+        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+      } else
+        SrcPtr = RV.getAggregateAddr();
+      // Reload the value with the ABI-mandated coerced type.
+      Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+                                       *this));
+      break;
+    }
+
+    case ABIArgInfo::Expand:
+      // Flatten the aggregate into multiple scalar arguments.
+      ExpandTypeToArgs(I->second, RV, Args);
+      break;
+    }
+  }
+
+  // If the callee is a bitcast of a function to a varargs pointer to function
+  // type, check to see if we can remove the bitcast.  This handles some cases
+  // with unprototyped functions.
+  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
+    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
+      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
+      const llvm::FunctionType *CurFT =
+        cast<llvm::FunctionType>(CurPT->getElementType());
+      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+
+      if (CE->getOpcode() == llvm::Instruction::BitCast &&
+          ActualFT->getReturnType() == CurFT->getReturnType() &&
+          ActualFT->getNumParams() == CurFT->getNumParams() &&
+          ActualFT->getNumParams() == Args.size()) {
+        bool ArgsMatch = true;
+        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
+          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
+            ArgsMatch = false;
+            break;
+          }
+
+        // Strip the cast if we can get away with it.  This is a nice cleanup,
+        // but also allows us to inline the function at -O0 if it is marked
+        // always_inline.
+        if (ArgsMatch)
+          Callee = CalleeF;
+      }
+    }
+
+
+  llvm::BasicBlock *InvokeDest = getInvokeDest();
+  unsigned CallingConv;
+  CodeGen::AttributeListType AttributeList;
+  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
+  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
+                                                   AttributeList.end());
+
+  // Emit a plain call unless we may need to unwind into a landing pad.
+  llvm::CallSite CS;
+  if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
+    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
+  } else {
+    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+                              Args.data(), Args.data()+Args.size());
+    EmitBlock(Cont);
+  }
+
+  CS.setAttributes(Attrs);
+  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+  // If the call doesn't return, finish the basic block and clear the
+  // insertion point; this allows the rest of IRgen to discard
+  // unreachable code.
+  if (CS.doesNotReturn()) {
+    Builder.CreateUnreachable();
+    Builder.ClearInsertionPoint();
+
+    // FIXME: For now, emit a dummy basic block because expr emitters in
+    // generally are not ready to handle emitting expressions at unreachable
+    // points.
+    EnsureInsertPoint();
+
+    // Return a reasonable RValue.
+    return GetUndefRValue(RetTy);
+  }
+
+  llvm::Instruction *CI = CS.getInstruction();
+  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
+    CI->setName("call");
+
+  // Translate the call result back into an RValue of the C return type.
+  switch (RetAI.getKind()) {
+  case ABIArgInfo::Indirect:
+    // The result was written through the sret pointer (Args[0]).
+    if (RetTy->isAnyComplexType())
+      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
+    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+      return RValue::getAggregate(Args[0]);
+    return RValue::get(EmitLoadOfScalar(Args[0], false, RetTy));
+
+  case ABIArgInfo::Extend:
+  case ABIArgInfo::Direct:
+    if (RetTy->isAnyComplexType()) {
+      llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+      llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+      return RValue::getComplex(std::make_pair(Real, Imag));
+    }
+    if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+      llvm::Value *DestPtr = ReturnValue.getValue();
+      bool DestIsVolatile = ReturnValue.isVolatile();
+
+      if (!DestPtr) {
+        DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+        DestIsVolatile = false;
+      }
+      Builder.CreateStore(CI, DestPtr, DestIsVolatile);
+      return RValue::getAggregate(DestPtr);
+    }
+    return RValue::get(CI);
+
+  case ABIArgInfo::Ignore:
+    // If we are ignoring an argument that had a result, make sure to
+    // construct the appropriate return value for our caller.
+    return GetUndefRValue(RetTy);
+
+  case ABIArgInfo::Coerce: {
+    // Store the coerced result to memory, then reload it with the C type.
+    llvm::Value *DestPtr = ReturnValue.getValue();
+    bool DestIsVolatile = ReturnValue.isVolatile();
+    
+    if (!DestPtr) {
+      DestPtr = CreateMemTemp(RetTy, "coerce");
+      DestIsVolatile = false;
+    }
+    
+    CreateCoercedStore(CI, DestPtr, DestIsVolatile, *this);
+    if (RetTy->isAnyComplexType())
+      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
+    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+      return RValue::getAggregate(DestPtr);
+    return RValue::get(EmitLoadOfScalar(DestPtr, false, RetTy));
+  }
+
+  case ABIArgInfo::Expand:
+    assert(0 && "Invalid ABI kind for return argument");
+  }
+
+  assert(0 && "Unhandled ABIArgInfo::Kind");
+  return RValue::get(0);
+}
+
+/* VarArg handling */
+
+/// EmitVAArg - Generate target-specific code to extract the next argument of
+/// type Ty from the va_list at VAListAddr, by delegating to the ABIInfo.
+llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
+  const ABIInfo &Info = CGM.getTypes().getABIInfo();
+  return Info.EmitVAArg(VAListAddr, Ty, *this);
+}
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..9601e9a
--- /dev/null
+++ b/lib/CodeGen/CGCall.h
@@ -0,0 +1,156 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCALL_H
+#define CLANG_CODEGEN_CGCALL_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Value.h"
+#include "clang/AST/Type.h"
+
+#include "CGValue.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+  struct AttributeWithIndex;
+  class Function;
+  class Type;
+  class Value;
+
+  template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+  class ASTContext;
+  class Decl;
+  class FunctionDecl;
+  class ObjCMethodDecl;
+  class VarDecl;
+
+namespace CodeGen {
+  /// AttributeListType - Collection of (index, attribute) pairs that will be
+  /// applied to a function or call site.
+  typedef llvm::SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+
+  /// CallArgList - Type for representing both the value and type of
+  /// arguments in a call.
+  typedef llvm::SmallVector<std::pair<RValue, QualType>, 16> CallArgList;
+
+  /// FunctionArgList - Type for representing both the decl and type
+  /// of parameters to a function. The decl must be either a
+  /// ParmVarDecl or ImplicitParamDecl.
+  typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
+                            16> FunctionArgList;
+
+  /// CGFunctionInfo - Class to encapsulate the information about a
+  /// function definition.
+  class CGFunctionInfo : public llvm::FoldingSetNode {
+    // One type plus its ABI lowering decision.
+    struct ArgInfo {
+      QualType type;
+      ABIArgInfo info;
+    };
+
+    /// The LLVM::CallingConv to use for this function (as specified by the
+    /// user).
+    unsigned CallingConvention;
+
+    /// The LLVM::CallingConv to actually use for this function, which may
+    /// depend on the ABI.
+    unsigned EffectiveCallingConvention;
+
+    /// Whether this function is noreturn.
+    bool NoReturn;
+
+    unsigned NumArgs;
+    // Heap array of NumArgs + 1 entries: Args[0] describes the return
+    // value; the actual arguments are Args[1..NumArgs].
+    ArgInfo *Args;
+
+  public:
+    typedef const ArgInfo *const_arg_iterator;
+    typedef ArgInfo *arg_iterator;
+
+    CGFunctionInfo(unsigned CallingConvention,
+                   bool NoReturn,
+                   QualType ResTy,
+                   const llvm::SmallVector<QualType, 16> &ArgTys);
+    ~CGFunctionInfo() { delete[] Args; }
+
+    // Iteration skips Args[0], which holds the return-value info.
+    const_arg_iterator arg_begin() const { return Args + 1; }
+    const_arg_iterator arg_end() const { return Args + 1 + NumArgs; }
+    arg_iterator arg_begin() { return Args + 1; }
+    arg_iterator arg_end() { return Args + 1 + NumArgs; }
+
+    unsigned  arg_size() const { return NumArgs; }
+
+    bool isNoReturn() const { return NoReturn; }
+
+    /// getCallingConvention - Return the user specified calling
+    /// convention.
+    unsigned getCallingConvention() const { return CallingConvention; }
+
+    /// getEffectiveCallingConvention - Return the actual calling convention to
+    /// use, which may depend on the ABI.
+    unsigned getEffectiveCallingConvention() const {
+      return EffectiveCallingConvention;
+    }
+    void setEffectiveCallingConvention(unsigned Value) {
+      EffectiveCallingConvention = Value;
+    }
+
+    QualType getReturnType() const { return Args[0].type; }
+
+    ABIArgInfo &getReturnInfo() { return Args[0].info; }
+    const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
+
+    // Profile for FoldingSet uniquing: CC + noreturn + return type + arg
+    // types. The ABIArgInfo lowering is derived data and is not hashed.
+    void Profile(llvm::FoldingSetNodeID &ID) {
+      ID.AddInteger(getCallingConvention());
+      ID.AddBoolean(NoReturn);
+      getReturnType().Profile(ID);
+      for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
+        it->type.Profile(ID);
+    }
+    // Static variant used to look up an entry before it is constructed;
+    // must hash exactly the same fields as the member Profile above.
+    template<class Iterator>
+    static void Profile(llvm::FoldingSetNodeID &ID,
+                        unsigned CallingConvention,
+                        bool NoReturn,
+                        QualType ResTy,
+                        Iterator begin,
+                        Iterator end) {
+      ID.AddInteger(CallingConvention);
+      ID.AddBoolean(NoReturn);
+      ResTy.Profile(ID);
+      for (; begin != end; ++begin)
+        begin->Profile(ID);
+    }
+  };
+  
+  /// ReturnValueSlot - Contains the address where the return value of a 
+  /// function can be stored, and whether the address is volatile or not.
+  class ReturnValueSlot {
+    // Packs the "is volatile" flag into the low bit of the pointer.
+    // NOTE(review): llvm::PointerIntPair appears to be pulled in indirectly
+    // (via FoldingSet.h) -- consider including its header explicitly.
+    llvm::PointerIntPair<llvm::Value *, 1, bool> Value;
+
+  public:
+    // A default-constructed slot is null: no destination was provided.
+    ReturnValueSlot() {}
+    ReturnValueSlot(llvm::Value *Value, bool IsVolatile)
+      : Value(Value, IsVolatile) {}
+
+    bool isNull() const { return !getValue(); }
+    
+    bool isVolatile() const { return Value.getInt(); }
+    llvm::Value *getValue() const { return Value.getPointer(); }
+  };
+  
+}  // end namespace CodeGen
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
new file mode 100644
index 0000000..522fee4
--- /dev/null
+++ b/lib/CodeGen/CGClass.cpp
@@ -0,0 +1,1395 @@
+//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of classes
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/// ComputeNonVirtualBaseClassOffset - Walk the base path recorded in Paths,
+/// beginning at element Start, and accumulate the byte offset of the final
+/// base subobject. All path elements from Start on must be non-virtual.
+static uint64_t 
+ComputeNonVirtualBaseClassOffset(ASTContext &Context, CXXBasePaths &Paths,
+                                 unsigned Start) {
+  uint64_t ByteOffset = 0;
+
+  const CXXBasePath &Path = Paths.front();
+  for (unsigned Idx = Start; Idx != Path.size(); ++Idx) {
+    const CXXBasePathElement &Elt = Path[Idx];
+
+    const CXXBaseSpecifier *Base = Elt.Base;
+    assert(!Base->isVirtual() && "Should not see virtual bases here!");
+
+    const CXXRecordDecl *BaseDecl =
+      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+    // The record layout reports base offsets in bits; convert to bytes.
+    const ASTRecordLayout &Layout = Context.getASTRecordLayout(Elt.Class);
+    ByteOffset += Layout.getBaseClassOffset(BaseDecl) / 8;
+  }
+
+  return ByteOffset;
+}
+
+/// GetNonVirtualBaseClassOffset - Return the byte offset of BaseClass within
+/// Class as a constant of the target's pointer-difference type, or null when
+/// the offset is zero (including the degenerate Class == BaseClass case).
+llvm::Constant *
+CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *Class,
+                                            const CXXRecordDecl *BaseClass) {
+  if (Class == BaseClass)
+    return 0;
+
+  CXXBasePaths Paths(/*FindAmbiguities=*/false,
+                     /*RecordPaths=*/true, /*DetectVirtual=*/false);
+  CXXRecordDecl *MutableClass = const_cast<CXXRecordDecl *>(Class);
+  CXXRecordDecl *MutableBase = const_cast<CXXRecordDecl *>(BaseClass);
+  if (!MutableClass->isDerivedFrom(MutableBase, Paths)) {
+    assert(false && "Class must be derived from the passed in base class!");
+    return 0;
+  }
+
+  uint64_t ByteOffset =
+    ComputeNonVirtualBaseClassOffset(getContext(), Paths, 0);
+  if (!ByteOffset)
+    return 0;
+
+  // Offsets are expressed in the target's ptrdiff_t.
+  const llvm::Type *PtrDiffTy =
+    Types.ConvertType(getContext().getPointerDiffType());
+  return llvm::ConstantInt::get(PtrDiffTy, ByteOffset);
+}
+
+// FIXME: This probably belongs in CGVtable, but it relies on 
+// the static function ComputeNonVirtualBaseClassOffset, so we should make that
+// a CodeGenModule member function as well.
+/// ComputeThunkAdjustment - Compute the (non-virtual byte offset, virtual
+/// offset index) adjustment required to convert a ClassDecl pointer into a
+/// BaseClassDecl pointer.
+ThunkAdjustment
+CodeGenModule::ComputeThunkAdjustment(const CXXRecordDecl *ClassDecl,
+                                      const CXXRecordDecl *BaseClassDecl) {
+  CXXBasePaths Paths(/*FindAmbiguities=*/false,
+                     /*RecordPaths=*/true, /*DetectVirtual=*/false);
+  if (!const_cast<CXXRecordDecl *>(ClassDecl)->
+        isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClassDecl), Paths)) {
+    assert(false && "Class must be derived from the passed in base class!");
+    return ThunkAdjustment();
+  }
+
+  unsigned Start = 0;
+  uint64_t VirtualOffset = 0;
+
+  // Find the last virtual base on the inheritance path; the non-virtual
+  // portion of the adjustment begins after it.
+  const CXXBasePath &Path = Paths.front();
+  const CXXRecordDecl *VBase = 0;
+  for (unsigned i = 0, e = Path.size(); i != e; ++i) {
+    const CXXBasePathElement& Element = Path[i];
+    if (Element.Base->isVirtual()) {
+      Start = i+1;
+      QualType VBaseType = Element.Base->getType();
+      VBase = cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
+    }
+  }
+  // NOTE(review): the index is looked up with BaseClassDecl rather than the
+  // VBase found above -- confirm this is intended.
+  if (VBase)
+    VirtualOffset = 
+      getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
+  
+  uint64_t Offset = 
+    ComputeNonVirtualBaseClassOffset(getContext(), Paths, Start);
+  return ThunkAdjustment(Offset, VirtualOffset);
+}
+
+/// GetAddressOfBaseClass - Convert Value, a pointer to a Class object, into
+/// a pointer to its BaseClass subobject by applying the non-virtual (and,
+/// where the path goes through a virtual base, the vtable-provided virtual)
+/// base offset. If NullCheckValue is set, a null input pointer is propagated
+/// as a null result instead of being adjusted.
+llvm::Value *
+CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
+                                       const CXXRecordDecl *Class,
+                                       const CXXRecordDecl *BaseClass,
+                                       bool NullCheckValue) {
+  QualType BTy =
+    getContext().getCanonicalType(
+      getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(BaseClass)));
+  const llvm::Type *BasePtrTy = llvm::PointerType::getUnqual(ConvertType(BTy));
+
+  if (Class == BaseClass) {
+    // Just cast back.
+    return Builder.CreateBitCast(Value, BasePtrTy);
+  }
+
+  CXXBasePaths Paths(/*FindAmbiguities=*/false,
+                     /*RecordPaths=*/true, /*DetectVirtual=*/false);
+  if (!const_cast<CXXRecordDecl *>(Class)->
+        isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClass), Paths)) {
+    assert(false && "Class must be derived from the passed in base class!");
+    return 0;
+  }
+
+  unsigned Start = 0;
+  llvm::Value *VirtualOffset = 0;
+
+  // Find the last virtual base on the path; the static (non-virtual) offset
+  // is accumulated only from the element after it.
+  const CXXBasePath &Path = Paths.front();
+  const CXXRecordDecl *VBase = 0;
+  for (unsigned i = 0, e = Path.size(); i != e; ++i) {
+    const CXXBasePathElement& Element = Path[i];
+    if (Element.Base->isVirtual()) {
+      Start = i+1;
+      QualType VBaseType = Element.Base->getType();
+      VBase = cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
+    }
+  }
+
+  uint64_t Offset = 
+    ComputeNonVirtualBaseClassOffset(getContext(), Paths, Start);
+  
+  if (!Offset && !VBase) {
+    // Just cast back.
+    return Builder.CreateBitCast(Value, BasePtrTy);
+  }    
+
+  llvm::BasicBlock *CastNull = 0;
+  llvm::BasicBlock *CastNotNull = 0;
+  llvm::BasicBlock *CastEnd = 0;
+  
+  if (NullCheckValue) {
+    // Branch around the adjustment when the incoming pointer is null.
+    CastNull = createBasicBlock("cast.null");
+    CastNotNull = createBasicBlock("cast.notnull");
+    CastEnd = createBasicBlock("cast.end");
+    
+    llvm::Value *IsNull = 
+      Builder.CreateICmpEQ(Value,
+                           llvm::Constant::getNullValue(Value->getType()));
+    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+    EmitBlock(CastNotNull);
+  }
+  
+  // The virtual base offset must be loaded from the vtable at runtime.
+  if (VBase)
+    VirtualOffset = GetVirtualBaseClassOffset(Value, Class, VBase);
+
+  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+  llvm::Value *NonVirtualOffset = 0;
+  if (Offset)
+    NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy, Offset);
+  
+  // Combine the virtual and non-virtual components of the offset.
+  llvm::Value *BaseOffset;
+  if (VBase) {
+    if (NonVirtualOffset)
+      BaseOffset = Builder.CreateAdd(VirtualOffset, NonVirtualOffset);
+    else
+      BaseOffset = VirtualOffset;
+  } else
+    BaseOffset = NonVirtualOffset;
+  
+  // Apply the base offset.
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+  Value = Builder.CreateBitCast(Value, Int8PtrTy);
+  Value = Builder.CreateGEP(Value, BaseOffset, "add.ptr");
+  
+  // Cast back.
+  Value = Builder.CreateBitCast(Value, BasePtrTy);
+ 
+  if (NullCheckValue) {
+    Builder.CreateBr(CastEnd);
+    EmitBlock(CastNull);
+    Builder.CreateBr(CastEnd);
+    EmitBlock(CastEnd);
+    
+    // Merge the adjusted pointer with a null from the cast.null path.
+    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
+    PHI->reserveOperandSpace(2);
+    PHI->addIncoming(Value, CastNotNull);
+    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), 
+                     CastNull);
+    Value = PHI;
+  }
+  
+  return Value;
+}
+
+/// GetAddressOfDerivedClass - Convert Value, a pointer to a Class base
+/// subobject, into a pointer to the DerivedClass object containing it by
+/// subtracting the non-virtual base offset. If NullCheckValue is set, a null
+/// input pointer is propagated as a null result instead of being adjusted.
+llvm::Value *
+CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
+                                          const CXXRecordDecl *Class,
+                                          const CXXRecordDecl *DerivedClass,
+                                          bool NullCheckValue) {
+  QualType DerivedTy =
+    getContext().getCanonicalType(
+    getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(DerivedClass)));
+  const llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+  
+  if (Class == DerivedClass) {
+    // Just cast back.
+    return Builder.CreateBitCast(Value, DerivedPtrTy);
+  }
+
+  llvm::Value *NonVirtualOffset =
+    CGM.GetNonVirtualBaseClassOffset(DerivedClass, Class);
+  
+  if (!NonVirtualOffset) {
+    // No offset, we can just cast back.
+    return Builder.CreateBitCast(Value, DerivedPtrTy);
+  }
+  
+  llvm::BasicBlock *CastNull = 0;
+  llvm::BasicBlock *CastNotNull = 0;
+  llvm::BasicBlock *CastEnd = 0;
+  
+  if (NullCheckValue) {
+    // Branch around the adjustment when the incoming pointer is null.
+    CastNull = createBasicBlock("cast.null");
+    CastNotNull = createBasicBlock("cast.notnull");
+    CastEnd = createBasicBlock("cast.end");
+    
+    llvm::Value *IsNull = 
+    Builder.CreateICmpEQ(Value,
+                         llvm::Constant::getNullValue(Value->getType()));
+    Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+    EmitBlock(CastNotNull);
+  }
+  
+  // Apply the offset: derived = (DerivedTy*)((intptr_t)value - offset).
+  // Note: IntToPtr already produces DerivedPtrTy, so no further cast is
+  // needed (the previous trailing bitcast to the same type was a no-op).
+  Value = Builder.CreatePtrToInt(Value, NonVirtualOffset->getType());
+  Value = Builder.CreateSub(Value, NonVirtualOffset);
+  Value = Builder.CreateIntToPtr(Value, DerivedPtrTy);
+
+  if (NullCheckValue) {
+    Builder.CreateBr(CastEnd);
+    EmitBlock(CastNull);
+    Builder.CreateBr(CastEnd);
+    EmitBlock(CastEnd);
+    
+    // Merge the adjusted pointer with a null from the cast.null path.
+    llvm::PHINode *PHI = Builder.CreatePHI(Value->getType());
+    PHI->reserveOperandSpace(2);
+    PHI->addIncoming(Value, CastNotNull);
+    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), 
+                     CastNull);
+    Value = PHI;
+  }
+  
+  return Value;
+}
+
+/// EmitClassAggrMemberwiseCopy - This routine generates code to copy a class
+/// array of objects from SrcValue to DestValue. Copying can be either a bitwise
+/// copy or via a copy constructor call.
+//  FIXME. Consolidate this with EmitCXXAggrConstructorCall.
+void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
+                                            llvm::Value *Src,
+                                            const ArrayType *Array,
+                                            const CXXRecordDecl *BaseClassDecl,
+                                            QualType Ty) {
+  const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+  assert(CA && "VLA cannot be copied over");
+  bool BitwiseCopy = BaseClassDecl->hasTrivialCopyConstructor();
+
+  // Create a temporary for the loop index and initialize it with 0.
+  llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
+                                           "loop.index");
+  llvm::Value* zeroConstant =
+    llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
+  Builder.CreateStore(zeroConstant, IndexPtr);
+  // Start the loop with a block that tests the condition.
+  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+  EmitBlock(CondBlock);
+
+  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+  // Generate: if (loop-index < number-of-elements fall to the loop body,
+  // otherwise, go to the block after the for-loop.
+  uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
+  llvm::Value * NumElementsPtr =
+    llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
+  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
+                                              "isless");
+  // If the condition is true, execute the body.
+  Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+  EmitBlock(ForBody);
+  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+  // Inside the loop body, emit the constructor call on the array element.
+  // The counter is re-loaded inside the body so each IR iteration indexes
+  // the current element.
+  Counter = Builder.CreateLoad(IndexPtr);
+  Src = Builder.CreateInBoundsGEP(Src, Counter, "srcaddress");
+  Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
+  if (BitwiseCopy)
+    EmitAggregateCopy(Dest, Src, Ty);
+  else if (CXXConstructorDecl *BaseCopyCtor =
+           BaseClassDecl->getCopyConstructor(getContext(), 0)) {
+    llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
+                                                      Ctor_Complete);
+    CallArgList CallArgs;
+    // Push the this (Dest) ptr.
+    CallArgs.push_back(std::make_pair(RValue::get(Dest),
+                                      BaseCopyCtor->getThisType(getContext())));
+
+    // Push the Src ptr.
+    CallArgs.push_back(std::make_pair(RValue::get(Src),
+                                     BaseCopyCtor->getParamDecl(0)->getType()));
+    const FunctionProtoType *FPT
+      = BaseCopyCtor->getType()->getAs<FunctionProtoType>();
+    EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+             Callee, ReturnValueSlot(), CallArgs, BaseCopyCtor);
+  }
+  EmitBlock(ContinueBlock);
+
+  // Emit the increment of the loop counter.
+  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+  Counter = Builder.CreateLoad(IndexPtr);
+  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+  Builder.CreateStore(NextVal, IndexPtr);
+
+  // Finally, branch back up to the condition for the next iteration.
+  EmitBranch(CondBlock);
+
+  // Emit the fall-through block.
+  EmitBlock(AfterFor, true);
+}
+
+/// EmitClassAggrCopyAssignment - This routine generates code to assign a class
+/// array of objects from SrcValue to DestValue. Assignment can be either a
+/// bitwise assignment or via a copy assignment operator function call.
+/// FIXME. This can be consolidated with EmitClassAggrMemberwiseCopy
+void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
+                                            llvm::Value *Src,
+                                            const ArrayType *Array,
+                                            const CXXRecordDecl *BaseClassDecl,
+                                            QualType Ty) {
+  const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+  assert(CA && "VLA cannot be asssigned");
+  bool BitwiseAssign = BaseClassDecl->hasTrivialCopyAssignment();
+
+  // Create a temporary for the loop index and initialize it with 0.
+  llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
+                                           "loop.index");
+  llvm::Value* zeroConstant =
+  llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
+  Builder.CreateStore(zeroConstant, IndexPtr);
+  // Start the loop with a block that tests the condition.
+  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+
+  EmitBlock(CondBlock);
+
+  llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+  // Generate: if (loop-index < number-of-elements fall to the loop body,
+  // otherwise, go to the block after the for-loop.
+  uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
+  llvm::Value * NumElementsPtr =
+  llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
+  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
+  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
+                                              "isless");
+  // If the condition is true, execute the body.
+  Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+  EmitBlock(ForBody);
+  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
+  // Inside the loop body, emit the assignment operator call on array element.
+  // The counter is re-loaded inside the body so each IR iteration indexes
+  // the current element.
+  Counter = Builder.CreateLoad(IndexPtr);
+  Src = Builder.CreateInBoundsGEP(Src, Counter, "srcaddress");
+  Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
+  const CXXMethodDecl *MD = 0;
+  if (BitwiseAssign)
+    EmitAggregateCopy(Dest, Src, Ty);
+  else {
+    BaseClassDecl->hasConstCopyAssignment(getContext(), MD);
+    assert(MD && "EmitClassAggrCopyAssignment - No user assign");
+    const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+    const llvm::Type *LTy =
+    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+                                   FPT->isVariadic());
+    llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, LTy);
+
+    CallArgList CallArgs;
+    // Push the this (Dest) ptr.
+    CallArgs.push_back(std::make_pair(RValue::get(Dest),
+                                      MD->getThisType(getContext())));
+
+    // Push the Src ptr.
+    QualType SrcTy = MD->getParamDecl(0)->getType();
+    RValue SrcValue = SrcTy->isReferenceType() ? RValue::get(Src) :
+                                                 RValue::getAggregate(Src);
+    CallArgs.push_back(std::make_pair(SrcValue, SrcTy));
+    EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+             Callee, ReturnValueSlot(), CallArgs, MD);
+  }
+  EmitBlock(ContinueBlock);
+
+  // Emit the increment of the loop counter.
+  llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+  Counter = Builder.CreateLoad(IndexPtr);
+  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
+  Builder.CreateStore(NextVal, IndexPtr);
+
+  // Finally, branch back up to the condition for the next iteration.
+  EmitBranch(CondBlock);
+
+  // Emit the fall-through block.
+  EmitBlock(AfterFor, true);
+}
+
+/// GetVTTParameter - Return the VTT parameter that should be passed to a
+/// base constructor/destructor with virtual bases.
+static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
+  if (!CGVtableInfo::needsVTTParameter(GD)) {
+    // This constructor/destructor does not need a VTT parameter.
+    return 0;
+  }
+  
+  const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
+  const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+  
+  llvm::Value *VTT;
+
+  // Index of the sub-VTT for Base within RD's VTT.
+  uint64_t SubVTTIndex = 
+    CGF.CGM.getVtableInfo().getSubVTTIndex(RD, Base);
+  assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+  
+  if (CGVtableInfo::needsVTTParameter(CGF.CurGD)) {
+    // A VTT parameter was passed to the constructor, use it.
+    VTT = CGF.LoadCXXVTT();
+    VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+  } else {
+    // We're the complete constructor, so get the VTT by name.
+    VTT = CGF.CGM.getVtableInfo().getVTT(RD);
+    VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+  }
+
+  return VTT;
+}
+
+                                    
+/// EmitClassMemberwiseCopy - This routine generates code to copy a class
+/// object from SrcValue to DestValue. Copying can be either a bitwise copy
+/// or via a copy constructor call.
+void CodeGenFunction::EmitClassMemberwiseCopy(
+                        llvm::Value *Dest, llvm::Value *Src,
+                        const CXXRecordDecl *ClassDecl,
+                        const CXXRecordDecl *BaseClassDecl, QualType Ty) {
+  CXXCtorType CtorType = Ctor_Complete;
+  
+  // A non-null ClassDecl means we are copying a base subobject: adjust both
+  // pointers to the base and use the base-object copy constructor.
+  if (ClassDecl) {
+    Dest = GetAddressOfBaseClass(Dest, ClassDecl, BaseClassDecl,
+                                 /*NullCheckValue=*/false);
+    Src = GetAddressOfBaseClass(Src, ClassDecl, BaseClassDecl,
+                                /*NullCheckValue=*/false);
+
+    // We want to call the base constructor.
+    CtorType = Ctor_Base;
+  }
+  if (BaseClassDecl->hasTrivialCopyConstructor()) {
+    EmitAggregateCopy(Dest, Src, Ty);
+    return;
+  }
+
+  if (CXXConstructorDecl *BaseCopyCtor =
+      BaseClassDecl->getCopyConstructor(getContext(), 0)) {
+    llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor, CtorType);
+    CallArgList CallArgs;
+    // Push the this (Dest) ptr.
+    CallArgs.push_back(std::make_pair(RValue::get(Dest),
+                                      BaseCopyCtor->getThisType(getContext())));
+
+    // Push the VTT parameter, if necessary.
+    if (llvm::Value *VTT = 
+          GetVTTParameter(*this, GlobalDecl(BaseCopyCtor, CtorType))) {
+      QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+      CallArgs.push_back(std::make_pair(RValue::get(VTT), T));
+    }
+
+    // Push the Src ptr.
+    CallArgs.push_back(std::make_pair(RValue::get(Src),
+                       BaseCopyCtor->getParamDecl(0)->getType()));
+    const FunctionProtoType *FPT =
+      BaseCopyCtor->getType()->getAs<FunctionProtoType>();
+    EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+             Callee, ReturnValueSlot(), CallArgs, BaseCopyCtor);
+  }
+}
+
/// EmitClassCopyAssignment - This routine generates code to copy assign a class
/// object from SrcValue to DestValue. Assignment can be either a bitwise
/// assignment or via an assignment operator call.
///
/// When ClassDecl is non-null, Dest/Src point at an object of type ClassDecl
/// and are first adjusted to its BaseClassDecl subobject; when null, they
/// already point directly at an object of type BaseClassDecl.
// FIXME. Consolidate this with EmitClassMemberwiseCopy as they share a lot.
void CodeGenFunction::EmitClassCopyAssignment(
                                        llvm::Value *Dest, llvm::Value *Src,
                                        const CXXRecordDecl *ClassDecl,
                                        const CXXRecordDecl *BaseClassDecl,
                                        QualType Ty) {
  if (ClassDecl) {
    Dest = GetAddressOfBaseClass(Dest, ClassDecl, BaseClassDecl,
                                 /*NullCheckValue=*/false);
    Src = GetAddressOfBaseClass(Src, ClassDecl, BaseClassDecl,
                                /*NullCheckValue=*/false);
  }
  // A trivial copy-assignment operator reduces to a bitwise copy.
  if (BaseClassDecl->hasTrivialCopyAssignment()) {
    EmitAggregateCopy(Dest, Src, Ty);
    return;
  }

  // Look up the copy-assignment operator to call; hasConstCopyAssignment
  // fills in MD. NOTE(review): only the const variant is searched — confirm
  // the non-const case cannot reach here.
  const CXXMethodDecl *MD = 0;
  BaseClassDecl->hasConstCopyAssignment(getContext(), MD);
  assert(MD && "EmitClassCopyAssignment - missing copy assign");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *LTy =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  llvm::Constant *Callee = CGM.GetAddrOfFunction(MD, LTy);

  CallArgList CallArgs;
  // Push the this (Dest) ptr.
  CallArgs.push_back(std::make_pair(RValue::get(Dest),
                                    MD->getThisType(getContext())));

  // Push the Src ptr: pass the address for a reference parameter, otherwise
  // pass it as an aggregate rvalue.
  QualType SrcTy = MD->getParamDecl(0)->getType();
  RValue SrcValue = SrcTy->isReferenceType() ? RValue::get(Src) :
                                               RValue::getAggregate(Src);
  CallArgs.push_back(std::make_pair(SrcValue, SrcTy));
  EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
           Callee, ReturnValueSlot(), CallArgs, MD);
}
+
+/// SynthesizeDefaultConstructor - synthesize a default constructor
+void
+CodeGenFunction::SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor,
+                                              CXXCtorType Type,
+                                              llvm::Function *Fn,
+                                              const FunctionArgList &Args) {
+  assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor");
+  StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args, 
+                SourceLocation());
+  EmitCtorPrologue(Ctor, Type);
+  FinishFunction();
+}
+
/// SynthesizeCXXCopyConstructor - This routine implicitly defines body of a
/// copy constructor, in accordance with section 12.8 (p7 and p8) of C++03.
/// The implicitly-defined copy constructor for class X performs a memberwise
/// copy of its subobjects. The order of copying is the same as the order of
/// initialization of bases and members in a user-defined constructor.
/// Each subobject is copied in the manner appropriate to its type:
///  if the subobject is of class type, the copy constructor for the class is
///  used;
///  if the subobject is an array, each element is copied, in the manner
///  appropriate to the element type;
///  if the subobject is of scalar type, the built-in assignment operator is
///  used.
/// Virtual base class subobjects shall be copied only once by the
/// implicitly-defined copy constructor

void 
CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
                                              CXXCtorType Type,
                                              llvm::Function *Fn,
                                              const FunctionArgList &Args) {
  const CXXRecordDecl *ClassDecl = Ctor->getParent();
  assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
      "SynthesizeCXXCopyConstructor - copy constructor has definition already");
  assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor");
  StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args, 
                SourceLocation());

  // The first two arguments are 'this' and the source object; load the
  // actual pointers out of their local allocas.
  FunctionArgList::const_iterator i = Args.begin();
  const VarDecl *ThisArg = i->first;
  llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
  llvm::Value *LoadOfThis = Builder.CreateLoad(ThisObj, "this");
  const VarDecl *SrcArg = (i+1)->first;
  llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
  llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);

  // Copy each direct, non-virtual base subobject.
  for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
       Base != ClassDecl->bases_end(); ++Base) {
    // FIXME. copy construction of virtual base NYI
    if (Base->isVirtual())
      continue;

    CXXRecordDecl *BaseClassDecl
      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
    EmitClassMemberwiseCopy(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
                            Base->getType());
  }

  // Copy the non-static data members, in declaration order.
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;
    
    // For array members, copy element-by-element using the element type.
    QualType FieldType = getContext().getCanonicalType(Field->getType());
    const ConstantArrayType *Array =
      getContext().getAsConstantArrayType(FieldType);
    if (Array)
      FieldType = getContext().getBaseElementType(FieldType);

    if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
      // Class-type member (or array thereof): use the element class's copy
      // constructor via EmitClassMemberwiseCopy.
      CXXRecordDecl *FieldClassDecl
        = cast<CXXRecordDecl>(FieldClassType->getDecl());
      LValue LHS = EmitLValueForField(LoadOfThis, Field, 0);
      LValue RHS = EmitLValueForField(LoadOfSrc, Field, 0);
      if (Array) {
        // Decay the array lvalues to element pointers for the aggregate copy.
        const llvm::Type *BasePtr = ConvertType(FieldType);
        BasePtr = llvm::PointerType::getUnqual(BasePtr);
        llvm::Value *DestBaseAddrPtr =
          Builder.CreateBitCast(LHS.getAddress(), BasePtr);
        llvm::Value *SrcBaseAddrPtr =
          Builder.CreateBitCast(RHS.getAddress(), BasePtr);
        EmitClassAggrMemberwiseCopy(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
                                    FieldClassDecl, FieldType);
      }
      else
        EmitClassMemberwiseCopy(LHS.getAddress(), RHS.getAddress(),
                                0 /*ClassDecl*/, FieldClassDecl, FieldType);
      continue;
    }
    
    // Do a built-in assignment of scalar data members.
    LValue LHS = EmitLValueForFieldInitialization(LoadOfThis, Field, 0);
    LValue RHS = EmitLValueForFieldInitialization(LoadOfSrc, Field, 0);

    if (!hasAggregateLLVMType(Field->getType())) {
      // Scalar: load from source, store to destination.
      RValue RVRHS = EmitLoadOfLValue(RHS, Field->getType());
      EmitStoreThroughLValue(RVRHS, LHS, Field->getType());
    } else if (Field->getType()->isAnyComplexType()) {
      // Complex: copy both components, honoring volatility.
      ComplexPairTy Pair = LoadComplexFromAddr(RHS.getAddress(),
                                               RHS.isVolatileQualified());
      StoreComplexToAddr(Pair, LHS.getAddress(), LHS.isVolatileQualified());
    } else {
      // Any remaining aggregate: bitwise copy.
      EmitAggregateCopy(LHS.getAddress(), RHS.getAddress(), Field->getType());
    }
  }

  InitializeVtablePtrs(ClassDecl);
  FinishFunction();
}
+
+/// SynthesizeCXXCopyAssignment - Implicitly define copy assignment operator.
+/// Before the implicitly-declared copy assignment operator for a class is
+/// implicitly defined, all implicitly- declared copy assignment operators for
+/// its direct base classes and its nonstatic data members shall have been
+/// implicitly defined. [12.8-p12]
+/// The implicitly-defined copy assignment operator for class X performs
+/// memberwise assignment of its subob- jects. The direct base classes of X are
+/// assigned first, in the order of their declaration in
+/// the base-specifier-list, and then the immediate nonstatic data members of X
+/// are assigned, in the order in which they were declared in the class
+/// definition.Each subobject is assigned in the manner appropriate to its type:
+///   if the subobject is of class type, the copy assignment operator for the
+///   class is used (as if by explicit qualification; that is, ignoring any
+///   possible virtual overriding functions in more derived classes);
+///
+///   if the subobject is an array, each element is assigned, in the manner
+///   appropriate to the element type;
+///
+///   if the subobject is of scalar type, the built-in assignment operator is
+///   used.
+void CodeGenFunction::SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
+                                                  llvm::Function *Fn,
+                                                  const FunctionArgList &Args) {
+
+  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
+  assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
+         "SynthesizeCXXCopyAssignment - copy assignment has user declaration");
+  StartFunction(CD, CD->getResultType(), Fn, Args, SourceLocation());
+
+  FunctionArgList::const_iterator i = Args.begin();
+  const VarDecl *ThisArg = i->first;
+  llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
+  llvm::Value *LoadOfThis = Builder.CreateLoad(ThisObj, "this");
+  const VarDecl *SrcArg = (i+1)->first;
+  llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
+  llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);
+
+  for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
+       Base != ClassDecl->bases_end(); ++Base) {
+    // FIXME. copy assignment of virtual base NYI
+    if (Base->isVirtual())
+      continue;
+
+    CXXRecordDecl *BaseClassDecl
+      = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+    EmitClassCopyAssignment(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
+                            Base->getType());
+  }
+
+  for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+       FieldEnd = ClassDecl->field_end();
+       Field != FieldEnd; ++Field) {
+    QualType FieldType = getContext().getCanonicalType((*Field)->getType());
+    const ConstantArrayType *Array =
+      getContext().getAsConstantArrayType(FieldType);
+    if (Array)
+      FieldType = getContext().getBaseElementType(FieldType);
+
+    if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
+      CXXRecordDecl *FieldClassDecl
+      = cast<CXXRecordDecl>(FieldClassType->getDecl());
+      LValue LHS = EmitLValueForField(LoadOfThis, *Field, 0);
+      LValue RHS = EmitLValueForField(LoadOfSrc, *Field, 0);
+      if (Array) {
+        const llvm::Type *BasePtr = ConvertType(FieldType);
+        BasePtr = llvm::PointerType::getUnqual(BasePtr);
+        llvm::Value *DestBaseAddrPtr =
+          Builder.CreateBitCast(LHS.getAddress(), BasePtr);
+        llvm::Value *SrcBaseAddrPtr =
+          Builder.CreateBitCast(RHS.getAddress(), BasePtr);
+        EmitClassAggrCopyAssignment(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
+                                    FieldClassDecl, FieldType);
+      }
+      else
+        EmitClassCopyAssignment(LHS.getAddress(), RHS.getAddress(),
+                               0 /*ClassDecl*/, FieldClassDecl, FieldType);
+      continue;
+    }
+    // Do a built-in assignment of scalar data members.
+    LValue LHS = EmitLValueForField(LoadOfThis, *Field, 0);
+    LValue RHS = EmitLValueForField(LoadOfSrc, *Field, 0);
+    if (!hasAggregateLLVMType(Field->getType())) {
+      RValue RVRHS = EmitLoadOfLValue(RHS, Field->getType());
+      EmitStoreThroughLValue(RVRHS, LHS, Field->getType());
+    } else if (Field->getType()->isAnyComplexType()) {
+      ComplexPairTy Pair = LoadComplexFromAddr(RHS.getAddress(),
+                                               RHS.isVolatileQualified());
+      StoreComplexToAddr(Pair, LHS.getAddress(), LHS.isVolatileQualified());
+    } else {
+      EmitAggregateCopy(LHS.getAddress(), RHS.getAddress(), Field->getType());
+    }
+  }
+
+  // return *this;
+  Builder.CreateStore(LoadOfThis, ReturnValue);
+
+  FinishFunction();
+}
+
/// EmitBaseInitializer - Emit the initialization of one base subobject of
/// ClassDecl from the given CXXBaseOrMemberInitializer and, when exceptions
/// are enabled and the base has a non-trivial destructor, push an EH cleanup
/// that destroys the base if a later initializer throws.
static void EmitBaseInitializer(CodeGenFunction &CGF, 
                                const CXXRecordDecl *ClassDecl,
                                CXXBaseOrMemberInitializer *BaseInit,
                                CXXCtorType CtorType) {
  assert(BaseInit->isBaseInitializer() &&
         "Must have base initializer!");

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  
  const Type *BaseType = BaseInit->getBaseClass();
  CXXRecordDecl *BaseClassDecl =
    cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());

  // FIXME: This method of determining whether a base is virtual is ridiculous;
  // it should be part of BaseInit.
  bool isBaseVirtual = false;
  for (CXXRecordDecl::base_class_const_iterator I = ClassDecl->vbases_begin(),
       E = ClassDecl->vbases_end(); I != E; ++I)
    if (I->getType()->getAs<RecordType>()->getDecl() == BaseClassDecl) {
      isBaseVirtual = true;
      break;
    }

  // The base constructor doesn't construct virtual bases.
  if (CtorType == Ctor_Base && isBaseVirtual)
    return;

  // Compute the offset to the base; we do this directly instead of using
  // GetAddressOfBaseClass because the class doesn't have a vtable pointer
  // at this point.
  // FIXME: This could be refactored back into GetAddressOfBaseClass if it took
  // an extra parameter for whether the derived class is the complete object
  // class.
  const ASTRecordLayout &Layout =
      CGF.getContext().getASTRecordLayout(ClassDecl);
  uint64_t Offset;
  if (isBaseVirtual)
    Offset = Layout.getVBaseClassOffset(BaseClassDecl);
  else
    Offset = Layout.getBaseClassOffset(BaseClassDecl);
  // Index 'this' by the base offset (Offset is in bits, hence the /8) and
  // evaluate the initializer directly into the base subobject's storage.
  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  const llvm::Type *BaseClassType = CGF.ConvertType(QualType(BaseType, 0));
  llvm::Value *V = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
  V = CGF.Builder.CreateConstInBoundsGEP1_64(V, Offset/8);
  V = CGF.Builder.CreateBitCast(V, BaseClassType->getPointerTo());
  CGF.EmitAggExpr(BaseInit->getInit(), V, false, false, true);
  
  if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) {
    // The base is now constructed; if a later initializer throws, it must be
    // destroyed. Recompute its address inside the cleanup block.
    // FIXME: Is this OK for C++0x delegating constructors?
    CodeGenFunction::EHCleanupBlock Cleanup(CGF);

    llvm::Value *ThisPtr = CGF.LoadCXXThis();
    llvm::Value *V = CGF.Builder.CreateBitCast(ThisPtr, Int8PtrTy);
    V = CGF.Builder.CreateConstInBoundsGEP1_64(V, Offset / 8);
    V = CGF.Builder.CreateBitCast(V, BaseClassType->getPointerTo());
    
    CXXDestructorDecl *DD = BaseClassDecl->getDestructor(CGF.getContext());
    CGF.EmitCXXDestructorCall(DD, Dtor_Base, V);
  }
}
+
/// EmitMemberInitializer - Emit the initialization of one non-static data
/// member of ClassDecl from the given CXXBaseOrMemberInitializer and, when
/// exceptions are enabled and the member's class has a non-trivial
/// destructor, push an EH cleanup that destroys the member if a later
/// initializer throws.
static void EmitMemberInitializer(CodeGenFunction &CGF,
                                  const CXXRecordDecl *ClassDecl,
                                  CXXBaseOrMemberInitializer *MemberInit) {
  assert(MemberInit->isMemberInitializer() &&
         "Must have member initializer!");
  
  // non-static data member initializers.
  FieldDecl *Field = MemberInit->getMember();
  QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());

  llvm::Value *ThisPtr = CGF.LoadCXXThis();
  LValue LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
  
  // If we are initializing an anonymous union field, drill down to the field.
  if (MemberInit->getAnonUnionMember()) {
    Field = MemberInit->getAnonUnionMember();
    LHS = CGF.EmitLValueForField(LHS.getAddress(), Field, 0);
    FieldType = Field->getType();
  }

  // FIXME: If there's no initializer and the CXXBaseOrMemberInitializer
  // was implicitly generated, we shouldn't be zeroing memory.
  RValue RHS;
  if (FieldType->isReferenceType()) {
    // Reference members are bound to the initializer expression, not copied.
    RHS = CGF.EmitReferenceBindingToExpr(MemberInit->getInit(),
                                         /*IsInitializer=*/true);
    CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
  } else if (FieldType->isArrayType() && !MemberInit->getInit()) {
    // An array member with no initializer expression is zero-filled.
    CGF.EmitMemSetToZero(LHS.getAddress(), Field->getType());
  } else if (!CGF.hasAggregateLLVMType(Field->getType())) {
    // Scalar member: evaluate and store.
    RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit(), true));
    CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
  } else if (MemberInit->getInit()->getType()->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
                                LHS.isVolatileQualified());
  } else {
    // Aggregate member: evaluate the initializer directly into the member's
    // storage.
    CGF.EmitAggExpr(MemberInit->getInit(), LHS.getAddress(), 
                    LHS.isVolatileQualified(), false, true);
    
    if (!CGF.Exceptions)
      return;

    // Only class-type members with non-trivial destructors need an EH
    // cleanup below.
    const RecordType *RT = FieldType->getAs<RecordType>();
    if (!RT)
      return;
    
    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    if (!RD->hasTrivialDestructor()) {
      // FIXME: Is this OK for C++0x delegating constructors?
      CodeGenFunction::EHCleanupBlock Cleanup(CGF);
      
      llvm::Value *ThisPtr = CGF.LoadCXXThis();
      LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);

      CXXDestructorDecl *DD = RD->getDestructor(CGF.getContext());
      CGF.EmitCXXDestructorCall(DD, Dtor_Complete, LHS.getAddress());
    }
  }
}
+
/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
/// Base initializers are emitted as they are encountered; member initializers
/// are deferred until after the vtable pointers have been installed.
/// FIXME: This needs to take a CXXCtorType.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
                                       CXXCtorType CtorType) {
  const CXXRecordDecl *ClassDecl = CD->getParent();

  llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> MemberInitializers;
  
  // FIXME: Add vbase initialization
  
  for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
       E = CD->init_end();
       B != E; ++B) {
    CXXBaseOrMemberInitializer *Member = (*B);
    
    assert(LiveTemporaries.empty() &&
           "Should not have any live temporaries at initializer start!");

    if (Member->isBaseInitializer())
      EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
    else
      MemberInitializers.push_back(Member);
  }

  // All bases are constructed; install the vtable pointers before running
  // the member initializers.
  InitializeVtablePtrs(ClassDecl);

  for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) {
    assert(LiveTemporaries.empty() &&
           "Should not have any live temporaries at initializer start!");
    
    EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I]);
  }
}
+
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction: members first (reverse
/// declaration order), then non-virtual bases, then — unless this is a base
/// destructor — virtual bases, and finally operator delete for a deleting
/// destructor.
/// FIXME: This needs to take a CXXDtorType.
void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
                                       CXXDtorType DtorType) {
  assert(!DD->isTrivial() &&
         "Should not emit dtor epilogue for trivial dtor!");

  const CXXRecordDecl *ClassDecl = DD->getParent();

  // Collect the fields (in declaration order) whose class type has a
  // non-trivial destructor; only those need explicit destruction.
  llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
  for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
       E = ClassDecl->field_end(); I != E; ++I) {
    const FieldDecl *Field = *I;
    
    // Look through arrays to the underlying element type.
    QualType FieldType = getContext().getCanonicalType(Field->getType());
    FieldType = getContext().getBaseElementType(FieldType);
    
    const RecordType *RT = FieldType->getAs<RecordType>();
    if (!RT)
      continue;
    
    CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
    if (FieldClassDecl->hasTrivialDestructor())
        continue;
    
    FieldDecls.push_back(Field);
  }
  
  // Now destroy the fields, in reverse declaration order.
  for (size_t i = FieldDecls.size(); i > 0; --i) {
    const FieldDecl *Field = FieldDecls[i - 1];
    
    QualType FieldType = Field->getType();
    const ConstantArrayType *Array = 
      getContext().getAsConstantArrayType(FieldType);
    if (Array)
      FieldType = getContext().getBaseElementType(FieldType);
    
    const RecordType *RT = FieldType->getAs<RecordType>();
    CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());

    llvm::Value *ThisPtr = LoadCXXThis();

    LValue LHS = EmitLValueForField(ThisPtr, Field, 
                                    // FIXME: Qualifiers?
                                    /*CVRQualifiers=*/0);
    if (Array) {
      // Array members: destroy elements in reverse order via the aggregate
      // destructor loop, using a pointer to the element type.
      const llvm::Type *BasePtr = ConvertType(FieldType);
      BasePtr = llvm::PointerType::getUnqual(BasePtr);
      llvm::Value *BaseAddrPtr =
        Builder.CreateBitCast(LHS.getAddress(), BasePtr);
      EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
                                Array, BaseAddrPtr);
    } else
      EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
                            Dtor_Complete, LHS.getAddress());
  }

  // Destroy non-virtual bases, in reverse declaration order.
  for (CXXRecordDecl::reverse_base_class_const_iterator I = 
        ClassDecl->bases_rbegin(), E = ClassDecl->bases_rend(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;
    
    // Ignore virtual bases.
    if (Base.isVirtual())
      continue;
    
    CXXRecordDecl *BaseClassDecl
      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
    
    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;
    const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
    
    llvm::Value *V = GetAddressOfBaseClass(LoadCXXThis(),
                                           ClassDecl, BaseClassDecl, 
                                           /*NullCheckValue=*/false);
    EmitCXXDestructorCall(D, Dtor_Base, V);
  }

  // If we're emitting a base destructor, we don't want to emit calls to the
  // virtual bases.
  if (DtorType == Dtor_Base)
    return;
  
  // Handle virtual bases.
  for (CXXRecordDecl::reverse_base_class_const_iterator I = 
       ClassDecl->vbases_rbegin(), E = ClassDecl->vbases_rend(); I != E; ++I) {
    const CXXBaseSpecifier &Base = *I;
    CXXRecordDecl *BaseClassDecl
    = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
    
    // Ignore trivial destructors.
    if (BaseClassDecl->hasTrivialDestructor())
      continue;
    const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
    llvm::Value *V = GetAddressOfBaseClass(LoadCXXThis(),
                                           ClassDecl, BaseClassDecl, 
                                           /*NullCheckValue=*/false);
    EmitCXXDestructorCall(D, Dtor_Base, V);
  }
    
  // If we have a deleting destructor, emit a call to the delete operator.
  if (DtorType == Dtor_Deleting) {
    assert(DD->getOperatorDelete() && 
           "operator delete missing - EmitDtorEpilogue");
    EmitDeleteCall(DD->getOperatorDelete(), LoadCXXThis(),
                   getContext().getTagDeclType(ClassDecl));
  }
}
+
+void CodeGenFunction::SynthesizeDefaultDestructor(const CXXDestructorDecl *Dtor,
+                                                  CXXDtorType DtorType,
+                                                  llvm::Function *Fn,
+                                                  const FunctionArgList &Args) {
+  assert(!Dtor->getParent()->hasUserDeclaredDestructor() &&
+         "SynthesizeDefaultDestructor - destructor has user declaration");
+
+  StartFunction(GlobalDecl(Dtor, DtorType), Dtor->getResultType(), Fn, Args, 
+                SourceLocation());
+  InitializeVtablePtrs(Dtor->getParent());
+  EmitDtorEpilogue(Dtor, DtorType);
+  FinishFunction();
+}
+
+/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
+/// for-loop to call the default constructor on individual members of the
+/// array. 
+/// 'D' is the default constructor for elements of the array, 'ArrayTy' is the
+/// array type and 'ArrayPtr' points to the beginning fo the array.
+/// It is assumed that all relevant checks have been made by the caller.
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+                                          const ConstantArrayType *ArrayTy,
+                                          llvm::Value *ArrayPtr,
+                                          CallExpr::const_arg_iterator ArgBeg,
+                                          CallExpr::const_arg_iterator ArgEnd) {
+
+  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+  llvm::Value * NumElements =
+    llvm::ConstantInt::get(SizeTy, 
+                           getContext().getConstantArrayElementCount(ArrayTy));
+
+  EmitCXXAggrConstructorCall(D, NumElements, ArrayPtr, ArgBeg, ArgEnd);
+}
+
/// EmitCXXAggrConstructorCall - Emit a loop that calls constructor D on each
/// of NumElements elements of the array starting at ArrayPtr, passing the
/// arguments in [ArgBeg, ArgEnd) to each call.
void
CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
                                          llvm::Value *NumElements,
                                          llvm::Value *ArrayPtr,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());

  // Create a temporary for the loop index and initialize it with 0.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeTy, "loop.index");
  llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
  Builder.CreateStore(Zero, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index < number-of-elements) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElements, "isless");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsLess, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the constructor call on the array element.
  Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *Address = Builder.CreateInBoundsGEP(ArrayPtr, Counter, 
                                                   "arrayidx");

  // C++ [class.temporary]p4: 
  // There are two contexts in which temporaries are destroyed at a different
  // point than the end of the full-expression. The first context is when a
  // default constructor is called to initialize an element of an array. 
  // If the constructor has one or more default arguments, the destruction of 
  // every temporary created in a default argument expression is sequenced 
  // before the construction of the next array element, if any.
  
  // Keep track of the current number of live temporaries.
  unsigned OldNumLiveTemporaries = LiveTemporaries.size();

  EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd);

  // Pop the temporaries created while constructing this element (see the
  // [class.temporary]p4 note above).
  while (LiveTemporaries.size() > OldNumLiveTemporaries)
    PopCXXTemporary();
  
  EmitBlock(ContinueBlock);

  // Emit the increment of the loop counter.
  llvm::Value *NextVal = llvm::ConstantInt::get(SizeTy, 1);
  Counter = Builder.CreateLoad(IndexPtr);
  NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
  Builder.CreateStore(NextVal, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}
+
+/// EmitCXXAggrDestructorCall - calls the default destructor on array
+/// elements in reverse order of construction.
+void
+CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+                                           const ArrayType *Array,
+                                           llvm::Value *This) {
+  const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
+  assert(CA && "Do we support VLA for destruction ?");
+  uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
+  
+  const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
+  llvm::Value* ElementCountPtr = llvm::ConstantInt::get(SizeLTy, ElementCount);
+  EmitCXXAggrDestructorCall(D, ElementCountPtr, This);
+}
+
/// EmitCXXAggrDestructorCall - calls the default destructor on array
/// elements in reverse order of construction. The loop index starts at
/// UpperCount and counts down; each iteration destroys element (index - 1).
void
CodeGenFunction::EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
                                           llvm::Value *UpperCount,
                                           llvm::Value *This) {
  const llvm::Type *SizeLTy = ConvertType(getContext().getSizeType());
  llvm::Value *One = llvm::ConstantInt::get(SizeLTy, 1);
  
  // Create a temporary for the loop index and initialize it with count of
  // array elements.
  llvm::Value *IndexPtr = CreateTempAlloca(SizeLTy, "loop.index");

  // Store the number of elements in the index pointer.
  Builder.CreateStore(UpperCount, IndexPtr);

  // Start the loop with a block that tests the condition.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");

  EmitBlock(CondBlock);

  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // Generate: if (loop-index != 0) fall to the loop body,
  // otherwise, go to the block after the for-loop.
  llvm::Value* zeroConstant =
    llvm::Constant::getNullValue(SizeLTy);
  llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
  llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
                                            "isne");
  // If the condition is true, execute the body.
  Builder.CreateCondBr(IsNE, ForBody, AfterFor);

  EmitBlock(ForBody);

  llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
  // Inside the loop body, emit the destructor call on the array element at
  // (index - 1), so elements are destroyed last-to-first.
  Counter = Builder.CreateLoad(IndexPtr);
  Counter = Builder.CreateSub(Counter, One);
  llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
  EmitCXXDestructorCall(D, Dtor_Complete, Address);

  EmitBlock(ContinueBlock);

  // Emit the decrement of the loop counter.
  Counter = Builder.CreateLoad(IndexPtr);
  Counter = Builder.CreateSub(Counter, One, "dec");
  Builder.CreateStore(Counter, IndexPtr);

  // Finally, branch back up to the condition for the next iteration.
  EmitBranch(CondBlock);

  // Emit the fall-through block.
  EmitBlock(AfterFor, true);
}
+
/// GenerateCXXAggrDestructorHelper - Generates a helper function which when
/// invoked, calls the default destructor on array elements in reverse order of
/// construction. The helper has internal linkage, a unique "__tcf_N" name,
/// and a void(void*) prototype (the parameter is unused in the body); the
/// returned constant is the helper bitcast to i8*.
llvm::Constant * 
CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
                                                 const ArrayType *Array,
                                                 llvm::Value *This) {
  // Build the helper's single void* parameter.
  FunctionArgList Args;
  ImplicitParamDecl *Dst =
    ImplicitParamDecl::Create(getContext(), 0,
                              SourceLocation(), 0,
                              getContext().getPointerType(getContext().VoidTy));
  Args.push_back(std::make_pair(Dst, Dst->getType()));
  
  // Give the helper a unique internal name: __tcf_<N>.
  llvm::SmallString<16> Name;
  llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount);
  QualType R = getContext().VoidTy;
  const CGFunctionInfo &FI
    = CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);
  const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
  llvm::Function *Fn =
    llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
                           Name.str(),
                           &CGM.getModule());
  IdentifierInfo *II = &CGM.getContext().Idents.get(Name.str());
  FunctionDecl *FD = FunctionDecl::Create(getContext(),
                                          getContext().getTranslationUnitDecl(),
                                          SourceLocation(), II, R, 0,
                                          FunctionDecl::Static,
                                          false, true);
  StartFunction(FD, R, Fn, Args, SourceLocation());
  // Emit the element-wise destruction loop inside the helper.
  // NOTE(review): 'This' is an llvm::Value created in the calling function
  // but used here inside the helper's body — verify callers only pass values
  // (e.g. constants/globals) that are valid across functions.
  QualType BaseElementTy = getContext().getBaseElementType(Array);
  const llvm::Type *BasePtr = ConvertType(BaseElementTy);
  BasePtr = llvm::PointerType::getUnqual(BasePtr);
  llvm::Value *BaseAddrPtr = Builder.CreateBitCast(This, BasePtr);
  EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
  FinishFunction();
  // Hand the helper back as an i8* so callers can treat it uniformly.
  llvm::Type *Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),
                                              0);
  llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
  return m;
}
+
+
+/// EmitCXXConstructorCall - Emit a call to the given constructor variant on
+/// the object at \p This with the arguments [ArgBeg, ArgEnd). Trivial
+/// constructors are handled inline: a trivial default constructor emits no
+/// code at all, and a trivial copy constructor is lowered to an aggregate
+/// copy of the source object.
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+                                        CXXCtorType Type,
+                                        llvm::Value *This,
+                                        CallExpr::const_arg_iterator ArgBeg,
+                                        CallExpr::const_arg_iterator ArgEnd) {
+  if (D->isTrivial()) {
+    if (ArgBeg == ArgEnd) {
+      // Trivial default constructor, no codegen required.
+      assert(D->isDefaultConstructor() &&
+             "trivial 0-arg ctor not a default ctor");
+      return;
+    }
+
+    assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+    assert(D->isCopyConstructor() && "trivial 1-arg ctor not a copy ctor");
+
+    // Lower the trivial copy constructor to an aggregate copy.
+    const Expr *E = (*ArgBeg);
+    QualType Ty = E->getType();
+    llvm::Value *Src = EmitLValue(E).getAddress();
+    EmitAggregateCopy(This, Src, Ty);
+    return;
+  }
+
+  // Non-trivial: emit a real call, passing the VTT parameter when required.
+  llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type));
+  llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+  EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+}
+
+/// EmitCXXDestructorCall - Emit a call to the given destructor variant on the
+/// object at \p This, passing the VTT parameter when one is required.
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+                                            CXXDtorType Type,
+                                            llvm::Value *This) {
+  llvm::Value *VTTParam = GetVTTParameter(*this, GlobalDecl(DD, Type));
+  llvm::Value *DtorFn = CGM.GetAddrOfCXXDestructor(DD, Type);
+
+  // Destructors take no explicit arguments.
+  EmitCXXMemberCall(DD, DtorFn, ReturnValueSlot(), This, VTTParam, 0, 0);
+}
+
+/// GetVirtualBaseClassOffset - Load, from the vtable of the object at
+/// \p This, the offset of the virtual base \p BaseClassDecl within
+/// \p ClassDecl. The result is a value of pointer-difference type.
+llvm::Value *
+CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
+                                           const CXXRecordDecl *ClassDecl,
+                                           const CXXRecordDecl *BaseClassDecl) {
+  const llvm::Type *Int8PtrTy = 
+    llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+  // Load the vtable pointer stored at the start of the object.
+  llvm::Value *VTablePtr = Builder.CreateBitCast(This, 
+                                                 Int8PtrTy->getPointerTo());
+  VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
+
+  int64_t VBaseOffsetIndex = 
+    CGM.getVtableInfo().getVirtualBaseOffsetIndex(ClassDecl, BaseClassDecl);
+  
+  // Compute the address of the virtual base offset slot in the vtable.
+  llvm::Value *VBaseOffsetPtr = 
+    Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetIndex, "vbase.offset.ptr");
+  const llvm::Type *PtrDiffTy = 
+    ConvertType(getContext().getPointerDiffType());
+  
+  VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr, 
+                                         PtrDiffTy->getPointerTo());
+                                         
+  // Load the offset itself as a ptrdiff_t.
+  llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+  
+  return VBaseOffset;
+}
+
+/// InitializeVtablePtrs - Emit stores that initialize every vtable pointer in
+/// the 'this' object for \p ClassDecl: first those of its virtual bases, then
+/// (recursively) its non-virtual bases and the class itself. No-op for
+/// non-dynamic classes.
+void CodeGenFunction::InitializeVtablePtrs(const CXXRecordDecl *ClassDecl) {
+  if (!ClassDecl->isDynamicClass())
+    return;
+
+  llvm::Constant *Vtable = CGM.getVtableInfo().getVtable(ClassDecl);
+  // Address-point table for ClassDecl as the most-derived class.
+  CGVtableInfo::AddrSubMap_t& AddressPoints =
+      *(*CGM.getVtableInfo().AddressPoints[ClassDecl])[ClassDecl];
+  llvm::Value *ThisPtr = LoadCXXThis();
+  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
+
+  // Store address points for virtual bases
+  for (CXXRecordDecl::base_class_const_iterator I = 
+       ClassDecl->vbases_begin(), E = ClassDecl->vbases_end(); I != E; ++I) {
+    const CXXBaseSpecifier &Base = *I;
+    CXXRecordDecl *BaseClassDecl
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+    uint64_t Offset = Layout.getVBaseClassOffset(BaseClassDecl);
+    InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
+                                  ThisPtr, Offset);
+  }
+
+  // Store address points for non-virtual bases and current class
+  InitializeVtablePtrsRecursive(ClassDecl, Vtable, AddressPoints, ThisPtr, 0);
+}
+
+/// InitializeVtablePtrsRecursive - Recurse into the non-virtual bases of
+/// \p ClassDecl, then store \p ClassDecl's vtable address point at \p Offset
+/// within the object at \p ThisPtr. \p Offset is divided by 8 before use as
+/// a byte offset, so it is presumably in bits — confirm against
+/// ASTRecordLayout.
+void CodeGenFunction::InitializeVtablePtrsRecursive(
+        const CXXRecordDecl *ClassDecl,
+        llvm::Constant *Vtable,
+        CGVtableInfo::AddrSubMap_t& AddressPoints,
+        llvm::Value *ThisPtr,
+        uint64_t Offset) {
+  if (!ClassDecl->isDynamicClass())
+    return;
+
+  // Store address points for non-virtual bases
+  const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
+  for (CXXRecordDecl::base_class_const_iterator I = 
+       ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
+    const CXXBaseSpecifier &Base = *I;
+    // Virtual bases are handled once, by InitializeVtablePtrs itself.
+    if (Base.isVirtual())
+      continue;
+    CXXRecordDecl *BaseClassDecl
+      = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+    uint64_t NewOffset = Offset + Layout.getBaseClassOffset(BaseClassDecl);
+    InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
+                                  ThisPtr, NewOffset);
+  }
+
+  // Compute the address point
+  assert(AddressPoints.count(std::make_pair(ClassDecl, Offset)) &&
+         "Missing address point for class");
+  uint64_t AddressPoint = AddressPoints[std::make_pair(ClassDecl, Offset)];
+  llvm::Value *VtableAddressPoint =
+      Builder.CreateConstInBoundsGEP2_64(Vtable, 0, AddressPoint);
+
+  // Compute the address to store the address point
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+  llvm::Value *VtableField = Builder.CreateBitCast(ThisPtr, Int8PtrTy);
+  VtableField = Builder.CreateConstInBoundsGEP1_64(VtableField, Offset/8);
+  const llvm::Type *AddressPointPtrTy =
+      VtableAddressPoint->getType()->getPointerTo();
+  VtableField = Builder.CreateBitCast(VtableField, AddressPointPtrTy);
+
+  // Store address point
+  Builder.CreateStore(VtableAddressPoint, VtableField);
+}
+
+/// LoadCXXVTT - Load the VTT parameter of the current function. May only be
+/// called while emitting a C++ constructor or destructor.
+llvm::Value *CodeGenFunction::LoadCXXVTT() {
+  bool InCtorOrDtor = isa<CXXConstructorDecl>(CurFuncDecl) ||
+                      isa<CXXDestructorDecl>(CurFuncDecl);
+  assert(InCtorOrDtor &&
+         "Must be in a C++ ctor or dtor to load the vtt parameter");
+  (void)InCtorOrDtor;
+
+  llvm::Value *VTTAddr = LocalDeclMap[CXXVTTDecl];
+  return Builder.CreateLoad(VTTAddr, "vtt");
+}
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..7138bc0
--- /dev/null
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,1844 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/System/Path.h"
+#include "llvm/Target/TargetMachine.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+/// CGDebugInfo - Set up debug info generation for the given module. The main
+/// compile unit and the generic block-literal type are both created lazily
+/// (see getOrCreateCompileUnit and CreateType(BlockPointerType)).
+CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
+  : CGM(CGM), isMainCompileUnitCreated(false), DebugFactory(CGM.getModule()),
+    BlockLiteralGenericSet(false) {
+}
+
+CGDebugInfo::~CGDebugInfo() {
+  // Every lexical region pushed onto RegionStack must have been popped by
+  // the time debug info generation finishes.
+  assert(RegionStack.empty() && "Region stack mismatch, stack not empty!");
+}
+
+/// setLocation - Update the current source location. Invalid locations are
+/// ignored, preserving the previously recorded location.
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+  if (!Loc.isValid())
+    return;
+
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  CurLoc = SM.getInstantiationLoc(Loc);
+}
+
+/// getContextDescriptor - Get context info for the decl. Returns the cached
+/// region descriptor for \p Context if one exists, a namespace descriptor
+/// for namespace decls, and the compile unit for null or any other context.
+llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context,
+                                              llvm::DIDescriptor &CompileUnit) {
+  if (!Context)
+    return CompileUnit;
+
+  // Previously emitted regions are cached as weak handles; reuse them.
+  llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+    I = RegionMap.find(Context);
+  if (I != RegionMap.end())
+    return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second));
+
+  // Check namespace.
+  if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+    return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl, CompileUnit));
+  
+  return CompileUnit;
+}
+
+/// getFunctionName - Get function name for the given FunctionDecl. If the
+/// name is constructed on demand (e.g. C++ destructor) then a copy is stored
+/// in side storage owned by CGDebugInfo and a reference to that copy is
+/// returned.
+llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+  assert (FD && "Invalid FunctionDecl!");
+  if (IdentifierInfo *FII = FD->getIdentifier())
+    return FII->getName();
+
+  // No simple identifier; build a human readable name for debug info.
+  std::string Printed = FD->getNameAsString();
+
+  // Stash a copy in the side allocator so the returned StringRef stays valid.
+  char *Storage = DebugInfoNames.Allocate<char>(Printed.length());
+  memcpy(Storage, Printed.data(), Printed.length());
+  return llvm::StringRef(Storage, Printed.length());
+}
+
+/// getOrCreateCompileUnit - Get the compile unit from the cache or create a new
+/// one if necessary. This returns null for invalid source locations.
+llvm::DICompileUnit CGDebugInfo::getOrCreateCompileUnit(SourceLocation Loc) {
+  // Get source file information.
+  const char *FileName =  "<unknown>";
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  if (Loc.isValid()) {
+    PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+    FileName = PLoc.getFilename();
+    // Note: the cache is keyed on the raw encoding of the include location.
+    unsigned FID = PLoc.getIncludeLoc().getRawEncoding();
+
+    // See if this compile unit has been used before for this valid location.
+    llvm::DICompileUnit &Unit = CompileUnitCache[FID];
+    if (!Unit.isNull()) return Unit;
+  }
+
+  // Get absolute path name.
+  llvm::sys::Path AbsFileName(FileName);
+  AbsFileName.makeAbsolute();
+
+  // See if this compile unit is representing main source file. Each source
+  // file has corresponding compile unit. There is only one main source
+  // file at a time.
+  bool isMain = false;
+  const LangOptions &LO = CGM.getLangOptions();
+  const CodeGenOptions &CGO = CGM.getCodeGenOpts();
+  if (isMainCompileUnitCreated == false) {
+    // An explicit -main-file-name takes precedence over the source manager's
+    // notion of the main file.
+    if (!CGO.MainFileName.empty()) {
+      if (AbsFileName.getLast() == CGO.MainFileName)
+        isMain = true;
+    } else {
+      if (Loc.isValid() && SM.isFromMainFile(Loc))
+        isMain = true;
+    }
+    if (isMain)
+      isMainCompileUnitCreated = true;
+  }
+
+  // Map the language options onto a DWARF source-language code.
+  unsigned LangTag;
+  if (LO.CPlusPlus) {
+    if (LO.ObjC1)
+      LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+    else
+      LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+  } else if (LO.ObjC1) {
+    LangTag = llvm::dwarf::DW_LANG_ObjC;
+  } else if (LO.C99) {
+    LangTag = llvm::dwarf::DW_LANG_C99;
+  } else {
+    LangTag = llvm::dwarf::DW_LANG_C89;
+  }
+
+  const char *Producer =
+#ifdef CLANG_VENDOR
+    CLANG_VENDOR
+#endif
+    "clang " CLANG_VERSION_STRING;
+
+  // Figure out which version of the ObjC runtime we have.
+  unsigned RuntimeVers = 0;
+  if (LO.ObjC1)
+    RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+
+  // Create new compile unit.
+  return DebugFactory.CreateCompileUnit(
+    LangTag, AbsFileName.getLast(), AbsFileName.getDirname(), Producer, isMain,
+    LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
+}
+
+/// CreateType - Get the Basic type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT,
+                                     llvm::DICompileUnit Unit) {
+  // Map the builtin kind to a DWARF base-type encoding. 'void' (and any
+  // kind not listed here) deliberately gets an empty DIType.
+  unsigned Encoding = 0;
+  switch (BT->getKind()) {
+  default:
+  case BuiltinType::Void:
+    return llvm::DIType();
+  case BuiltinType::UChar:
+  case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+  case BuiltinType::Char_S:
+  case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+  case BuiltinType::UShort:
+  case BuiltinType::UInt:
+  case BuiltinType::ULong:
+  case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+  case BuiltinType::Short:
+  case BuiltinType::Int:
+  case BuiltinType::Long:
+  case BuiltinType::LongLong:  Encoding = llvm::dwarf::DW_ATE_signed; break;
+  case BuiltinType::Bool:      Encoding = llvm::dwarf::DW_ATE_boolean; break;
+  case BuiltinType::Float:
+  case BuiltinType::LongDouble:
+  case BuiltinType::Double:    Encoding = llvm::dwarf::DW_ATE_float; break;
+  }
+  // Bit size, align and offset of the type.
+  uint64_t Size = CGM.getContext().getTypeSize(BT);
+  uint64_t Align = CGM.getContext().getTypeAlign(BT);
+  uint64_t Offset = 0;
+
+  llvm::DIType DbgTy = 
+    DebugFactory.CreateBasicType(Unit,
+                                 BT->getName(CGM.getContext().getLangOptions()),
+                                 Unit, 0, Size, Align,
+                                 Offset, /*flags*/ 0, Encoding);
+  return DbgTy;
+}
+
+/// CreateType - Build a basic debug type for a complex number type. Complex
+/// integers use the DW_ATE_lo_user encoding; all other complex types are
+/// encoded as DW_ATE_complex_float.
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  unsigned Encoding = Ty->isComplexIntegerType()
+                          ? llvm::dwarf::DW_ATE_lo_user
+                          : llvm::dwarf::DW_ATE_complex_float;
+
+  // Bit size, align and offset of the type.
+  uint64_t Size = CGM.getContext().getTypeSize(Ty);
+  uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+  uint64_t Offset = 0;
+
+  return DebugFactory.CreateBasicType(Unit, "complex",
+                                      Unit, 0, Size, Align,
+                                      Offset, /*flags*/ 0, Encoding);
+}
+
+/// CreateQualifiedType - Get the qualified type from the cache or create
+/// a new one if necessary. Peels exactly one of const/volatile/restrict per
+/// call (in that priority order), wrapping the rest in the matching DWARF
+/// derived type; remaining qualifiers are handled by recursing through
+/// getOrCreateType.
+llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DICompileUnit Unit) {
+  QualifierCollector Qc;
+  const Type *T = Qc.strip(Ty);
+
+  // Ignore these qualifiers for now.
+  Qc.removeObjCGCAttr();
+  Qc.removeAddressSpace();
+
+  // We will create one Derived type for one qualifier and recurse to handle any
+  // additional ones.
+  unsigned Tag;
+  if (Qc.hasConst()) {
+    Tag = llvm::dwarf::DW_TAG_const_type;
+    Qc.removeConst();
+  } else if (Qc.hasVolatile()) {
+    Tag = llvm::dwarf::DW_TAG_volatile_type;
+    Qc.removeVolatile();
+  } else if (Qc.hasRestrict()) {
+    Tag = llvm::dwarf::DW_TAG_restrict_type;
+    Qc.removeRestrict();
+  } else {
+    assert(Qc.empty() && "Unknown type qualifier for debug info");
+    return getOrCreateType(QualType(T, 0), Unit);
+  }
+
+  llvm::DIType FromTy = getOrCreateType(Qc.apply(T), Unit);
+
+  // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+  // CVR derived types.
+  llvm::DIType DbgTy =
+    DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
+                                   0, 0, 0, 0, 0, FromTy);
+  return DbgTy;
+}
+
+/// CreateType - ObjC object pointers are emitted as ordinary pointer types
+/// referencing the pointee's debug type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+                               Ty->getPointeeType(), Unit);
+}
+
+/// CreateType - Build the debug type for a C/C++ pointer type.
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  llvm::DIType DbgTy =
+    CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+                          Ty->getPointeeType(), Unit);
+  return DbgTy;
+}
+
+/// CreatePointerLikeType - Create a derived debug type (pointer, reference,
+/// block pointer, ...) with tag \p Tag referencing \p PointeeTy's debug type.
+llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
+                                                const Type *Ty, 
+                                                QualType PointeeTy,
+                                                llvm::DICompileUnit Unit) {
+  llvm::DIType EltTy = getOrCreateType(PointeeTy, Unit);
+
+  // Bit size, align and offset of the type.
+  
+  // Size is always the size of a pointer. We can't use getTypeSize here
+  // because that does not return the correct value for references.
+  uint64_t Size = 
+    CGM.getContext().Target.getPointerWidth(PointeeTy.getAddressSpace());
+  uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+  return
+    DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
+                                   0, Size, Align, 0, 0, EltTy);
+  
+}
+
+/// CreateType - Build the debug type for a block pointer. The result is a
+/// pointer to a synthesized __block_literal_generic struct, which itself
+/// references a synthesized __block_descriptor struct. Since the layout is
+/// generic, the result is built once, cached in BlockLiteralGeneric, and
+/// shared by every block pointer type.
+llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  if (BlockLiteralGenericSet)
+    return BlockLiteralGeneric;
+
+  llvm::DICompileUnit DefUnit;
+  unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+  llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+  llvm::DIType FieldTy;
+
+  QualType FType;
+  uint64_t FieldSize, FieldOffset;
+  unsigned FieldAlign;
+
+  llvm::DIArray Elements;
+  llvm::DIType EltTy, DescTy;
+
+  // First build the __block_descriptor struct: { reserved, Size }.
+  FieldOffset = 0;
+  FType = CGM.getContext().UnsignedLongTy;
+  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+  FieldSize = CGM.getContext().getTypeSize(FType);
+  FieldAlign = CGM.getContext().getTypeAlign(FType);
+  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                           "reserved", DefUnit,
+                                           0, FieldSize, FieldAlign,
+                                           FieldOffset, 0, FieldTy);
+  EltTys.push_back(FieldTy);
+
+  FieldOffset += FieldSize;
+  FType = CGM.getContext().UnsignedLongTy;
+  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+  FieldSize = CGM.getContext().getTypeSize(FType);
+  FieldAlign = CGM.getContext().getTypeAlign(FType);
+  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                           "Size", DefUnit,
+                                           0, FieldSize, FieldAlign,
+                                           FieldOffset, 0, FieldTy);
+  EltTys.push_back(FieldTy);
+
+  FieldOffset += FieldSize;
+  Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+  EltTys.clear();
+
+  unsigned Flags = llvm::DIType::FlagAppleBlock;
+
+  EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
+                                           DefUnit, 0, FieldOffset, 0, 0, Flags,
+                                           llvm::DIType(), Elements);
+
+  // Bit size, align and offset of the type.
+  uint64_t Size = CGM.getContext().getTypeSize(Ty);
+  uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+  DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+                                          Unit, "", llvm::DICompileUnit(),
+                                          0, Size, Align, 0, 0, EltTy);
+
+  // Now build the __block_literal_generic struct:
+  // { __isa, __flags, __reserved, __FuncPtr, __descriptor }.
+  FieldOffset = 0;
+  FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+  FieldSize = CGM.getContext().getTypeSize(FType);
+  FieldAlign = CGM.getContext().getTypeAlign(FType);
+  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                           "__isa", DefUnit,
+                                           0, FieldSize, FieldAlign,
+                                           FieldOffset, 0, FieldTy);
+  EltTys.push_back(FieldTy);
+
+  FieldOffset += FieldSize;
+  FType = CGM.getContext().IntTy;
+  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+  FieldSize = CGM.getContext().getTypeSize(FType);
+  FieldAlign = CGM.getContext().getTypeAlign(FType);
+  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                           "__flags", DefUnit,
+                                           0, FieldSize, FieldAlign,
+                                           FieldOffset, 0, FieldTy);
+  EltTys.push_back(FieldTy);
+
+  FieldOffset += FieldSize;
+  FType = CGM.getContext().IntTy;
+  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+  FieldSize = CGM.getContext().getTypeSize(FType);
+  FieldAlign = CGM.getContext().getTypeAlign(FType);
+  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                           "__reserved", DefUnit,
+                                           0, FieldSize, FieldAlign,
+                                           FieldOffset, 0, FieldTy);
+  EltTys.push_back(FieldTy);
+
+  FieldOffset += FieldSize;
+  FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+  FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+  FieldSize = CGM.getContext().getTypeSize(FType);
+  FieldAlign = CGM.getContext().getTypeAlign(FType);
+  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                           "__FuncPtr", DefUnit,
+                                           0, FieldSize, FieldAlign,
+                                           FieldOffset, 0, FieldTy);
+  EltTys.push_back(FieldTy);
+
+  // Note: size/align for __descriptor are taken from the block pointer type
+  // itself, and its member type is the descriptor pointer built above.
+  FieldOffset += FieldSize;
+  FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+  FieldTy = DescTy;
+  FieldSize = CGM.getContext().getTypeSize(Ty);
+  FieldAlign = CGM.getContext().getTypeAlign(Ty);
+  FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                           "__descriptor", DefUnit,
+                                           0, FieldSize, FieldAlign,
+                                           FieldOffset, 0, FieldTy);
+  EltTys.push_back(FieldTy);
+
+  FieldOffset += FieldSize;
+  Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+  EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
+                                           DefUnit, 0, FieldOffset, 0, 0, Flags,
+                                           llvm::DIType(), Elements);
+
+  // Cache the pointer-to-literal type; every block pointer reuses it.
+  BlockLiteralGenericSet = true;
+  BlockLiteralGeneric
+    = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
+                                     "", llvm::DICompileUnit(),
+                                     0, Size, Align, 0, 0, EltTy);
+  return BlockLiteralGeneric;
+}
+
+/// CreateType - Build the debug type for a typedef as a DW_TAG_typedef
+/// derived from the underlying type's debug info.
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  // Typedefs are derived from some other type.  If we have a typedef of a
+  // typedef, make sure to emit the whole chain.
+  llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+
+  // We don't set size information, but do specify where the typedef was
+  // declared.
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  PresumedLoc PLoc = SM.getPresumedLoc(Ty->getDecl()->getLocation());
+  unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+  // Parent the typedef under its declaration context (e.g. a namespace).
+  llvm::DIDescriptor TyContext 
+    = getContextDescriptor(dyn_cast<Decl>(Ty->getDecl()->getDeclContext()),
+                           Unit);
+  llvm::DIType DbgTy = 
+    DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_typedef, 
+                                   TyContext,
+                                   Ty->getDecl()->getName(), Unit,
+                                   Line, 0, 0, 0, 0, Src);
+  return DbgTy;
+}
+
+/// CreateType - Build a DW_TAG_subroutine_type for a function type. The
+/// element array holds the result type first, followed by the parameter
+/// types (parameters are emitted only when the type has a prototype).
+llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+  // Add the result type at least.
+  EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+
+  // Set up remainder of arguments if there is a prototype.
+  // FIXME: IF NOT, HOW IS THIS REPRESENTED?  llvm-gcc doesn't represent '...'!
+  if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+    for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+      EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+  } else {
+    // FIXME: Handle () case in C.  llvm-gcc doesn't do it either.
+  }
+
+  llvm::DIArray EltTypeArray =
+    DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+  llvm::DIType DbgTy =
+    DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+                                     Unit, "", llvm::DICompileUnit(),
+                                     0, 0, 0, 0, 0,
+                                     llvm::DIType(), EltTypeArray);
+  return DbgTy;
+}
+
+/// CollectRecordFields - A helper function to collect debug info for
+/// record fields. This is used while creating debug info entry for a Record.
+/// Unnamed fields are skipped but still consume a field index, keeping
+/// getFieldOffset lookups in sync with the record layout.
+void CGDebugInfo::
+CollectRecordFields(const RecordDecl *RD, llvm::DICompileUnit Unit,
+                    llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+  unsigned FieldNo = 0;
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+  for (RecordDecl::field_iterator I = RD->field_begin(),
+                                  E = RD->field_end();
+       I != E; ++I, ++FieldNo) {
+    FieldDecl *Field = *I;
+    llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+    llvm::StringRef FieldName = Field->getName();
+
+    // Ignore unnamed fields.
+    if (FieldName.empty())
+      continue;
+
+    // Get the location for the field.
+    SourceLocation FieldDefLoc = Field->getLocation();
+    PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
+    llvm::DICompileUnit FieldDefUnit;
+    unsigned FieldLine = 0;
+
+    if (!PLoc.isInvalid()) {
+      FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
+      FieldLine = PLoc.getLine();
+    }
+
+    // Incomplete arrays have no size or alignment to report.
+    QualType FType = Field->getType();
+    uint64_t FieldSize = 0;
+    unsigned FieldAlign = 0;
+    if (!FType->isIncompleteArrayType()) {
+
+      // Bit size, align and offset of the type.
+      FieldSize = CGM.getContext().getTypeSize(FType);
+      // For bit-fields, report the declared bit width instead.
+      Expr *BitWidth = Field->getBitWidth();
+      if (BitWidth)
+        FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+      FieldAlign =  CGM.getContext().getTypeAlign(FType);
+    }
+
+    uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+    // Create a DW_TAG_member node to remember the offset of this field in the
+    // struct.  FIXME: This is an absolutely insane way to capture this
+    // information.  When we gut debug info, this should be fixed.
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             FieldName, FieldDefUnit,
+                                             FieldLine, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+  }
+}
+
+/// getOrCreateMethodType - CXXMethodDecl's type is a FunctionType. This
+/// function type is not updated to include implicit "this" pointer. Use this
+/// routine to get a method type which includes "this" pointer.
+llvm::DIType
+CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
+                                   llvm::DICompileUnit Unit) {
+  llvm::DIType FnTy = getOrCreateType(Method->getType(), Unit);
+  
+  // Static methods do not need "this" pointer argument.
+  if (Method->isStatic())
+    return FnTy;
+
+  // Add "this" pointer.
+
+  llvm::DIArray Args = llvm::DICompositeType(FnTy.getNode()).getTypeArray();
+  assert (Args.getNumElements() && "Invalid number of arguments!");
+
+  llvm::SmallVector<llvm::DIDescriptor, 16> Elts;
+
+  // First element is always return type. For 'void' functions it is NULL.
+  Elts.push_back(Args.getElement(0));
+
+  // "this" pointer is always first argument.
+  ASTContext &Context = CGM.getContext();
+  QualType ThisPtr = 
+    Context.getPointerType(Context.getTagDeclType(Method->getParent()));
+  // Mark the "this" pointer type artificial and cache the node so later
+  // lookups of the same pointer type reuse it.
+  llvm::DIType ThisPtrType = 
+    DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
+  TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType.getNode();  
+  Elts.push_back(ThisPtrType);
+
+  // Copy rest of the arguments.
+  for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
+    Elts.push_back(Args.getElement(i));
+
+  llvm::DIArray EltTypeArray =
+    DebugFactory.GetOrCreateArray(Elts.data(), Elts.size());
+
+  // Rebuild the subroutine type with the extended argument list.
+  return
+    DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+                                     Unit, "", llvm::DICompileUnit(),
+                                     0, 0, 0, 0, 0,
+                                     llvm::DIType(), EltTypeArray);
+}
+
+/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
+/// a single member function GlobalDecl. The subprogram is parented in
+/// \p RecordTy; for virtual methods it also records virtuality, vtable
+/// index, and the containing type.
+llvm::DISubprogram
+CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
+                                     llvm::DICompileUnit Unit,
+                                     llvm::DICompositeType &RecordTy) {
+  bool IsCtorOrDtor = 
+    isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+  
+  llvm::StringRef MethodName = getFunctionName(Method);
+  llvm::StringRef MethodLinkageName;
+  llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
+  
+  // Since a single ctor/dtor corresponds to multiple functions, it doesn't
+  // make sense to give a single ctor/dtor a linkage name.
+  if (!IsCtorOrDtor)
+    MethodLinkageName = CGM.getMangledName(Method);
+
+  SourceManager &SM = CGM.getContext().getSourceManager();
+
+  // Get the location for the method.
+  SourceLocation MethodDefLoc = Method->getLocation();
+  PresumedLoc PLoc = SM.getPresumedLoc(MethodDefLoc);
+  llvm::DICompileUnit MethodDefUnit;
+  unsigned MethodLine = 0;
+
+  if (!PLoc.isInvalid()) {
+    MethodDefUnit = getOrCreateCompileUnit(MethodDefLoc);
+    MethodLine = PLoc.getLine();
+  }
+
+  // Collect virtual method info.
+  llvm::DIType ContainingType;
+  unsigned Virtuality = 0; 
+  unsigned VIndex = 0;
+  
+  if (Method->isVirtual()) {
+    if (Method->isPure())
+      Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+    else
+      Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+    
+    // It doesn't make sense to give a virtual destructor a vtable index,
+    // since a single destructor has two entries in the vtable.
+    if (!isa<CXXDestructorDecl>(Method))
+      VIndex = CGM.getVtableInfo().getMethodVtableIndex(Method);
+    ContainingType = RecordTy;
+  }
+
+  llvm::DISubprogram SP =
+    DebugFactory.CreateSubprogram(RecordTy , MethodName, MethodName, 
+                                  MethodLinkageName,
+                                  MethodDefUnit, MethodLine,
+                                  MethodTy, /*isLocalToUnit=*/false, 
+                                  Method->isThisDeclarationADefinition(),
+                                  Virtuality, VIndex, ContainingType);
+  
+  // Don't cache ctors or dtors since we have to emit multiple functions for
+  // a single ctor or dtor.
+  if (!IsCtorOrDtor && Method->isThisDeclarationADefinition())
+    SPCache[Method] = llvm::WeakVH(SP.getNode());
+
+  return SP;
+}
+
+/// CollectCXXMemberFunctions - A helper function to collect debug info for
+/// C++ member functions. This is used while creating the debug info entry
+/// for a Record. Implicit methods that were never used are skipped.
+void CGDebugInfo::
+CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DICompileUnit Unit,
+                          llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+                          llvm::DICompositeType &RecordTy) {
+  CXXRecordDecl::method_iterator MI = RD->method_begin();
+  CXXRecordDecl::method_iterator ME = RD->method_end();
+  for (; MI != ME; ++MI) {
+    const CXXMethodDecl *M = *MI;
+
+    // Compiler-generated members that are never used get no debug info.
+    if (M->isImplicit() && !M->isUsed())
+      continue;
+
+    EltTys.push_back(CreateCXXMemberFunction(M, Unit, RecordTy));
+  }
+}
+
+/// CollectCXXBases - A helper function to collect debug info for
+/// C++ base classes. This is used while creating debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXBases(const CXXRecordDecl *RD, llvm::DICompileUnit Unit,
+                llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+                llvm::DICompositeType &RecordTy) {
+
+  const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+  for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
+         BE = RD->bases_end(); BI != BE; ++BI) {
+    const CXXRecordDecl *BaseDecl =
+      cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
+
+    uint64_t BaseOffset;
+    unsigned BFlags = 0;
+    if (BI->isVirtual()) {
+      // The virtual base offset index is negative; the code generator emits
+      // a dwarf expression where it expects a positive number.
+      BaseOffset = 0 - CGM.getVtableInfo().getVirtualBaseOffsetIndex(RD,
+                                                                     BaseDecl);
+      BFlags = llvm::DIType::FlagVirtual;
+    } else {
+      BaseOffset = RL.getBaseClassOffset(BaseDecl);
+    }
+
+    // Translate the C++ access specifier into debug info flags.
+    switch (BI->getAccessSpecifier()) {
+    case clang::AS_private:
+      BFlags |= llvm::DIType::FlagPrivate;
+      break;
+    case clang::AS_protected:
+      BFlags |= llvm::DIType::FlagProtected;
+      break;
+    default:
+      break;
+    }
+
+    llvm::DIType BaseTy = getOrCreateType(BI->getType(), Unit);
+    EltTys.push_back(
+      DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+                                     RecordTy, llvm::StringRef(),
+                                     llvm::DICompileUnit(), 0, 0, 0,
+                                     BaseOffset, BFlags, BaseTy));
+  }
+}
+
+/// getOrCreateVTablePtrType - Return debug info descriptor for vtable.
+llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DICompileUnit Unit) {
+  // Reuse the cached descriptor when one has already been built.
+  if (!VTablePtrType.isNull())
+    return VTablePtrType;
+
+  ASTContext &Context = CGM.getContext();
+
+  // Build a vtable-slot function type; only an int return type is recorded.
+  llvm::DIDescriptor STy = getOrCreateType(Context.IntTy, Unit);
+  llvm::DIArray SElements = DebugFactory.GetOrCreateArray(&STy, 1);
+  llvm::DIType SubTy =
+    DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
+                                     Unit, "", llvm::DICompileUnit(),
+                                     0, 0, 0, 0, 0, llvm::DIType(), SElements);
+
+  // __vtbl_ptr_type is a pointer to the slot function type...
+  unsigned PtrSize = Context.getTypeSize(Context.VoidPtrTy);
+  llvm::DIType VTableSlotPtrTy =
+    DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+                                   Unit, "__vtbl_ptr_type",
+                                   llvm::DICompileUnit(),
+                                   0, PtrSize, 0, 0, 0, SubTy);
+
+  // ...and the vtable pointer itself points at __vtbl_ptr_type.
+  VTablePtrType =
+    DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
+                                   Unit, "", llvm::DICompileUnit(),
+                                   0, PtrSize, 0, 0, 0, VTableSlotPtrTy);
+  return VTablePtrType;
+}
+
+/// getVtableName - Get the vtable pointer field name for the given class,
+/// in the gdb-compatible "_vptr$<class>" form.
+llvm::StringRef CGDebugInfo::getVtableName(const CXXRecordDecl *RD) {
+  // Construct a gdb compatible name.
+  std::string VName = "_vptr$" + RD->getNameAsString();
+
+  // The local std::string dies with this function, so copy the characters
+  // into the DebugInfoNames allocator and return a StringRef into that copy.
+  size_t Len = VName.size();
+  char *StrPtr = DebugInfoNames.Allocate<char>(Len);
+  memcpy(StrPtr, VName.data(), Len);
+  return llvm::StringRef(StrPtr, Len);
+}
+
+
+/// CollectVtableInfo - If the C++ class has vtable info then insert
+/// appropriate debug info entry in EltTys vector.
+void CGDebugInfo::
+CollectVtableInfo(const CXXRecordDecl *RD, llvm::DICompileUnit Unit,
+                  llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
+  const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+
+  // When there is a primary base, that base holds the vtable pointer and
+  // its debug info already describes it.
+  if (RL.getPrimaryBase())
+    return;
+
+  // A non-dynamic class carries no vtable pointer at all.
+  if (!RD->isDynamicClass())
+    return;
+
+  // Emit the "_vptr$<class>" member with pointer size and default alignment.
+  unsigned PtrSize = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+  EltTys.push_back(
+    DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                   getVtableName(RD), llvm::DICompileUnit(),
+                                   0, PtrSize, 0, 0, 0,
+                                   getOrCreateVTablePtrType(Unit)));
+}
+
+/// CreateType - get structure or union type.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  RecordDecl *RD = Ty->getDecl();
+
+  // Map the record's C/C++ flavor onto the matching DWARF tag.
+  unsigned Tag;
+  if (RD->isStruct())
+    Tag = llvm::dwarf::DW_TAG_structure_type;
+  else if (RD->isUnion())
+    Tag = llvm::dwarf::DW_TAG_union_type;
+  else {
+    assert(RD->isClass() && "Unknown RecordType!");
+    Tag = llvm::dwarf::DW_TAG_class_type;
+  }
+
+  SourceManager &SM = CGM.getContext().getSourceManager();
+
+  // Get overall information about the record type for the debug info.
+  // An invalid presumed location (e.g. a built-in decl) yields line 0 and a
+  // null compile unit.
+  PresumedLoc PLoc = SM.getPresumedLoc(RD->getLocation());
+  llvm::DICompileUnit DefUnit;
+  unsigned Line = 0;
+  if (!PLoc.isInvalid()) {
+    DefUnit = getOrCreateCompileUnit(RD->getLocation());
+    Line = PLoc.getLine();
+  }
+
+  // Records and classes and unions can all be recursive.  To handle them, we
+  // first generate a debug descriptor for the struct as a forward declaration.
+  // Then (if it is a definition) we go through and get debug info for all of
+  // its members.  Finally, we create a descriptor for the complete type (which
+  // may refer to the forward decl if the struct is recursive) and replace all
+  // uses of the forward declaration with the final definition.
+
+  // A RD->getName() is not unique. However, the debug info descriptors 
+  // are uniqued so use type name to ensure uniqueness.
+  std::string STy = QualType(Ty, 0).getAsString();
+  llvm::DIDescriptor FDContext = 
+    getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+  llvm::DICompositeType FwdDecl =
+    DebugFactory.CreateCompositeType(Tag, FDContext,
+                                     STy.c_str(),
+                                     DefUnit, Line, 0, 0, 0, 0,
+                                     llvm::DIType(), llvm::DIArray());
+
+  // If this is just a forward declaration, return it.
+  if (!RD->getDefinition(CGM.getContext()))
+    return FwdDecl;
+
+  // Track the forward-decl node so it remains valid across any metadata
+  // replacement that happens while members are converted below.
+  llvm::TrackingVH<llvm::MDNode> FwdDeclNode = FwdDecl.getNode();
+  // Otherwise, insert it into the TypeCache so that recursive uses will find
+  // it.
+  TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
+
+  // Convert all the elements: bases and the vtable pointer first, then
+  // fields, then member functions.
+  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+  const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+  if (CXXDecl) {
+    CollectCXXBases(CXXDecl, Unit, EltTys, FwdDecl);
+    CollectVtableInfo(CXXDecl, Unit, EltTys);
+  }
+  CollectRecordFields(RD, Unit, EltTys);
+  llvm::MDNode *ContainingType = NULL;
+  if (CXXDecl) {
+    CollectCXXMemberFunctions(CXXDecl, Unit, EltTys, FwdDecl);
+
+    // A class's primary base or the class itself contains the vtable.
+    const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+    if (const CXXRecordDecl *PBase = RL.getPrimaryBase())
+      ContainingType = 
+        getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit).getNode();
+    else if (CXXDecl->isDynamicClass()) 
+      ContainingType = FwdDecl.getNode();
+  }
+
+  llvm::DIArray Elements =
+    DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+  // Bit size, align and offset of the type.
+  uint64_t Size = CGM.getContext().getTypeSize(Ty);
+  uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+  llvm::DIDescriptor RDContext =  
+    getContextDescriptor(dyn_cast<Decl>(RD->getDeclContext()), Unit);
+  llvm::DICompositeType RealDecl =
+    DebugFactory.CreateCompositeType(Tag, RDContext,
+                                     RD->getName(),
+                                     DefUnit, Line, Size, Align, 0, 0, 
+                                     llvm::DIType(), Elements, 
+                                     0, ContainingType);
+
+  // Now that we have a real decl for the struct, replace anything using the
+  // old decl with the new one.  This will recursively update the debug info.
+  llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+
+  return RealDecl;
+}
+
+/// CreateType - get objective-c interface type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  ObjCInterfaceDecl *ID = Ty->getDecl();
+
+  // Interfaces are always described as DWARF structure types.
+  unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+  SourceManager &SM = CGM.getContext().getSourceManager();
+
+  // Get overall information about the record type for the debug info.
+  llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(ID->getLocation());
+  PresumedLoc PLoc = SM.getPresumedLoc(ID->getLocation());
+  unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+
+  // Record the compile unit's language so consumers can tell these are
+  // Objective-C runtime entries.
+  unsigned RuntimeLang = DefUnit.getLanguage();
+
+  // To handle a recursive interface, we
+  // first generate a debug descriptor for the struct as a forward declaration.
+  // Then (if it is a definition) we go through and get debug info for all of
+  // its members.  Finally, we create a descriptor for the complete type (which
+  // may refer to the forward decl if the struct is recursive) and replace all
+  // uses of the forward declaration with the final definition.
+  llvm::DICompositeType FwdDecl =
+    DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(),
+                                     DefUnit, Line, 0, 0, 0, 0,
+                                     llvm::DIType(), llvm::DIArray(),
+                                     RuntimeLang);
+
+  // If this is just a forward declaration, return it.
+  if (ID->isForwardDecl())
+    return FwdDecl;
+
+  // Track the forward-decl node so it remains valid across any metadata
+  // replacement triggered while members are converted below.
+  llvm::TrackingVH<llvm::MDNode> FwdDeclNode = FwdDecl.getNode();
+  // Otherwise, insert it into the TypeCache so that recursive uses will find
+  // it.
+  TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
+
+  // Convert all the elements.
+  llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
+
+  // The superclass, if any, is modeled as inheritance at offset 0.
+  ObjCInterfaceDecl *SClass = ID->getSuperClass();
+  if (SClass) {
+    llvm::DIType SClassTy =
+      getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+    llvm::DIType InhTag =
+      DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
+                                     Unit, "", llvm::DICompileUnit(), 0, 0, 0,
+                                     0 /* offset */, 0, SClassTy);
+    EltTys.push_back(InhTag);
+  }
+
+  const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
+
+  // Emit a DW_TAG_member for each named ivar.
+  unsigned FieldNo = 0;
+  for (ObjCInterfaceDecl::ivar_iterator I = ID->ivar_begin(),
+         E = ID->ivar_end();  I != E; ++I, ++FieldNo) {
+    ObjCIvarDecl *Field = *I;
+    llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+
+    llvm::StringRef FieldName = Field->getName();
+
+    // Ignore unnamed fields.
+    if (FieldName.empty())
+      continue;
+
+    // Get the location for the field.  NOTE: this PLoc intentionally
+    // shadows the interface-level PLoc above.
+    SourceLocation FieldDefLoc = Field->getLocation();
+    llvm::DICompileUnit FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
+    PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
+    unsigned FieldLine = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+
+    QualType FType = Field->getType();
+    uint64_t FieldSize = 0;
+    unsigned FieldAlign = 0;
+
+    // Incomplete (flexible) array ivars keep size/align 0.
+    if (!FType->isIncompleteArrayType()) {
+
+      // Bit size, align and offset of the type.  A bit-field's size is its
+      // declared bit width, not the width of the underlying type.
+      FieldSize = CGM.getContext().getTypeSize(FType);
+      Expr *BitWidth = Field->getBitWidth();
+      if (BitWidth)
+        FieldSize = BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+      FieldAlign =  CGM.getContext().getTypeAlign(FType);
+    }
+
+    uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
+    // Translate ObjC ivar access control into debug info flags.
+    unsigned Flags = 0;
+    if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+      Flags = llvm::DIType::FlagProtected;
+    else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+      Flags = llvm::DIType::FlagPrivate;
+
+    // Create a DW_TAG_member node to remember the offset of this field in the
+    // struct.  FIXME: This is an absolutely insane way to capture this
+    // information.  When we gut debug info, this should be fixed.
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             FieldName, FieldDefUnit,
+                                             FieldLine, FieldSize, FieldAlign,
+                                             FieldOffset, Flags, FieldTy);
+    EltTys.push_back(FieldTy);
+  }
+
+  llvm::DIArray Elements =
+    DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+  // Bit size, align and offset of the type.
+  uint64_t Size = CGM.getContext().getTypeSize(Ty);
+  uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+  llvm::DICompositeType RealDecl =
+    DebugFactory.CreateCompositeType(Tag, Unit, ID->getName(), DefUnit,
+                                     Line, Size, Align, 0, 0, llvm::DIType(), 
+                                     Elements, RuntimeLang);
+
+  // Now that we have a real decl for the struct, replace anything using the
+  // old decl with the new one.  This will recursively update the debug info.
+  llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
+
+  return RealDecl;
+}
+
+/// CreateType - get enumeration type.
+llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  EnumDecl *ED = Ty->getDecl();
+
+  // Build a DIEnumerator for every enumerator constant.
+  llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
+  for (EnumDecl::enumerator_iterator EI = ED->enumerator_begin(),
+         EE = ED->enumerator_end(); EI != EE; ++EI)
+    Enumerators.push_back(
+      DebugFactory.CreateEnumerator(EI->getName(),
+                                    EI->getInitVal().getZExtValue()));
+
+  llvm::DIArray EltArray =
+    DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
+
+  // Figure out where the enum is defined.
+  SourceLocation DefLoc = ED->getLocation();
+  llvm::DICompileUnit DefUnit = getOrCreateCompileUnit(DefLoc);
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
+  unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+
+  // An incomplete enum has no size or alignment yet.
+  uint64_t Size = 0;
+  unsigned Align = 0;
+  if (!Ty->isIncompleteType()) {
+    Size = CGM.getContext().getTypeSize(Ty);
+    Align = CGM.getContext().getTypeAlign(Ty);
+  }
+
+  // Return a CompositeType for the enum itself.
+  return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
+                                          Unit, ED->getName(), DefUnit, Line,
+                                          Size, Align, 0, 0,
+                                          llvm::DIType(), EltArray);
+}
+
+/// CreateType - dispatch a tag type to the record or enum handler; any
+/// other tag yields an empty descriptor.
+llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+    return CreateType(RT, Unit);
+  if (const EnumType *ET = dyn_cast<EnumType>(Ty))
+    return CreateType(ET, Unit);
+  return llvm::DIType();
+}
+
+/// CreateType - get array type, including VLAs and incomplete arrays.
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
+                                     llvm::DICompileUnit Unit) {
+  // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+  uint64_t Size;
+  uint64_t Align;
+  if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+    // The extent of a VLA is unknown, so only the element alignment is
+    // meaningful.
+    Size = 0;
+    Align =
+      CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+  } else if (Ty->isIncompleteArrayType()) {
+    Size = 0;
+    Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+  } else {
+    // Size and align of the whole array, not the element type.
+    Size = CGM.getContext().getTypeSize(Ty);
+    Align = CGM.getContext().getTypeAlign(Ty);
+  }
+
+  // Add the dimensions of the array.  FIXME: This loses CV qualifiers from
+  // interior arrays, do we care?  Why aren't nested arrays represented the
+  // obvious/recursive way?
+  llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+  QualType EltTy(Ty, 0);
+  while (const ArrayType *AT = dyn_cast<ArrayType>(EltTy)) {
+    uint64_t Upper = 0;
+    if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
+      if (CAT->getSize().getZExtValue())
+        Upper = CAT->getSize().getZExtValue() - 1;
+    // FIXME: Verify this is right for VLAs.
+    Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
+    EltTy = AT->getElementType();
+  }
+
+  llvm::DIArray SubscriptArray =
+    DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+
+  return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_array_type,
+                                          Unit, "", llvm::DICompileUnit(),
+                                          0, Size, Align, 0, 0,
+                                          getOrCreateType(EltTy, Unit),
+                                          SubscriptArray);
+}
+
+/// CreateType - get an lvalue reference type, modeled as a
+/// DW_TAG_reference_type pointing at the referenced type.
+llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty, 
+                                     llvm::DICompileUnit Unit) {
+  return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, 
+                               Ty, Ty->getPointeeType(), Unit);
+}
+
+/// CreateType - get a member pointer type.  A pointer to data member is
+/// described as the underlying ptrdiff_t; a pointer to member function is
+/// described as an anonymous struct of two ptrdiff_t-sized fields.
+llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty, 
+                                     llvm::DICompileUnit U) {
+  QualType PointerDiffTy = CGM.getContext().getPointerDiffType();
+  llvm::DIType PointerDiffDITy = getOrCreateType(PointerDiffTy, U);
+  
+  if (!Ty->getPointeeType()->isFunctionType()) {
+    // We have a data member pointer type.
+    return PointerDiffDITy;
+  }
+  
+  // We have a member function pointer type. Treat it as a struct with two
+  // ptrdiff_t members.
+  std::pair<uint64_t, unsigned> Info = CGM.getContext().getTypeInfo(Ty);
+
+  uint64_t FieldOffset = 0;
+  llvm::DIDescriptor ElementTypes[2];
+  
+  // FIXME: This should probably be a function type instead.
+  ElementTypes[0] =
+    DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
+                                   "ptr", llvm::DICompileUnit(), 0,
+                                   Info.first, Info.second, FieldOffset, 0,
+                                   PointerDiffDITy);
+  FieldOffset += Info.first;
+  
+  ElementTypes[1] =
+    DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, U,
+                                   "ptr", llvm::DICompileUnit(), 0,
+                                   Info.first, Info.second, FieldOffset, 0,
+                                   PointerDiffDITy);
+  
+  llvm::DIArray Elements = 
+    DebugFactory.GetOrCreateArray(&ElementTypes[0],
+                                  llvm::array_lengthof(ElementTypes));
+
+  // The struct is anonymous, like the other synthesized composite types in
+  // this file; the previous placeholder name "test" leaked into the emitted
+  // debug info.
+  return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_structure_type, 
+                                          U, llvm::StringRef(), 
+                                          llvm::DICompileUnit(), 0, FieldOffset, 
+                                          0, 0, 0, llvm::DIType(), Elements);
+}
+
+/// UnwrapTypeForDebugInfo - Strip type sugar that debug info does not model
+/// (typeof, decltype, template specialization sugar, qualified names, etc.),
+/// looping until a non-sugared type is reached.  The original version ended
+/// with an unreachable `return T;` after a do/while(true); the loop is
+/// restructured so there is no dead code.
+static QualType UnwrapTypeForDebugInfo(QualType T) {
+  while (true) {
+    QualType LastT = T;
+    switch (T->getTypeClass()) {
+    default:
+      // Not sugar: this is the type to describe.
+      return T;
+    case Type::TemplateSpecialization:
+      T = cast<TemplateSpecializationType>(T)->desugar();
+      break;
+    case Type::TypeOfExpr: {
+      TypeOfExprType *Ty = cast<TypeOfExprType>(T);
+      T = Ty->getUnderlyingExpr()->getType();
+      break;
+    }
+    case Type::TypeOf:
+      T = cast<TypeOfType>(T)->getUnderlyingType();
+      break;
+    case Type::Decltype:
+      T = cast<DecltypeType>(T)->getUnderlyingType();
+      break;
+    case Type::QualifiedName:
+      T = cast<QualifiedNameType>(T)->getNamedType();
+      break;
+    case Type::SubstTemplateTypeParm:
+      T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+      break;
+    case Type::Elaborated:
+      T = cast<ElaboratedType>(T)->getUnderlyingType();
+      break;
+    }
+
+    // Guard against sugar that desugars to itself: assert in +Asserts
+    // builds, and bail out rather than loop forever otherwise.
+    assert(T != LastT && "Type unwrapping failed to unwrap!");
+    if (T == LastT)
+      return T;
+  }
+}
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
+                                          llvm::DICompileUnit Unit) {
+  if (Ty.isNull())
+    return llvm::DIType();
+
+  // Strip sugar the debug info does not model.
+  Ty = UnwrapTypeForDebugInfo(Ty);
+
+  // Return the cached descriptor if one exists and its node is still alive;
+  // the cache holds weak handles, so the node may have been deleted.
+  std::map<void *, llvm::WeakVH>::iterator Cached =
+    TypeCache.find(Ty.getAsOpaquePtr());
+  if (Cached != TypeCache.end() && &*Cached->second)
+    return llvm::DIType(cast<llvm::MDNode>(Cached->second));
+
+  // Otherwise build a fresh node and remember it (overwriting any stale
+  // cache entry).
+  llvm::DIType Res = CreateTypeNode(Ty, Unit);
+  TypeCache[Ty.getAsOpaquePtr()] = Res.getNode();
+  return Res;
+}
+
+/// CreateTypeNode - Create a new debug type node.
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
+                                         llvm::DICompileUnit Unit) {
+  // Handle qualifiers, which recursively handles what they refer to.
+  if (Ty.hasLocalQualifiers())
+    return CreateQualifiedType(Ty, Unit);
+
+  // Set to a human-readable description below when a type class cannot be
+  // handled yet; a diagnostic is then reported after the switch.
+  const char *Diag = 0;
+  
+  // Work out details of type.
+  switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+    assert(false && "Dependent types cannot show up in debug information");
+
+  // FIXME: Handle these.
+  case Type::ExtVector:
+  case Type::Vector:
+    return llvm::DIType();
+      
+  case Type::ObjCObjectPointer:
+    return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+  case Type::ObjCInterface:
+    return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+  case Type::Builtin: return CreateType(cast<BuiltinType>(Ty), Unit);
+  case Type::Complex: return CreateType(cast<ComplexType>(Ty), Unit);
+  case Type::Pointer: return CreateType(cast<PointerType>(Ty), Unit);
+  case Type::BlockPointer:
+    return CreateType(cast<BlockPointerType>(Ty), Unit);
+  case Type::Typedef: return CreateType(cast<TypedefType>(Ty), Unit);
+  case Type::Record:
+  case Type::Enum:
+    return CreateType(cast<TagType>(Ty), Unit);
+  case Type::FunctionProto:
+  case Type::FunctionNoProto:
+    return CreateType(cast<FunctionType>(Ty), Unit);
+  case Type::ConstantArray:
+  case Type::VariableArray:
+  case Type::IncompleteArray:
+    return CreateType(cast<ArrayType>(Ty), Unit);
+
+  case Type::LValueReference:
+    return CreateType(cast<LValueReferenceType>(Ty), Unit);
+
+  case Type::MemberPointer:
+    return CreateType(cast<MemberPointerType>(Ty), Unit);
+
+  // Sugar is stripped by UnwrapTypeForDebugInfo before this is reached.
+  case Type::TemplateSpecialization:
+  case Type::Elaborated:
+  case Type::QualifiedName:
+  case Type::SubstTemplateTypeParm:
+  case Type::TypeOfExpr:
+  case Type::TypeOf:
+  case Type::Decltype:
+    llvm_unreachable("type should have been unwrapped!");
+    return llvm::DIType();
+      
+  case Type::RValueReference:
+    // FIXME: Implement!
+    Diag = "rvalue references";
+    break;
+  }
+  
+  // Report the unsupported construct and return an empty descriptor.
+  assert(Diag && "Fall through without a diagnostic?");
+  unsigned DiagID = CGM.getDiags().getCustomDiagID(Diagnostic::Error,
+                               "debug information for %0 is not yet supported");
+  CGM.getDiags().Report(FullSourceLoc(), DiagID)
+    << Diag;
+  return llvm::DIType();
+}
+
+/// EmitFunctionStart - Constructs the debug code for entering a function -
+/// "llvm.dbg.func.start.".
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
+                                    llvm::Function *Fn,
+                                    CGBuilderTy &Builder) {
+
+  llvm::StringRef Name;
+  llvm::StringRef LinkageName;
+
+  const Decl *D = GD.getDecl();
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+    // If there is a DISubprogram for this function available then use it.
+    llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+      FI = SPCache.find(FD);
+    if (FI != SPCache.end()) {
+      llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
+      if (!SP.isNull() && SP.isSubprogram() && SP.isDefinition()) {
+        RegionStack.push_back(SP.getNode());
+        RegionMap[D] = llvm::WeakVH(SP.getNode());
+        return;
+      }
+    }
+    Name = getFunctionName(FD);
+    // Strip the \01 prefix used to suppress mangling of asm labels.
+    if (!Name.empty() && Name[0] == '\01')
+      Name = Name.substr(1);
+    // Use mangled name as linkage name for c/c++ functions.
+    LinkageName = CGM.getMangledName(GD);
+  } else {
+    // Use llvm function name as linkage name.
+    Name = Fn->getName();
+    LinkageName = Name;
+    if (!Name.empty() && Name[0] == '\01')
+      Name = Name.substr(1);
+  }
+
+  // It is expected that CurLoc is set before using EmitFunctionStart.
+  // Usually, CurLoc points to the left bracket location of compound
+  // statement representing function body.
+  llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  // Guard against an invalid presumed location instead of reading a line
+  // number from it, matching every other presumed-loc use in this file.
+  PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
+  unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+  llvm::DISubprogram SP =
+    DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
+                                  getOrCreateType(FnType, Unit),
+                                  Fn->hasInternalLinkage(), true/*definition*/);
+
+  // Push function on region stack.
+  RegionStack.push_back(SP.getNode());
+  RegionMap[D] = llvm::WeakVH(SP.getNode());
+}
+
+
+/// EmitStopPoint - Attach the current source location to the builder so
+/// subsequently emitted instructions carry it.
+void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
+  // Nothing to do without a real (non-macro) location.
+  if (CurLoc.isInvalid() || CurLoc.isMacroID())
+    return;
+
+  // Don't bother if things are the same as last time.
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  bool SameAsLast = CurLoc == PrevLoc;
+  if (!SameAsLast)
+    SameAsLast = SM.getInstantiationLineNumber(CurLoc) ==
+                   SM.getInstantiationLineNumber(PrevLoc) &&
+                 SM.isFromSameFile(CurLoc, PrevLoc);
+  if (SameAsLast)
+    return;
+
+  // Update last state.
+  PrevLoc = CurLoc;
+
+  // Get the appropriate compile unit.
+  llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
+  PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
+
+  // Build a DILocation scoped to the innermost open region and hand it to
+  // the builder.
+  llvm::DIDescriptor DR(RegionStack.back());
+  llvm::DIScope Scope(DR.getNode());
+  llvm::DILocation OrigLoc(NULL);
+  llvm::DILocation Loc =
+    DebugFactory.CreateLocation(PLoc.getLine(), PLoc.getColumn(),
+                                Scope, OrigLoc);
+  Builder.SetCurrentDebugLocation(Loc.getNode());
+}
+
+/// EmitRegionStart - Constructs the debug code for entering a declarative
+/// region - "llvm.dbg.region.start.".
+void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
+  // The parent scope is the innermost open region, if any.
+  llvm::DIDescriptor Parent;
+  if (!RegionStack.empty())
+    Parent = llvm::DIDescriptor(RegionStack.back());
+  llvm::DIDescriptor Block = DebugFactory.CreateLexicalBlock(Parent);
+  RegionStack.push_back(Block.getNode());
+}
+
+/// EmitRegionEnd - Constructs the debug code for exiting a declarative
+/// region - "llvm.dbg.region.end."
+void CGDebugInfo::EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder) {
+  assert(RegionStack.size() != 0 && "Region stack mismatch, stack empty!");
+
+  // Emit a stop point marking the end of this lexical region before it is
+  // popped from the stack.
+  EmitStopPoint(Fn, Builder);
+
+  RegionStack.pop_back();
+}
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
+                              llvm::Value *Storage, CGBuilderTy &Builder) {
+  assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+  // Do not emit variable debug information while generating optimized code.
+  // The llvm optimizer and code generator are not yet ready to support
+  // optimized code debugging.
+  const CodeGenOptions &CGO = CGM.getCodeGenOpts();
+  if (CGO.OptimizationLevel)
+    return;
+
+  llvm::DICompileUnit Unit = getOrCreateCompileUnit(VD->getLocation());
+  QualType Type = VD->getType();
+  llvm::DIType Ty = getOrCreateType(Type, Unit);
+  if (VD->hasAttr<BlocksAttr>()) {
+    llvm::DICompileUnit DefUnit;
+    unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
+
+    llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+    llvm::DIType FieldTy;
+
+    QualType FType;
+    uint64_t FieldSize, FieldOffset;
+    unsigned FieldAlign;
+
+    llvm::DIArray Elements;
+    llvm::DIType EltTy;
+    
+    // Build up structure for the byref.  See BuildByRefType.
+    FieldOffset = 0;
+    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__isa", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__forwarding", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    FType = CGM.getContext().IntTy;
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__flags", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    FType = CGM.getContext().IntTy;
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__size", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+    
+    bool HasCopyAndDispose = CGM.BlockRequiresCopying(Type);
+    if (HasCopyAndDispose) {
+      FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+      FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+      FieldSize = CGM.getContext().getTypeSize(FType);
+      FieldAlign = CGM.getContext().getTypeAlign(FType);
+      FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                               "__copy_helper", DefUnit,
+                                               0, FieldSize, FieldAlign,
+                                               FieldOffset, 0, FieldTy);
+      EltTys.push_back(FieldTy);
+      FieldOffset += FieldSize;
+
+      FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+      FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+      FieldSize = CGM.getContext().getTypeSize(FType);
+      FieldAlign = CGM.getContext().getTypeAlign(FType);
+      FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                               "__destroy_helper", DefUnit,
+                                               0, FieldSize, FieldAlign,
+                                               FieldOffset, 0, FieldTy);
+      EltTys.push_back(FieldTy);
+      FieldOffset += FieldSize;
+    }
+    
+    CharUnits Align = CGM.getContext().getDeclAlign(VD);
+    if (Align > CharUnits::fromQuantity(
+          CGM.getContext().Target.getPointerAlign(0) / 8)) {
+      unsigned AlignedOffsetInBytes
+        = llvm::RoundUpToAlignment(FieldOffset/8, Align.getQuantity());
+      unsigned NumPaddingBytes
+        = AlignedOffsetInBytes - FieldOffset/8;
+
+      if (NumPaddingBytes > 0) {
+        llvm::APInt pad(32, NumPaddingBytes);
+        FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
+                                                     pad, ArrayType::Normal, 0);
+        FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+        FieldSize = CGM.getContext().getTypeSize(FType);
+        FieldAlign = CGM.getContext().getTypeAlign(FType);
+        FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
+                                                 Unit, "", DefUnit,
+                                                 0, FieldSize, FieldAlign,
+                                                 FieldOffset, 0, FieldTy);
+        EltTys.push_back(FieldTy);
+        FieldOffset += FieldSize;
+      }
+    }
+
+    FType = Type;
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = Align.getQuantity()*8;
+    
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             VD->getName(), DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+    unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
+
+    Ty = DebugFactory.CreateCompositeType(Tag, Unit, "",
+                                          llvm::DICompileUnit(),
+                                          0, FieldOffset, 0, 0, Flags,
+                                          llvm::DIType(), Elements);
+  }
+
+  // Get location information.
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  PresumedLoc PLoc = SM.getPresumedLoc(VD->getLocation());
+  unsigned Line = 0;
+  unsigned Column = 0;
+  if (PLoc.isInvalid())
+    PLoc = SM.getPresumedLoc(CurLoc);
+  if (PLoc.isValid()) {
+    Line = PLoc.getLine();
+    Column = PLoc.getColumn();
+    Unit = getOrCreateCompileUnit(CurLoc);
+  } else {
+    Unit = llvm::DICompileUnit();
+  }
+
+  // Create the descriptor for the variable.
+  llvm::DIVariable D =
+    DebugFactory.CreateVariable(Tag, llvm::DIDescriptor(RegionStack.back()),
+                                VD->getName(),
+                                Unit, Line, Ty);
+  // Insert an llvm.dbg.declare into the current block.
+  llvm::Instruction *Call =
+    DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+  llvm::DIScope DS(RegionStack.back());
+  llvm::DILocation DO(NULL);
+  llvm::DILocation DL = DebugFactory.CreateLocation(Line, Column, DS, DO);
+  
+  Call->setMetadata("dbg", DL.getNode());
+}
+
+/// EmitDeclare - Emit local variable declaration debug info for a variable
+/// referenced from inside a block (BlockDeclRefExpr). For __block variables
+/// the debug type is rebuilt as the __Block_byref structure and the variable
+/// address is described with a complex expression (deref/plus operations).
+void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
+                              llvm::Value *Storage, CGBuilderTy &Builder,
+                              CodeGenFunction *CGF) {
+  const ValueDecl *VD = BDRE->getDecl();
+  assert(!RegionStack.empty() && "Region stack mismatch, stack empty!");
+
+  // Do not emit variable debug information while generating optimized code.
+  // The llvm optimizer and code generator are not yet ready to support
+  // optimized code debugging.
+  const CodeGenOptions &CGO = CGM.getCodeGenOpts();
+  if (CGO.OptimizationLevel || Builder.GetInsertBlock() == 0)
+    return;
+
+  // Byte*8 offset of the variable's own field inside the byref struct;
+  // only meaningful when VD has the blocks attribute.
+  uint64_t XOffset = 0;
+  llvm::DICompileUnit Unit = getOrCreateCompileUnit(VD->getLocation());
+  QualType Type = VD->getType();
+  llvm::DIType Ty = getOrCreateType(Type, Unit);
+  if (VD->hasAttr<BlocksAttr>()) {
+    llvm::DICompileUnit DefUnit;
+    // Named StructTag (not Tag) so the function parameter 'Tag' is not
+    // shadowed; the parameter is still used below for the variable itself.
+    unsigned StructTag = llvm::dwarf::DW_TAG_structure_type;
+
+    llvm::SmallVector<llvm::DIDescriptor, 5> EltTys;
+
+    llvm::DIType FieldTy;
+
+    QualType FType;
+    uint64_t FieldSize, FieldOffset;
+    unsigned FieldAlign;
+
+    llvm::DIArray Elements;
+    llvm::DIType EltTy;
+
+    // Build up structure for the byref.  See BuildByRefType.
+    FieldOffset = 0;
+    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__isa", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__forwarding", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    FType = CGM.getContext().IntTy;
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__flags", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    FType = CGM.getContext().IntTy;
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = CGM.getContext().getTypeAlign(FType);
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             "__size", DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    // The copy/dispose helpers are only present when the variable actually
+    // requires copy semantics (e.g. it is itself a block or __block object).
+    bool HasCopyAndDispose = CGM.BlockRequiresCopying(Type);
+    if (HasCopyAndDispose) {
+      FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+      FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+      FieldSize = CGM.getContext().getTypeSize(FType);
+      FieldAlign = CGM.getContext().getTypeAlign(FType);
+      FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                               "__copy_helper", DefUnit,
+                                               0, FieldSize, FieldAlign,
+                                               FieldOffset, 0, FieldTy);
+      EltTys.push_back(FieldTy);
+      FieldOffset += FieldSize;
+
+      FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+      FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+      FieldSize = CGM.getContext().getTypeSize(FType);
+      FieldAlign = CGM.getContext().getTypeAlign(FType);
+      FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                               "__destroy_helper", DefUnit,
+                                               0, FieldSize, FieldAlign,
+                                               FieldOffset, 0, FieldTy);
+      EltTys.push_back(FieldTy);
+      FieldOffset += FieldSize;
+    }
+
+    // If the variable is over-aligned relative to a pointer, pad the struct
+    // with an unnamed char array so its field lands at the aligned offset.
+    CharUnits Align = CGM.getContext().getDeclAlign(VD);
+    if (Align > CharUnits::fromQuantity(
+          CGM.getContext().Target.getPointerAlign(0) / 8)) {
+      unsigned AlignedOffsetInBytes
+        = llvm::RoundUpToAlignment(FieldOffset/8, Align.getQuantity());
+      unsigned NumPaddingBytes
+        = AlignedOffsetInBytes - FieldOffset/8;
+
+      if (NumPaddingBytes > 0) {
+        llvm::APInt pad(32, NumPaddingBytes);
+        FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
+                                                     pad, ArrayType::Normal, 0);
+        FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+        FieldSize = CGM.getContext().getTypeSize(FType);
+        FieldAlign = CGM.getContext().getTypeAlign(FType);
+        FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member,
+                                                 Unit, "", DefUnit,
+                                                 0, FieldSize, FieldAlign,
+                                                 FieldOffset, 0, FieldTy);
+        EltTys.push_back(FieldTy);
+        FieldOffset += FieldSize;
+      }
+    }
+
+    // Finally the variable itself; remember its offset for the complex
+    // address expression emitted below.
+    FType = Type;
+    FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+    FieldSize = CGM.getContext().getTypeSize(FType);
+    FieldAlign = Align.getQuantity()*8;
+
+    XOffset = FieldOffset;
+    FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
+                                             VD->getName(), DefUnit,
+                                             0, FieldSize, FieldAlign,
+                                             FieldOffset, 0, FieldTy);
+    EltTys.push_back(FieldTy);
+    FieldOffset += FieldSize;
+
+    Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
+
+    unsigned Flags = llvm::DIType::FlagBlockByrefStruct;
+
+    Ty = DebugFactory.CreateCompositeType(StructTag, Unit, "",
+                                          llvm::DICompileUnit(),
+                                          0, FieldOffset, 0, 0, Flags,
+                                          llvm::DIType(), Elements);
+  }
+
+  // Get location information.  Guard both line AND column: PresumedLoc
+  // accessors must not be called when the location is invalid (the old code
+  // guarded getLine() but still called getColumn() unconditionally).
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  PresumedLoc PLoc = SM.getPresumedLoc(VD->getLocation());
+  unsigned Line = 0;
+  unsigned Column = 0;
+  if (!PLoc.isInvalid()) {
+    Line = PLoc.getLine();
+    Column = PLoc.getColumn();
+  } else
+    Unit = llvm::DICompileUnit();
+
+  CharUnits offset = CGF->BlockDecls[VD];
+  llvm::SmallVector<llvm::Value *, 9> addr;
+  const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(CGM.getLLVMContext());
+  addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+  addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+  addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+  if (BDRE->isByRef()) {
+    addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+    addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+    // offset of __forwarding field
+    offset = CharUnits::fromQuantity(CGF->LLVMPointerWidth/8);
+    addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+    addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpDeref));
+    addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIFactory::OpPlus));
+    // offset of x field
+    offset = CharUnits::fromQuantity(XOffset/8);
+    addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+  }
+
+  // Create the descriptor for the variable.
+  llvm::DIVariable D =
+    DebugFactory.CreateComplexVariable(Tag,
+                                       llvm::DIDescriptor(RegionStack.back()),
+                                       VD->getName(), Unit, Line, Ty,
+                                       addr);
+  // Insert an llvm.dbg.declare into the current block.
+  llvm::Instruction *Call = 
+    DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
+
+  llvm::DIScope DS(RegionStack.back());
+  llvm::DILocation DO(NULL);
+  llvm::DILocation DL = 
+    DebugFactory.CreateLocation(Line, Column, DS, DO);
+  
+  Call->setMetadata("dbg", DL.getNode());
+}
+
+/// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+/// variable declaration.  Delegates to EmitDeclare with DW_TAG_auto_variable.
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
+                                            llvm::Value *Storage,
+                                            CGBuilderTy &Builder) {
+  EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder);
+}
+
+/// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for a
+/// variable referenced from inside a block.  Delegates to the
+/// BlockDeclRefExpr overload of EmitDeclare with DW_TAG_auto_variable.
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+  const BlockDeclRefExpr *BDRE, llvm::Value *Storage, CGBuilderTy &Builder,
+  CodeGenFunction *CGF) {
+  EmitDeclare(BDRE, llvm::dwarf::DW_TAG_auto_variable, Storage, Builder, CGF);
+}
+
+/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+/// variable declaration.  Delegates to EmitDeclare with DW_TAG_arg_variable.
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+                                           CGBuilderTy &Builder) {
+  EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, Builder);
+}
+
+
+
+/// EmitGlobalVariable - Emit debug information describing the given global
+/// variable definition.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+                                     const VarDecl *D) {
+  
+  // Build the descriptor in the compile unit owning the declaration's
+  // source location.
+  llvm::DICompileUnit Unit = getOrCreateCompileUnit(D->getLocation());
+
+  // Translate the declaration location into a line number; an unknown
+  // location is represented as line 0.
+  PresumedLoc Pos =
+    CGM.getContext().getSourceManager().getPresumedLoc(D->getLocation());
+  unsigned LineNo = Pos.isValid() ? Pos.getLine() : 0;
+
+  // CodeGen turns int[] into int[1] so we'll do the same here, keeping the
+  // debug type in sync with the emitted storage.
+  QualType T = D->getType();
+  if (T->isIncompleteArrayType()) {
+    llvm::APSInt One(32);
+    One = 1;
+    QualType Elt = CGM.getContext().getAsArrayType(T)->getElementType();
+    T = CGM.getContext().getConstantArrayType(Elt, One, ArrayType::Normal, 0);
+  }
+
+  // Scope the variable inside the debug descriptor of its decl context.
+  llvm::DIDescriptor DContext = 
+    getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
+  DebugFactory.CreateGlobalVariable(DContext, D->getName(), D->getName(),
+                                    llvm::StringRef(), Unit, LineNo,
+                                    getOrCreateType(T, Unit),
+                                    Var->hasInternalLinkage(),
+                                    true/*definition*/, Var);
+}
+
+/// EmitGlobalVariable - Emit information about an objective-c interface.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+                                     ObjCInterfaceDecl *ID) {
+  // Create global variable debug descriptor.
+  llvm::DICompileUnit Unit = getOrCreateCompileUnit(ID->getLocation());
+  SourceManager &SM = CGM.getContext().getSourceManager();
+  PresumedLoc PLoc = SM.getPresumedLoc(ID->getLocation());
+  // Line 0 denotes an unknown source position.
+  unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+
+  llvm::StringRef Name = ID->getName();
+
+  // Note: unlike the VarDecl overload there is no int[] -> int[1] fixup
+  // here; the type of an interface comes from getObjCInterfaceType and can
+  // never be an incomplete array, so that (copied) branch was dead code and
+  // has been removed.
+  QualType T = CGM.getContext().getObjCInterfaceType(ID);
+
+  DebugFactory.CreateGlobalVariable(Unit, Name, Name, Name, Unit, LineNo,
+                                    getOrCreateType(T, Unit),
+                                    Var->hasInternalLinkage(),
+                                    true/*definition*/, Var);
+}
+
+/// getOrCreateNameSpace - Return the namespace descriptor for the given
+/// namespace decl, creating and caching it on first use.
+llvm::DINameSpace 
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl, 
+                                  llvm::DIDescriptor Unit) {
+  // Fast path: a descriptor was already built for this namespace.
+  llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator Cached =
+    NameSpaceCache.find(NSDecl);
+  if (Cached != NameSpaceCache.end())
+    return llvm::DINameSpace(cast<llvm::MDNode>(Cached->second));
+
+  // Map the declaration location to a line number (0 when unknown).
+  PresumedLoc Pos = CGM.getContext().getSourceManager()
+                      .getPresumedLoc(NSDecl->getLocation());
+  unsigned LineNo = Pos.isValid() ? Pos.getLine() : 0;
+
+  // Nest the namespace inside the descriptor of its enclosing context.
+  llvm::DIDescriptor Context =
+    getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()), Unit);
+  llvm::DINameSpace NS =
+    DebugFactory.CreateNameSpace(Context, NSDecl->getName(),
+                                 llvm::DICompileUnit(Unit.getNode()), LineNo);
+  NameSpaceCache[NSDecl] = llvm::WeakVH(NS.getNode());
+  return NS;
+}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..c2bcc4b
--- /dev/null
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,206 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source level debug info generator for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
+#define CLANG_CODEGEN_CGDEBUGINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+#include <map>
+
+#include "CGBuilder.h"
+
+namespace llvm {
+  class MDNode;
+}
+
+namespace clang {
+  class VarDecl;
+  class ObjCInterfaceDecl;
+
+namespace CodeGen {
+  class CodeGenModule;
+  class CodeGenFunction;
+  class GlobalDecl;
+
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting to llvm globals or pass directly to
+/// the backend.
+class CGDebugInfo {
+  // Module-level CodeGen state this debug-info emitter is attached to.
+  CodeGenModule &CGM;
+  // Flag noting whether the main file's compile unit has been created yet.
+  bool isMainCompileUnitCreated;
+  // Factory used to construct every debug descriptor emitted by this class.
+  llvm::DIFactory DebugFactory;
+
+  // Current and previous source locations, used to track line changes when
+  // emitting location/stop-point information.
+  SourceLocation CurLoc, PrevLoc;
+  
+  // Cached debug type for the vtable pointer; built lazily by
+  // getOrCreateVTablePtrType.
+  llvm::DIType VTablePtrType;
+
+  /// CompileUnitCache - Cache of previously constructed CompileUnits.
+  llvm::DenseMap<unsigned, llvm::DICompileUnit> CompileUnitCache;
+
+  /// TypeCache - Cache of previously constructed Types.
+  // FIXME: Eliminate this map.  Be careful of iterator invalidation.
+  std::map<void *, llvm::WeakVH> TypeCache;
+
+  // Lazily-built generic block literal type; the flag records whether
+  // BlockLiteralGeneric has been initialized.
+  bool BlockLiteralGenericSet;
+  llvm::DIType BlockLiteralGeneric;
+
+  // RegionStack - Stack of debug-info region/scope nodes currently open;
+  // the back entry is the innermost enclosing scope.
+  std::vector<llvm::TrackingVH<llvm::MDNode> > RegionStack;
+  // RegionMap - Per-Decl cache of region nodes.
+  llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+
+  /// DebugInfoNames - This is a storage for names that are
+  /// constructed on demand. For example, C++ destructors, C++ operators etc..
+  llvm::BumpPtrAllocator DebugInfoNames;
+
+  // SPCache - Cache of subprogram descriptors keyed by function decl.
+  llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+  // NameSpaceCache - Cache of namespace descriptors, filled by
+  // getOrCreateNameSpace.
+  llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
+
+  /// Helper functions for getOrCreateType: one CreateType overload per
+  /// clang type class.
+  llvm::DIType CreateType(const BuiltinType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateQualifiedType(QualType Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const TypedefType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
+                          llvm::DICompileUnit Unit);
+  llvm::DIType CreateType(const PointerType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const FunctionType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const TagType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const RecordType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const EnumType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const ArrayType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DICompileUnit U);
+  llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DICompileUnit U);
+  llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
+                                     llvm::DICompileUnit Unit);
+  llvm::DIType getOrCreateVTablePtrType(llvm::DICompileUnit Unit);
+  llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N, 
+                                         llvm::DIDescriptor Unit);
+
+  // Shared implementation for pointer-like types (pointer, reference,
+  // block pointer ...) distinguished by the DWARF tag.
+  llvm::DIType CreatePointerLikeType(unsigned Tag,
+                                     const Type *Ty, QualType PointeeTy,
+                                     llvm::DICompileUnit U);
+  
+  llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method,
+                                             llvm::DICompileUnit Unit,
+                                             llvm::DICompositeType &RecordTy);
+  
+  // Helpers that collect member descriptors into EltTys when building a
+  // C++ record's composite type.
+  void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
+                                 llvm::DICompileUnit U,
+                                 llvm::SmallVectorImpl<llvm::DIDescriptor> &E,
+                                 llvm::DICompositeType &T);
+  void CollectCXXBases(const CXXRecordDecl *Decl,
+                       llvm::DICompileUnit Unit,
+                       llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys,
+                       llvm::DICompositeType &RecordTy);
+
+
+  void CollectRecordFields(const RecordDecl *Decl, llvm::DICompileUnit U,
+                           llvm::SmallVectorImpl<llvm::DIDescriptor> &E);
+
+  void CollectVtableInfo(const CXXRecordDecl *Decl,
+                         llvm::DICompileUnit Unit,
+                         llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys);
+
+public:
+  CGDebugInfo(CodeGenModule &CGM);
+  ~CGDebugInfo();
+
+  /// setLocation - Update the current source location. If \arg loc is
+  /// invalid it is ignored.
+  void setLocation(SourceLocation Loc);
+
+  /// EmitStopPoint - Emit a call to llvm.dbg.stoppoint to indicate a change of
+  /// source line.
+  void EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder);
+
+  /// EmitFunctionStart - Emit a call to llvm.dbg.function.start to indicate
+  /// start of a new function.
+  void EmitFunctionStart(GlobalDecl GD, QualType FnType,
+                         llvm::Function *Fn, CGBuilderTy &Builder);
+
+  /// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
+  /// of a new block.
+  void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder);
+
+  /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
+  /// block.
+  void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder);
+
+  /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+  /// variable declaration.
+  void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+                                 CGBuilderTy &Builder);
+
+  /// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
+  /// imported variable declaration in a block.
+  void EmitDeclareOfBlockDeclRefVariable(const BlockDeclRefExpr *BDRE,
+                                         llvm::Value *AI,
+                                         CGBuilderTy &Builder,
+                                         CodeGenFunction *CGF);
+
+  /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+  /// variable declaration.
+  void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+                                CGBuilderTy &Builder);
+
+  /// EmitGlobalVariable - Emit information about a global variable.
+  void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+  /// EmitGlobalVariable - Emit information about an objective-c interface.
+  void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+
+private:
+  /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+  void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+                   CGBuilderTy &Builder);
+
+  /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration
+  /// referenced from inside a block.
+  void EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag, llvm::Value *AI,
+                   CGBuilderTy &Builder, CodeGenFunction *CGF);
+
+  /// getContextDescriptor - Get context info for the decl.
+  llvm::DIDescriptor getContextDescriptor(const Decl *Decl,
+                                          llvm::DIDescriptor &CU);
+
+  /// getOrCreateCompileUnit - Get the compile unit from the cache or create a
+  /// new one if necessary.
+  llvm::DICompileUnit getOrCreateCompileUnit(SourceLocation Loc);
+
+  /// getOrCreateType - Get the type from the cache or create a new type if
+  /// necessary.
+  llvm::DIType getOrCreateType(QualType Ty, llvm::DICompileUnit Unit);
+
+  /// CreateTypeNode - Create type metadata for a source language type.
+  llvm::DIType CreateTypeNode(QualType Ty, llvm::DICompileUnit Unit);
+
+  /// getFunctionName - Get function name for the given FunctionDecl. If the
+  /// name is constructred on demand (e.g. C++ destructor) then the name
+  /// is stored on the side.
+  llvm::StringRef getFunctionName(const FunctionDecl *FD);
+
+  /// getVtableName - Get vtable name for the given Class.
+  llvm::StringRef getVtableName(const CXXRecordDecl *Decl);
+
+};
+} // namespace CodeGen
+} // namespace clang
+
+
+#endif
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..793a220
--- /dev/null
+++ b/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,728 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+/// EmitDecl - Emit code for a declaration appearing in statement context.
+/// Most declaration kinds require no code at all; block-scope variables and
+/// variably-modified typedefs are the cases that do.
+void CodeGenFunction::EmitDecl(const Decl &D) {
+  switch (D.getKind()) {
+  default:
+    CGM.ErrorUnsupported(&D, "decl");
+    return;
+  case Decl::ParmVar:
+    // Parameters are emitted by EmitParmDecl when the function is started.
+    // (In NDEBUG builds this falls through to the no-op cases below.)
+    assert(0 && "Parmdecls should not be in declstmts!");
+  case Decl::Function:  // void X();
+  case Decl::Record:    // struct/union/class X;
+  case Decl::Enum:      // enum X;
+  case Decl::EnumConstant: // enum ? { X = ? }
+  case Decl::CXXRecord: // struct/union/class X; [C++]
+  case Decl::Using:          // using X; [C++]
+  case Decl::UsingShadow:
+  case Decl::UsingDirective: // using namespace X; [C++]
+  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+    // None of these decls require codegen support.
+    return;
+
+  case Decl::Var: {
+    const VarDecl &VD = cast<VarDecl>(D);
+    assert(VD.isBlockVarDecl() &&
+           "Should not see file-scope variables inside a function!");
+    return EmitBlockVarDecl(VD);
+  }
+
+  case Decl::Typedef: {   // typedef int X;
+    const TypedefDecl &TD = cast<TypedefDecl>(D);
+    QualType Ty = TD.getUnderlyingType();
+
+    // A typedef of a variably-modified type must evaluate its VLA size
+    // expression here, so later uses of the typedef see a fixed value.
+    if (Ty->isVariablyModifiedType())
+      EmitVLASize(Ty);
+  }
+  }
+}
+
+/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.  Dispatches on the storage
+/// class: automatic storage goes to EmitLocalBlockVarDecl, static storage to
+/// EmitStaticBlockVarDecl, and extern declarations are deferred entirely.
+void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
+  if (D.hasAttr<AsmLabelAttr>())
+    CGM.ErrorUnsupported(&D, "__asm__");
+
+  switch (D.getStorageClass()) {
+  case VarDecl::None:
+  case VarDecl::Auto:
+  case VarDecl::Register:
+    return EmitLocalBlockVarDecl(D);
+  case VarDecl::Static: {
+    llvm::GlobalValue::LinkageTypes Linkage = 
+      llvm::GlobalValue::InternalLinkage;
+
+    // If this is a static declaration inside an inline function, it must have
+    // weak linkage so that the linker will merge multiple definitions of it.
+    if (getContext().getLangOptions().CPlusPlus) {
+      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurFuncDecl)) {
+        if (FD->isInlined())
+          Linkage = llvm::GlobalValue::WeakAnyLinkage;
+      }
+    }
+    
+    return EmitStaticBlockVarDecl(D, Linkage);
+  }
+  case VarDecl::Extern:
+  case VarDecl::PrivateExtern:
+    // Don't emit it now, allow it to be emitted lazily on its first use.
+    return;
+  }
+
+  // Every storage class is handled above; this is purely defensive.
+  assert(0 && "Unknown storage class");
+}
+
+/// GetStaticDeclName - Compute the name for the global backing a
+/// function-local static.  In C++ the mangler already encodes the enclosing
+/// context, so the variable's own mangled name is used; otherwise the name of
+/// the enclosing function or ObjC method is prepended, joined by Separator.
+static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
+                                     const char *Separator) {
+  CodeGenModule &CGM = CGF.CGM;
+  if (CGF.getContext().getLangOptions().CPlusPlus)
+    return CGM.getMangledName(&D);
+
+  // C/ObjC: derive a prefix from the enclosing function or method.
+  std::string Prefix;
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
+    Prefix = CGM.getMangledName(FD);
+  } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
+    Prefix = CGF.CurFn->getName();
+  } else {
+    // FIXME: What about in a block??
+    assert(0 && "Unknown context for block var decl");
+  }
+
+  return Prefix + Separator + D.getNameAsString();
+}
+
+/// CreateStaticBlockVarDecl - Create the global variable that backs a
+/// function-local variable with static storage.  The global is created with a
+/// null initializer of the right type; the real initializer, if any, is
+/// attached later (see AddInitializerToGlobalBlockVarDecl).
+llvm::GlobalVariable *
+CodeGenFunction::CreateStaticBlockVarDecl(const VarDecl &D,
+                                          const char *Separator,
+                                      llvm::GlobalValue::LinkageTypes Linkage) {
+  QualType Ty = D.getType();
+  assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+  // Combine the enclosing context's name, Separator, and the decl's name.
+  std::string Name = GetStaticDeclName(*this, D, Separator);
+
+  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+  llvm::GlobalVariable *GV =
+    new llvm::GlobalVariable(CGM.getModule(), LTy,
+                             Ty.isConstant(getContext()), Linkage,
+                             CGM.EmitNullConstant(D.getType()), Name, 0,
+                             D.isThreadSpecified(), Ty.getAddressSpace());
+  GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+  return GV;
+}
+
+/// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+/// global variable that has already been created for it.  If the initializer
+/// has a different type than GV does, this may free GV and return a different
+/// one.  Otherwise it just returns GV.
+llvm::GlobalVariable *
+CodeGenFunction::AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
+                                                    llvm::GlobalVariable *GV) {
+  llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);
+  
+  // If constant emission failed, then this should be a C++ static
+  // initializer.
+  if (!Init) {
+    if (!getContext().getLangOptions().CPlusPlus)
+      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+    else {
+      // Since we have a static initializer, this global variable can't 
+      // be constant.
+      GV->setConstant(false);
+      
+      // Emit a guarded dynamic initialization for the variable.
+      EmitStaticCXXBlockVarDeclInit(D, GV);
+    }
+    return GV;
+  }
+  
+  // The initializer may differ in type from the global. Rewrite
+  // the global to match the initializer.  (We have to do this
+  // because some types, like unions, can't be completely represented
+  // in the LLVM type system.)
+  if (GV->getType() != Init->getType()) {
+    llvm::GlobalVariable *OldGV = GV;
+    
+    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+                                  OldGV->isConstant(),
+                                  OldGV->getLinkage(), Init, "",
+                                  0, D.isThreadSpecified(),
+                                  D.getType().getAddressSpace());
+    
+    // Steal the name of the old global
+    GV->takeName(OldGV);
+    
+    // Replace all uses of the old global with the new global
+    llvm::Constant *NewPtrForOldDecl =
+    llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+    OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+    
+    // Erase the old global, since it is no longer used.
+    OldGV->eraseFromParent();
+  }
+  
+  GV->setInitializer(Init);
+  return GV;
+}
+
+/// EmitStaticBlockVarDecl - Emit a block-scope variable with static storage
+/// duration: create (or replace) its backing global, register it in
+/// LocalDeclMap, attach any initializer and attributes, and emit debug info.
+void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
+                                      llvm::GlobalValue::LinkageTypes Linkage) {
+  llvm::Value *&DMEntry = LocalDeclMap[&D];
+  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+
+  llvm::GlobalVariable *GV = CreateStaticBlockVarDecl(D, ".", Linkage);
+
+  // Store into LocalDeclMap before generating initializer to handle
+  // circular references.
+  DMEntry = GV;
+
+  // Make sure to evaluate VLA bounds now so that we have them for later.
+  //
+  // FIXME: Can this happen?
+  if (D.getType()->isVariablyModifiedType())
+    EmitVLASize(D.getType());
+
+  // If this value has an initializer, emit it.  Note that this may return a
+  // different global if the initializer forced a type rewrite.
+  if (D.getInit())
+    GV = AddInitializerToGlobalBlockVarDecl(D, GV);
+
+  // FIXME: Merge attribute handling.
+  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
+    SourceManager &SM = CGM.getContext().getSourceManager();
+    llvm::Constant *Ann =
+      CGM.EmitAnnotateAttr(GV, AA,
+                           SM.getInstantiationLineNumber(D.getLocation()));
+    CGM.AddAnnotation(Ann);
+  }
+
+  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+    GV->setSection(SA->getName());
+
+  if (D.hasAttr<UsedAttr>())
+    CGM.AddUsedGlobal(GV);
+
+  // We may have to cast the constant because of the initializer
+  // mismatch above.
+  //
+  // FIXME: It is really dangerous to store this in the map; if anyone
+  // RAUW's the GV uses of this constant will be invalid.
+  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
+  const llvm::Type *LPtrTy =
+    llvm::PointerType::get(LTy, D.getType().getAddressSpace());
+  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);
+
+  // Emit global variable debug descriptor for static vars.
+  CGDebugInfo *DI = getDebugInfo();
+  if (DI) {
+    DI->setLocation(D.getLocation());
+    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
+  }
+}
+
+/// getByRefValueLLVMField - Return the index of the field holding the actual
+/// variable ('T x') inside the __block byref struct recorded for VD by
+/// BuildByRefType.  VD must already be present in ByRefValueInfo.
+unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
+  assert(ByRefValueInfo.count(VD) && "Did not find value!");
+  
+  return ByRefValueInfo.find(VD)->second.second;
+}
+
+/// BuildByRefType - This routine changes a __block variable declared as T x
+///   into:
+///
+///      struct {
+///        void *__isa;
+///        void *__forwarding;
+///        int32_t __flags;
+///        int32_t __size;
+///        void *__copy_helper;       // only if needed
+///        void *__destroy_helper;    // only if needed
+///        char padding[X];           // only if needed
+///        T x;
+///      } x
+///
+/// Results are cached in ByRefValueInfo as (struct type, index of the 'T x'
+/// field); the index is later retrieved via getByRefValueLLVMField.
+const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
+  // Return the cached struct type if we have already built one for D.
+  std::pair<const llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+  if (Info.first)
+    return Info.first;
+  
+  QualType Ty = D->getType();
+
+  std::vector<const llvm::Type *> Types;
+  
+  const llvm::PointerType *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+  // Placeholder for the struct being built: __forwarding must point back at
+  // this very struct, so an opaque type is refined to the final type below.
+  llvm::PATypeHolder ByRefTypeHolder = llvm::OpaqueType::get(VMContext);
+  
+  // void *__isa;
+  Types.push_back(Int8PtrTy);
+  
+  // void *__forwarding;
+  Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
+  
+  // int32_t __flags;
+  Types.push_back(llvm::Type::getInt32Ty(VMContext));
+    
+  // int32_t __size;
+  Types.push_back(llvm::Type::getInt32Ty(VMContext));
+
+  bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+  if (HasCopyAndDispose) {
+    /// void *__copy_helper;
+    Types.push_back(Int8PtrTy);
+    
+    /// void *__destroy_helper;
+    Types.push_back(Int8PtrTy);
+  }
+
+  bool Packed = false;
+  CharUnits Align = getContext().getDeclAlign(D);
+  if (Align > CharUnits::fromQuantity(Target.getPointerAlign(0) / 8)) {
+    // We have to insert padding.
+    
+    // The struct above has 2 32-bit integers.
+    unsigned CurrentOffsetInBytes = 4 * 2;
+    
+    // And either 2 or 4 pointers.
+    CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
+      CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+    
+    // Align the offset.
+    unsigned AlignedOffsetInBytes = 
+      llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
+    
+    unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
+    if (NumPaddingBytes > 0) {
+      const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+      // FIXME: We need a sema error for alignment larger than the minimum of
+      // the maximal stack alignment and the alignment of malloc on the system.
+      if (NumPaddingBytes > 1)
+        Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
+    
+      Types.push_back(Ty);
+
+      // We want a packed struct, since we computed the padding by hand.
+      Packed = true;
+    }
+  }
+
+  // T x;
+  Types.push_back(ConvertType(Ty));
+  
+  const llvm::Type *T = llvm::StructType::get(VMContext, Types, Packed);
+  
+  // Resolve the placeholder so __forwarding points at the completed struct.
+  cast<llvm::OpaqueType>(ByRefTypeHolder.get())->refineAbstractTypeTo(T);
+  CGM.getModule().addTypeName("struct.__block_byref_" + D->getNameAsString(), 
+                              ByRefTypeHolder.get());
+  
+  Info.first = ByRefTypeHolder.get();
+  
+  // The 'T x' field is always the last one pushed.
+  Info.second = Types.size() - 1;
+  
+  return Info.first;
+}
+
+/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues depending on target.
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
+  QualType Ty = D.getType();
+  bool isByRef = D.hasAttr<BlocksAttr>();
+  bool needsDispose = false;
+  CharUnits Align = CharUnits::Zero();
+  bool IsSimpleConstantInitializer = false;
+
+  // Step 1: compute the address (DeclPtr) that will hold the variable.
+  llvm::Value *DeclPtr;
+  if (Ty->isConstantSizeType()) {
+    if (!Target.useGlobalsForAutomaticVariables()) {
+      
+      // If this value is an array or struct, is POD, and if the initializer is
+      // a statically determinable constant, try to optimize it.
+      if (D.getInit() && !isByRef &&
+          (Ty->isArrayType() || Ty->isRecordType()) &&
+          Ty->isPODType() &&
+          D.getInit()->isConstantInitializer(getContext())) {
+        // If this variable is marked 'const', emit the value as a global.
+        if (CGM.getCodeGenOpts().MergeAllConstants &&
+            Ty.isConstant(getContext())) {
+          EmitStaticBlockVarDecl(D, llvm::GlobalValue::InternalLinkage);
+          return;
+        }
+        
+        IsSimpleConstantInitializer = true;
+      }
+      
+      // A normal fixed sized variable becomes an alloca in the entry block.
+      const llvm::Type *LTy = ConvertTypeForMem(Ty);
+      if (isByRef)
+        LTy = BuildByRefType(&D);
+      llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+      Alloc->setName(D.getNameAsString());
+
+      // __block variables need at least pointer alignment for the header.
+      Align = getContext().getDeclAlign(&D);
+      if (isByRef)
+        Align = std::max(Align, 
+            CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
+      Alloc->setAlignment(Align.getQuantity());
+      DeclPtr = Alloc;
+    } else {
+      // Targets that don't support recursion emit locals as globals.
+      const char *Class =
+        D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
+      DeclPtr = CreateStaticBlockVarDecl(D, Class,
+                                         llvm::GlobalValue
+                                         ::InternalLinkage);
+    }
+
+    // FIXME: Can this happen?
+    if (Ty->isVariablyModifiedType())
+      EmitVLASize(Ty);
+  } else {
+    // Variable-sized: allocate with a dynamic alloca in the current block.
+    EnsureInsertPoint();
+
+    if (!DidCallStackSave) {
+      // Save the stack so it can be restored when the VLAs go out of scope.
+      const llvm::Type *LTy = llvm::Type::getInt8PtrTy(VMContext);
+      llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
+
+      llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+      llvm::Value *V = Builder.CreateCall(F);
+
+      Builder.CreateStore(V, Stack);
+
+      DidCallStackSave = true;
+
+      {
+        // Push a cleanup block and restore the stack there.
+        DelayedCleanupBlock scope(*this);
+
+        V = Builder.CreateLoad(Stack, "tmp");
+        llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+        Builder.CreateCall(F, V);
+      }
+    }
+
+    // Get the element type.
+    const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
+    const llvm::Type *LElemPtrTy =
+      llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
+
+    llvm::Value *VLASize = EmitVLASize(Ty);
+
+    // Downcast the VLA size expression
+    VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
+                                    false, "tmp");
+
+    // Allocate memory for the array.
+    llvm::AllocaInst *VLA = 
+      Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext), VLASize, "vla");
+    VLA->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
+    DeclPtr = Builder.CreateBitCast(VLA, LElemPtrTy, "tmp");
+  }
+
+  llvm::Value *&DMEntry = LocalDeclMap[&D];
+  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+  DMEntry = DeclPtr;
+
+  // Emit debug info for local var declaration.
+  if (CGDebugInfo *DI = getDebugInfo()) {
+    assert(HaveInsertPoint() && "Unexpected unreachable point!");
+
+    DI->setLocation(D.getLocation());
+    if (Target.useGlobalsForAutomaticVariables()) {
+      DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+    } else
+      DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+  }
+
+  // Step 2: if this local has an initializer, emit it now.
+  const Expr *Init = D.getInit();
+
+  // If we are at an unreachable point, we don't need to emit the initializer
+  // unless it contains a label.
+  if (!HaveInsertPoint()) {
+    if (!ContainsLabel(Init))
+      Init = 0;
+    else
+      EnsureInsertPoint();
+  }
+
+  if (Init) {
+    llvm::Value *Loc = DeclPtr;
+    if (isByRef)
+      // The real variable lives in the 'T x' field of the byref struct.
+      Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D), 
+                                    D.getNameAsString());
+
+    bool isVolatile =
+      getContext().getCanonicalType(D.getType()).isVolatileQualified();
+    
+    // If the initializer was a simple constant initializer, we can optimize it
+    // in various ways.
+    if (IsSimpleConstantInitializer) {
+      llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(),D.getType(),this);
+      assert(Init != 0 && "Wasn't a simple constant init?");
+      
+      llvm::Value *AlignVal = 
+        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 
+            Align.getQuantity());
+      const llvm::Type *IntPtr =
+        llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+      llvm::Value *SizeVal =
+        llvm::ConstantInt::get(IntPtr, 
+            getContext().getTypeSizeInChars(Ty).getQuantity());
+
+      const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+      if (Loc->getType() != BP)
+        Loc = Builder.CreateBitCast(Loc, BP, "tmp");
+      
+      // If the initializer is all zeros, codegen with memset.
+      if (isa<llvm::ConstantAggregateZero>(Init)) {
+        llvm::Value *Zero =
+          llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0);
+        Builder.CreateCall4(CGM.getMemSetFn(), Loc, Zero, SizeVal, AlignVal);
+      } else {
+        // Otherwise, create a temporary global with the initializer then 
+        // memcpy from the global to the alloca.
+        std::string Name = GetStaticDeclName(*this, D, ".");
+        llvm::GlobalVariable *GV =
+          new llvm::GlobalVariable(CGM.getModule(), Init->getType(), true,
+                                   llvm::GlobalValue::InternalLinkage,
+                                   Init, Name, 0, false, 0);
+        GV->setAlignment(Align.getQuantity());
+
+        llvm::Value *SrcPtr = GV;
+        if (SrcPtr->getType() != BP)
+          SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
+        
+        Builder.CreateCall4(CGM.getMemCpyFn(), Loc, SrcPtr, SizeVal, AlignVal);
+      }
+    } else if (Ty->isReferenceType()) {
+      RValue RV = EmitReferenceBindingToExpr(Init, /*IsInitializer=*/true);
+      EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
+    } else if (!hasAggregateLLVMType(Init->getType())) {
+      llvm::Value *V = EmitScalarExpr(Init);
+      EmitStoreOfScalar(V, Loc, isVolatile, D.getType());
+    } else if (Init->getType()->isAnyComplexType()) {
+      EmitComplexExprIntoAddr(Init, Loc, isVolatile);
+    } else {
+      EmitAggExpr(Init, Loc, isVolatile);
+    }
+  }
+
+  // Step 3: for __block variables, fill in the byref structure header fields
+  // (__isa, __forwarding, __flags, __size, and the copy/dispose helpers).
+  if (isByRef) {
+    const llvm::PointerType *PtrToInt8Ty = llvm::Type::getInt8PtrTy(VMContext);
+
+    EnsureInsertPoint();
+    llvm::Value *isa_field = Builder.CreateStructGEP(DeclPtr, 0);
+    llvm::Value *forwarding_field = Builder.CreateStructGEP(DeclPtr, 1);
+    llvm::Value *flags_field = Builder.CreateStructGEP(DeclPtr, 2);
+    llvm::Value *size_field = Builder.CreateStructGEP(DeclPtr, 3);
+    llvm::Value *V;
+    int flag = 0;
+    int flags = 0;
+
+    needsDispose = true;
+
+    if (Ty->isBlockPointerType()) {
+      flag |= BLOCK_FIELD_IS_BLOCK;
+      flags |= BLOCK_HAS_COPY_DISPOSE;
+    } else if (BlockRequiresCopying(Ty)) {
+      flag |= BLOCK_FIELD_IS_OBJECT;
+      flags |= BLOCK_HAS_COPY_DISPOSE;
+    }
+
+    // FIXME: Someone double check this.
+    if (Ty.isObjCGCWeak())
+      flag |= BLOCK_FIELD_IS_WEAK;
+
+    int isa = 0;
+    if (flag&BLOCK_FIELD_IS_WEAK)
+      isa = 1;
+    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
+    V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
+    Builder.CreateStore(V, isa_field);
+
+    // __forwarding initially points at the structure itself.
+    Builder.CreateStore(DeclPtr, forwarding_field);
+
+    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
+    Builder.CreateStore(V, flags_field);
+
+    const llvm::Type *V1;
+    V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
+    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                               CGM.GetTargetTypeStoreSize(V1).getQuantity());
+    Builder.CreateStore(V, size_field);
+
+    if (flags & BLOCK_HAS_COPY_DISPOSE) {
+      BlockHasCopyDispose = true;
+      llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
+      Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, 
+                                               Align.getQuantity()),
+                          copy_helper);
+
+      llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
+      Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
+                                                  Align.getQuantity()),
+                          destroy_helper);
+    }
+  }
+
+  // Step 4: handle CXX destruction of variables.
+  QualType DtorTy(Ty);
+  while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
+    DtorTy = getContext().getBaseElementType(Array);
+  if (const RecordType *RT = DtorTy->getAs<RecordType>())
+    if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+      if (!ClassDecl->hasTrivialDestructor()) {
+        const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
+        assert(D && "EmitLocalBlockVarDecl - destructor is nul");
+        
+        if (const ConstantArrayType *Array = 
+              getContext().getAsConstantArrayType(Ty)) {
+          {
+            // Normal-path cleanup: destroy every array element.
+            DelayedCleanupBlock Scope(*this);
+            QualType BaseElementTy = getContext().getBaseElementType(Array);
+            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+            BasePtr = llvm::PointerType::getUnqual(BasePtr);
+            llvm::Value *BaseAddrPtr =
+              Builder.CreateBitCast(DeclPtr, BasePtr);
+            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+          
+            // Make sure to jump to the exit block.
+            EmitBranch(Scope.getCleanupExitBlock());
+          }
+          if (Exceptions) {
+            // Exception-path cleanup mirrors the normal path.
+            EHCleanupBlock Cleanup(*this);
+            QualType BaseElementTy = getContext().getBaseElementType(Array);
+            const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+            BasePtr = llvm::PointerType::getUnqual(BasePtr);
+            llvm::Value *BaseAddrPtr =
+              Builder.CreateBitCast(DeclPtr, BasePtr);
+            EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+          }
+        } else {
+          {
+            DelayedCleanupBlock Scope(*this);
+            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
+
+            // Make sure to jump to the exit block.
+            EmitBranch(Scope.getCleanupExitBlock());
+          }
+          if (Exceptions) {
+            EHCleanupBlock Cleanup(*this);
+            EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
+          }
+        }
+      }
+  }
+
+  // Handle the cleanup attribute
+  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+    const FunctionDecl *FD = CA->getFunctionDecl();
+
+    llvm::Constant* F = CGM.GetAddrOfFunction(FD);
+    assert(F && "Could not find function!");
+
+    const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
+
+    // In some cases, the type of the function argument will be different from
+    // the type of the pointer. An example of this is
+    // void f(void* arg);
+    // __attribute__((cleanup(f))) void *g;
+    //
+    // To fix this we insert a bitcast here.
+    QualType ArgTy = Info.arg_begin()->type;
+    {
+      DelayedCleanupBlock scope(*this);
+
+      CallArgList Args;
+      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
+                                                           ConvertType(ArgTy))),
+                                    getContext().getPointerType(D.getType())));
+      EmitCall(Info, F, ReturnValueSlot(), Args);
+    }
+    if (Exceptions) {
+      EHCleanupBlock Cleanup(*this);
+
+      CallArgList Args;
+      Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
+                                                           ConvertType(ArgTy))),
+                                    getContext().getPointerType(D.getType())));
+      EmitCall(Info, F, ReturnValueSlot(), Args);
+    }
+  }
+
+  // Step 5: release the __block structure when it goes out of scope.
+  if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
+    {
+      DelayedCleanupBlock scope(*this);
+      // Release through __forwarding in case the variable was moved to the
+      // heap by a Block_copy.
+      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+      V = Builder.CreateLoad(V);
+      BuildBlockRelease(V);
+    }
+    // FIXME: Turn this on and audit the codegen
+    if (0 && Exceptions) {
+      EHCleanupBlock Cleanup(*this);
+      llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+      V = Builder.CreateLoad(V);
+      BuildBlockRelease(V);
+    }
+  }
+}
+
+/// EmitParmDecl - Emit an alloca (or GlobalValue depending on target) for the
+/// specified parameter and set up LocalDeclMap.  Scalars are spilled to a
+/// stack slot; aggregates and variable-sized values reuse the incoming
+/// pointer directly.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
+  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+         "Invalid argument to EmitParmDecl");
+  QualType Ty = D.getType();
+  CanQualType CTy = getContext().getCanonicalType(Ty);
+
+  // Aggregates and non-constant-size values already arrive as a pointer,
+  // which can serve as the parameter's address without a copy.
+  const bool ReuseArg = !Ty->isConstantSizeType() ||
+                        CodeGenFunction::hasAggregateLLVMType(Ty);
+
+  llvm::Value *Addr;
+  if (ReuseArg) {
+    Addr = Arg;
+  } else {
+    // Scalars get a temporary slot; store the incoming value into it.
+    Addr = CreateMemTemp(Ty, D.getName() + ".addr");
+    EmitStoreOfScalar(Arg, Addr, CTy.isVolatileQualified(), Ty);
+  }
+  Arg->setName(D.getName());
+
+  llvm::Value *&Entry = LocalDeclMap[&D];
+  assert(Entry == 0 && "Decl already exists in localdeclmap!");
+  Entry = Addr;
+
+  // Emit debug info for the parameter declaration.
+  if (CGDebugInfo *DI = getDebugInfo()) {
+    DI->setLocation(D.getLocation());
+    DI->EmitDeclareOfArgVariable(&D, Addr, Builder);
+  }
+}
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
new file mode 100644
index 0000000..0de3b0b
--- /dev/null
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -0,0 +1,303 @@
+//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ declarations
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Emit the initialization of a single non-reference global variable:
+/// a scalar store, a complex store, or an aggregate construction.  For
+/// class-type objects built by a constructor, also register the
+/// destructor to run at program exit (via EmitCXXGlobalDtorRegistration).
+static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
+                         llvm::Constant *DeclPtr) {
+  assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+  assert(!D.getType()->isReferenceType() && 
+         "Should not call EmitDeclInit on a reference!");
+  
+  CodeGenModule &CGM = CGF.CGM;
+  ASTContext &Context = CGF.getContext();
+    
+  const Expr *Init = D.getInit();
+  QualType T = D.getType();
+  bool isVolatile = Context.getCanonicalType(T).isVolatileQualified();
+
+  if (!CGF.hasAggregateLLVMType(T)) {
+    llvm::Value *V = CGF.EmitScalarExpr(Init);
+    CGF.EmitStoreOfScalar(V, DeclPtr, isVolatile, T);
+  } else if (T->isAnyComplexType()) {
+    CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
+  } else {
+    CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
+    
+    // Avoid generating destructor(s) for initialized objects. 
+    if (!isa<CXXConstructExpr>(Init))
+      return;
+    
+    // For arrays, the destructor of interest is that of the element type.
+    const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
+    if (Array)
+      T = Context.getBaseElementType(Array);
+    
+    const RecordType *RT = T->getAs<RecordType>();
+    if (!RT)
+      return;
+    
+    CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+    if (RD->hasTrivialDestructor())
+      return;
+    
+    CXXDestructorDecl *Dtor = RD->getDestructor(Context);
+    
+    llvm::Constant *DtorFn;
+    if (Array) {
+      // Synthesize a helper that destroys each array element; the helper
+      // bakes in the address, so the atexit argument becomes a null i8*.
+      DtorFn = 
+        CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor, 
+                                                             Array, 
+                                                             DeclPtr);
+      const llvm::Type *Int8PtrTy =
+        llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+      DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
+     } else
+      DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);                                
+
+    CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
+  }
+}
+
+/// Emit the initializer for a C++ global variable.  Non-reference
+/// globals are initialized in place; reference globals bind to an
+/// lvalue and store its address.
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+                                               llvm::Constant *DeclPtr) {
+
+  QualType VarType = D.getType();
+  const Expr *InitExpr = D.getInit();
+
+  if (!VarType->isReferenceType()) {
+    EmitDeclInit(*this, D, DeclPtr);
+    return;
+  }
+
+  if (InitExpr->isLvalue(getContext()) != Expr::LV_Valid) {
+    // Binding a reference to a temporary at global scope is not
+    // implemented yet.
+    ErrorUnsupported(InitExpr, 
+                     "global variable that binds reference to a non-lvalue");
+    return;
+  }
+
+  // Bind the reference and store the referent's address into the global.
+  RValue RV = EmitReferenceBindingToExpr(InitExpr, /*IsInitializer=*/true);
+  EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, VarType);
+}
+
+/// Register a global variable's destructor to run at program exit by
+/// emitting a call to __cxa_atexit(dtor, addr, __dso_handle).
+void
+CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+                                               llvm::Constant *DeclPtr) {
+  const llvm::Type *Int8PtrTy = 
+    llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+
+  std::vector<const llvm::Type *> Params;
+  Params.push_back(Int8PtrTy);
+
+  // Get the destructor function type
+  const llvm::Type *DtorFnTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+  DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
+
+  // Reuse Params for the three __cxa_atexit parameters.
+  Params.clear();
+  Params.push_back(DtorFnTy);
+  Params.push_back(Int8PtrTy);
+  Params.push_back(Int8PtrTy);
+
+  // Get the __cxa_atexit function type
+  // extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
+  const llvm::FunctionType *AtExitFnTy =
+    llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
+
+  llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
+                                                       "__cxa_atexit");
+
+  // __dso_handle identifies this DSO to the runtime.
+  llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
+                                                     "__dso_handle");
+  llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
+                           llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
+                           llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
+  Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
+}
+
+/// Create a per-variable initialization function for a C++ global and
+/// queue it in CXXGlobalInits so EmitCXXGlobalInitFunc can call it.
+void
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D) {
+  // void () function type.
+  const llvm::FunctionType *FTy
+    = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+                              false);
+
+  // Create a variable initialization function.
+  llvm::Function *Fn =
+    llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+                           "__cxx_global_var_init", &TheModule);
+
+  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D);
+
+  // Remember it for the module-level initializer.
+  CXXGlobalInits.push_back(Fn);
+}
+
+/// Emit the module-level initialization function that invokes every
+/// queued per-variable initializer, and register it as a global ctor.
+void
+CodeGenModule::EmitCXXGlobalInitFunc() {
+  if (CXXGlobalInits.empty())
+    return;
+
+  // void () function type.
+  const llvm::FunctionType *FTy
+    = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+                              false);
+
+  // Create our global initialization function.
+  // FIXME: Should this be tweakable by targets?
+  llvm::Function *Fn =
+    llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+                           "__cxx_global_initialization", &TheModule);
+
+  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+                                                   &CXXGlobalInits[0],
+                                                   CXXGlobalInits.size());
+  // Arrange for the runtime to run Fn before main().
+  AddGlobalCtor(Fn);
+}
+
+/// Fill in the body of a per-variable initialization function: look up
+/// the global's address and emit its initializer into it.
+void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+                                                       const VarDecl *D) {
+  StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+                SourceLocation());
+
+  llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
+  EmitCXXGlobalVarDeclInit(*D, DeclPtr);
+
+  FinishFunction();
+}
+
+/// Fill in the body of the module-level initialization function: call
+/// each of the NumDecls per-variable initializers in order.
+void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+                                                llvm::Constant **Decls,
+                                                unsigned NumDecls) {
+  StartFunction(GlobalDecl(), getContext().VoidTy, Fn, FunctionArgList(),
+                SourceLocation());
+
+  // Emit one call per queued initializer, preserving declaration order.
+  for (unsigned Idx = 0; Idx < NumDecls; ++Idx)
+    Builder.CreateCall(Decls[Idx]);
+
+  FinishFunction();
+}
+
+// Get or create the declaration of the Itanium ABI guard-acquire entry:
+//   int __cxa_guard_acquire(__int64_t *guard_object);
+static llvm::Constant *getGuardAcquireFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+
+  std::vector<const llvm::Type*> ArgTys;
+  ArgTys.push_back(llvm::Type::getInt64PtrTy(Ctx));
+
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(CGF.ConvertType(CGF.getContext().IntTy),
+                            ArgTys, /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_guard_acquire");
+}
+
+// Get or create the declaration of the Itanium ABI guard-release entry:
+//   void __cxa_guard_release(__int64_t *guard_object);
+static llvm::Constant *getGuardReleaseFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+
+  std::vector<const llvm::Type*> ArgTys;
+  ArgTys.push_back(llvm::Type::getInt64PtrTy(Ctx));
+
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
+                            ArgTys, /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_guard_release");
+}
+
+// Get or create the declaration of the Itanium ABI guard-abort entry:
+//   void __cxa_guard_abort(__int64_t *guard_object);
+static llvm::Constant *getGuardAbortFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+
+  std::vector<const llvm::Type*> ArgTys;
+  ArgTys.push_back(llvm::Type::getInt64PtrTy(Ctx));
+
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx),
+                            ArgTys, /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_guard_abort");
+}
+
+/// Emit lazy, once-only initialization for a function-local static
+/// variable, guarded by a 64-bit guard variable per the Itanium ABI.
+/// With -fthreadsafe-statics the __cxa_guard_* runtime entries arbitrate
+/// concurrent initialization; otherwise the guard's first byte is
+/// checked and set directly.
+void
+CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+                                               llvm::GlobalVariable *GV) {
+  bool ThreadsafeStatics = getContext().getLangOptions().ThreadsafeStatics;
+  
+  llvm::SmallString<256> GuardVName;
+  CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);
+
+  // Create the guard variable.
+  const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(VMContext);
+  llvm::GlobalValue *GuardVariable =
+    new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
+                             false, GV->getLinkage(),
+                             llvm::Constant::getNullValue(Int64Ty),
+                             GuardVName.str());
+
+  // Load the first byte of the guard variable.
+  const llvm::Type *PtrTy
+    = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
+  llvm::Value *V = 
+    Builder.CreateLoad(Builder.CreateBitCast(GuardVariable, PtrTy), "tmp");
+
+  llvm::BasicBlock *InitCheckBlock = createBasicBlock("init.check");
+  llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
+
+  // Check if the first byte of the guard variable is zero.
+  // Nonzero means initialization already completed; skip to the end.
+  Builder.CreateCondBr(Builder.CreateIsNull(V, "tobool"), 
+                       InitCheckBlock, EndBlock);
+
+  EmitBlock(InitCheckBlock);
+
+  if (ThreadsafeStatics) {
+    // Call __cxa_guard_acquire.  A nonzero return means we won the race
+    // and must perform the initialization.
+    V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
+               
+    llvm::BasicBlock *InitBlock = createBasicBlock("init");
+  
+    Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+                         InitBlock, EndBlock);
+  
+    EmitBlock(InitBlock);
+
+    if (Exceptions) {
+      // If the initializer throws, the guard must be released as
+      // "not initialized" so another thread can retry.
+      EHCleanupBlock Cleanup(*this);
+    
+      // Call __cxa_guard_abort.
+      Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
+    }
+  }
+
+  if (D.getType()->isReferenceType()) {
+    QualType T = D.getType();
+    // We don't want to pass true for IsInitializer here, because a static
+    // reference to a temporary does not extend its lifetime.
+    RValue RV = EmitReferenceBindingToExpr(D.getInit(),
+                                           /*IsInitializer=*/false);
+    EmitStoreOfScalar(RV.getScalarVal(), GV, /*Volatile=*/false, T);
+
+  } else
+    EmitDeclInit(*this, D, GV);
+
+  if (ThreadsafeStatics) {
+    // Call __cxa_guard_release.
+    Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
+  } else {
+    // Single-threaded model: mark the guard done by storing 1 into its
+    // first byte.
+    llvm::Value *One = 
+      llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
+    Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
+  }
+
+  EmitBlock(EndBlock);
+}
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
new file mode 100644
index 0000000..60a1016
--- /dev/null
+++ b/lib/CodeGen/CGException.cpp
@@ -0,0 +1,779 @@
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtCXX.h"
+
+#include "llvm/Intrinsics.h"
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+// Get or create the declaration of the exception allocator:
+//   void *__cxa_allocate_exception(size_t thrown_size);
+static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+  const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+
+  std::vector<const llvm::Type*> ArgTys(1, SizeTy);
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(llvm::Type::getInt8PtrTy(Ctx), ArgTys,
+                            /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_allocate_exception");
+}
+
+// Get or create the declaration of:
+//   void __cxa_free_exception(void *thrown_exception);
+static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+
+  std::vector<const llvm::Type*> ArgTys(1, llvm::Type::getInt8PtrTy(Ctx));
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), ArgTys,
+                            /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_free_exception");
+}
+
+// Get or create the declaration of:
+//   void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
+//                    void (*dest)(void *));
+// All three parameters are lowered to i8*.
+static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+
+  std::vector<const llvm::Type*> ArgTys(3, llvm::Type::getInt8PtrTy(Ctx));
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), ArgTys,
+                            /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_throw");
+}
+
+// Get or create the declaration of: void __cxa_rethrow();
+static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_rethrow");
+}
+
+static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
+  // void *__cxa_begin_catch(void *exc_obj_in);
+  // (Takes the exception object from the landing pad and returns the
+  // adjusted object pointer.)
+
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+  std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+  const llvm::FunctionType *FTy =
+    llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
+}
+
+// Get or create the declaration of: void __cxa_end_catch();
+static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
+  llvm::LLVMContext &Ctx = CGF.getLLVMContext();
+
+  const llvm::FunctionType *FnTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), /*isVarArg=*/false);
+
+  return CGF.CGM.CreateRuntimeFunction(FnTy, "__cxa_end_catch");
+}
+
+static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
+  // void __cxa_call_unexpected(void *thrown_exception);
+
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+  std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+  const llvm::FunctionType *FTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()),
+                            Args, false);
+
+  return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
+}
+
+// FIXME: Eventually this will all go into the backend.  Set from the target for
+// now.  Nonzero selects the setjmp/longjmp-based unwinder entry points.
+static int using_sjlj_exceptions = 0;
+
+// Get the unwinder entry used to resume propagation of an exception
+// that was not handled here:
+//   void _Unwind_Resume_or_Rethrow(void *exception_object);
+// (or _Unwind_SjLj_Resume under setjmp/longjmp exceptions).
+static llvm::Constant *getUnwindResumeOrRethrowFn(CodeGenFunction &CGF) {
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+  std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+  const llvm::FunctionType *FTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), Args,
+                            false);
+
+  if (using_sjlj_exceptions)
+    return CGF.CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
+  return CGF.CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
+}
+
+static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
+  // void std::terminate();  "_ZSt9terminatev" is its Itanium mangling.
+
+  const llvm::FunctionType *FTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
+
+  return CGF.CGM.CreateRuntimeFunction(FTy, "_ZSt9terminatev");
+}
+
+// CopyObject - Utility to copy an object.  Calls copy constructor as necessary.
+// DestPtr is casted to the right type.  Used when constructing the
+// thrown object inside the __cxa_allocate_exception buffer;
+// ExceptionPtrPtr holds the buffer's address for the EH cleanup path.
+static void CopyObject(CodeGenFunction &CGF, const Expr *E, 
+                       llvm::Value *DestPtr, llvm::Value *ExceptionPtrPtr) {
+  QualType ObjectType = E->getType();
+
+  // Store the throw exception in the exception object.
+  if (!CGF.hasAggregateLLVMType(ObjectType)) {
+    llvm::Value *Value = CGF.EmitScalarExpr(E);
+    const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo();
+
+    CGF.Builder.CreateStore(Value, 
+                            CGF.Builder.CreateBitCast(DestPtr, ValuePtrTy));
+  } else {
+    const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo();
+    const CXXRecordDecl *RD =
+      cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
+    
+    llvm::Value *This = CGF.Builder.CreateBitCast(DestPtr, Ty);
+    if (RD->hasTrivialCopyConstructor()) {
+      CGF.EmitAggExpr(E, This, false);
+    } else if (CXXConstructorDecl *CopyCtor
+               = RD->getCopyConstructor(CGF.getContext(), 0)) {
+      // CondPtr is an i1 flag read by the EH cleanup below: it is true
+      // only while the source lvalue is being evaluated, so the
+      // exception buffer is freed only if that evaluation throws.
+      llvm::Value *CondPtr = 0;
+      if (CGF.Exceptions) {
+        CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+        llvm::Constant *FreeExceptionFn = getFreeExceptionFn(CGF);
+        
+        llvm::BasicBlock *CondBlock = CGF.createBasicBlock("cond.free");
+        llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+        CondPtr = CGF.CreateTempAlloca(llvm::Type::getInt1Ty(CGF.getLLVMContext()),
+                                       "doEHfree");
+
+        CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CondPtr),
+                                 CondBlock, Cont);
+        CGF.EmitBlock(CondBlock);
+
+        // Load the exception pointer.
+        llvm::Value *ExceptionPtr = CGF.Builder.CreateLoad(ExceptionPtrPtr);
+        CGF.Builder.CreateCall(FreeExceptionFn, ExceptionPtr);
+
+        CGF.EmitBlock(Cont);
+      }
+
+      if (CondPtr)
+        CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
+                                CondPtr);
+
+      llvm::Value *Src = CGF.EmitLValue(E).getAddress();
+        
+      if (CondPtr)
+        CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
+                                CondPtr);
+
+      // If the copy constructor itself throws, [except.throw] requires
+      // std::terminate, so route its unwinding to the terminate handler.
+      llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
+      llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+      CGF.setInvokeDest(TerminateHandler);
+
+      // Stolen from EmitClassAggrMemberwiseCopy
+      llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
+                                                            Ctor_Complete);
+      CallArgList CallArgs;
+      CallArgs.push_back(std::make_pair(RValue::get(This),
+                                      CopyCtor->getThisType(CGF.getContext())));
+
+      // Push the Src ptr.
+      CallArgs.push_back(std::make_pair(RValue::get(Src),
+                                        CopyCtor->getParamDecl(0)->getType()));
+      const FunctionProtoType *FPT
+        = CopyCtor->getType()->getAs<FunctionProtoType>();
+      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+                   Callee, ReturnValueSlot(), CallArgs, CopyCtor);
+      // Restore the previous landing pad.
+      CGF.setInvokeDest(PrevLandingPad);
+    } else
+      llvm_unreachable("uncopyable object");
+  }
+}
+
+// CopyObject - Utility to copy an object.  Calls copy constructor as necessary.
+// N is casted to the right type.  Used to bind a catch parameter from
+// the in-flight exception object E.  WasPointer/WasPointerReference
+// describe how the catch clause declared the parameter relative to the
+// caught type (see the caller in EmitCXXTryStmt).
+static void CopyObject(CodeGenFunction &CGF, QualType ObjectType,
+                       bool WasPointer, bool WasPointerReference,
+                       llvm::Value *E, llvm::Value *N) {
+  // Store the throw exception in the exception object.
+  if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) {
+    llvm::Value *Value = E;
+    if (!WasPointer)
+      Value = CGF.Builder.CreateLoad(Value);
+    const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
+    if (WasPointerReference) {
+      // Catch-by-reference-to-pointer: materialize a temporary that
+      // holds the pointer and bind the reference to it.
+      llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param");
+      CGF.Builder.CreateStore(Value, Tmp);
+      Value = Tmp;
+      ValuePtrTy = Value->getType()->getPointerTo(0);
+    }
+    N = CGF.Builder.CreateBitCast(N, ValuePtrTy);
+    CGF.Builder.CreateStore(Value, N);
+  } else {
+    const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0);
+    const CXXRecordDecl *RD;
+    RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
+    llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty);
+    if (RD->hasTrivialCopyConstructor()) {
+      CGF.EmitAggregateCopy(This, E, ObjectType);
+    } else if (CXXConstructorDecl *CopyCtor
+               = RD->getCopyConstructor(CGF.getContext(), 0)) {
+      llvm::Value *Src = E;
+
+      // Stolen from EmitClassAggrMemberwiseCopy
+      llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
+                                                            Ctor_Complete);
+      CallArgList CallArgs;
+      CallArgs.push_back(std::make_pair(RValue::get(This),
+                                      CopyCtor->getThisType(CGF.getContext())));
+
+      // Push the Src ptr.
+      CallArgs.push_back(std::make_pair(RValue::get(Src),
+                                        CopyCtor->getParamDecl(0)->getType()));
+
+      const FunctionProtoType *FPT
+        = CopyCtor->getType()->getAs<FunctionProtoType>();
+      CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+                   Callee, ReturnValueSlot(), CallArgs, CopyCtor);
+    } else
+      llvm_unreachable("uncopyable object");
+  }
+}
+
+/// Emit a throw-expression: allocate the exception object, copy the
+/// operand into it, and call __cxa_throw (or __cxa_rethrow when the
+/// expression has no operand).  Control never returns.
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
+  if (!E->getSubExpr()) {
+    // 'throw;' — rethrow the currently handled exception.
+    if (getInvokeDest()) {
+      llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+      Builder.CreateInvoke(getReThrowFn(*this), Cont, getInvokeDest())
+        ->setDoesNotReturn();
+      EmitBlock(Cont);
+    } else
+      Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
+    Builder.CreateUnreachable();
+
+    // Clear the insertion point to indicate we are in unreachable code.
+    Builder.ClearInsertionPoint();
+    return;
+  }
+
+  QualType ThrowType = E->getSubExpr()->getType();
+
+  // Now allocate the exception object.
+  const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+  // getTypeSize is in bits; convert to bytes.
+  uint64_t TypeSize = getContext().getTypeSize(ThrowType) / 8;
+
+  llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
+  llvm::Value *ExceptionPtr =
+    Builder.CreateCall(AllocExceptionFn,
+                       llvm::ConstantInt::get(SizeTy, TypeSize),
+                       "exception");
+  
+  // Spill the buffer address so the copy's EH cleanup can free it.
+  llvm::Value *ExceptionPtrPtr = 
+    CreateTempAlloca(ExceptionPtr->getType(), "exception.ptr");
+  Builder.CreateStore(ExceptionPtr, ExceptionPtrPtr);
+
+
+  CopyObject(*this, E->getSubExpr(), ExceptionPtr, ExceptionPtrPtr);
+
+  // Now throw the exception.
+  // NOTE(review): the destructor argument is always null here — types
+  // with non-trivial destructors presumably need a real dtor pointer;
+  // confirm against later revisions.
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+  llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType);
+  llvm::Constant *Dtor = llvm::Constant::getNullValue(Int8PtrTy);
+
+  if (getInvokeDest()) {
+    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+    llvm::InvokeInst *ThrowCall =
+      Builder.CreateInvoke3(getThrowFn(*this), Cont, getInvokeDest(),
+                            ExceptionPtr, TypeInfo, Dtor);
+    ThrowCall->setDoesNotReturn();
+    EmitBlock(Cont);
+  } else {
+    llvm::CallInst *ThrowCall =
+      Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
+    ThrowCall->setDoesNotReturn();
+  }
+  Builder.CreateUnreachable();
+
+  // Clear the insertion point to indicate we are in unreachable code.
+  Builder.ClearInsertionPoint();
+
+  // FIXME: For now, emit a dummy basic block because expr emitters in generally
+  // are not ready to handle emitting expressions at unreachable points.
+  EnsureInsertPoint();
+}
+
+/// Set up the landing pad for a function with a throw() exception
+/// specification: any exception unwinding out of the body is run
+/// through an eh.selector filter; mismatches call __cxa_call_unexpected,
+/// matches are resumed to the caller.  EmitEndEHSpec tears this down.
+void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
+  if (!Exceptions)
+    return;
+  
+  const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+  if (FD == 0)
+    return;
+  const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+  if (Proto == 0)
+    return;
+
+  assert(!Proto->hasAnyExceptionSpec() && "function with parameter pack");
+
+  if (!Proto->hasExceptionSpec())
+    return;
+
+  llvm::Constant *Personality =
+    CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
+                                                      (VMContext),
+                                                      true),
+                              "__gxx_personality_v0");
+  // NOTE(review): this uses the PtrToInt8Ty *member*; the identically
+  // named local declared just below shadows it for the rest of the
+  // function — confirm the shadowing is intentional.
+  Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+  llvm::Value *llvm_eh_exception =
+    CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+  llvm::Value *llvm_eh_selector =
+    CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+  const llvm::IntegerType *Int8Ty;
+  const llvm::PointerType *PtrToInt8Ty;
+  Int8Ty = llvm::Type::getInt8Ty(VMContext);
+  // C string type.  Used in lots of places.
+  PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+  llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+  llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+
+  llvm::BasicBlock *PrevLandingPad = getInvokeDest();
+  llvm::BasicBlock *EHSpecHandler = createBasicBlock("ehspec.handler");
+  llvm::BasicBlock *Match = createBasicBlock("match");
+  llvm::BasicBlock *Unwind = 0;
+
+  assert(PrevLandingPad == 0 && "EHSpec has invoke context");
+  (void)PrevLandingPad;
+
+  llvm::BasicBlock *Cont = createBasicBlock("cont");
+
+  EmitBranchThroughCleanup(Cont);
+
+  // Emit the statements in the try {} block
+  setInvokeDest(EHSpecHandler);
+
+  EmitBlock(EHSpecHandler);
+  // Exception object
+  llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+  llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
+
+  // Build the eh.selector call: exception, personality, then the filter
+  // length followed by the RTTI of each permitted exception type.
+  SelectorArgs.push_back(Exc);
+  SelectorArgs.push_back(Personality);
+  SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                                Proto->getNumExceptions()+1));
+
+  for (unsigned i = 0; i < Proto->getNumExceptions(); ++i) {
+    QualType Ty = Proto->getExceptionType(i);
+    QualType ExceptType
+      = Ty.getNonReferenceType().getUnqualifiedType();
+    llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType);
+    SelectorArgs.push_back(EHType);
+  }
+  if (Proto->getNumExceptions())
+    SelectorArgs.push_back(Null);
+
+  // Find which handler was matched.
+  llvm::Value *Selector
+    = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
+                         SelectorArgs.end(), "selector");
+  if (Proto->getNumExceptions()) {
+    Unwind = createBasicBlock("Unwind");
+
+    Builder.CreateStore(Exc, RethrowPtr);
+    // A negative selector indicates the filter rejected the exception:
+    // branch to Match to call __cxa_call_unexpected; otherwise resume
+    // unwinding to the caller.
+    Builder.CreateCondBr(Builder.CreateICmpSLT(Selector,
+                                               llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                                                      0)),
+                         Match, Unwind);
+
+    EmitBlock(Match);
+  }
+  Builder.CreateCall(getUnexpectedFn(*this), Exc)->setDoesNotReturn();
+  Builder.CreateUnreachable();
+
+  if (Proto->getNumExceptions()) {
+    EmitBlock(Unwind);
+    Builder.CreateCall(getUnwindResumeOrRethrowFn(*this),
+                       Builder.CreateLoad(RethrowPtr));
+    Builder.CreateUnreachable();
+  }
+
+  EmitBlock(Cont);
+}
+
+/// Tear down the landing pad installed by EmitStartEHSpec for a
+/// function with an exception specification.
+void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
+  if (!Exceptions)
+    return;
+
+  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+  if (!FD)
+    return;
+
+  const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+  if (!Proto || !Proto->hasExceptionSpec())
+    return;
+
+  // Clear the invoke destination that EmitStartEHSpec installed.
+  setInvokeDest(0);
+}
+
+void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+  // Pointer to the personality function
+  llvm::Constant *Personality =
+    CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
+                                                      (VMContext),
+                                                      true),
+                              "__gxx_personality_v0");
+  Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+  llvm::Value *llvm_eh_exception =
+    CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+  llvm::Value *llvm_eh_selector =
+    CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+
+  llvm::BasicBlock *PrevLandingPad = getInvokeDest();
+  llvm::BasicBlock *TryHandler = createBasicBlock("try.handler");
+  llvm::BasicBlock *FinallyBlock = createBasicBlock("finally");
+  llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw");
+  llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end");
+
+  // Push an EH context entry, used for handling rethrows.
+  PushCleanupBlock(FinallyBlock);
+
+  // Emit the statements in the try {} block
+  setInvokeDest(TryHandler);
+
+  // FIXME: We should not have to do this here.  The AST should have the member
+  // initializers under the CXXTryStmt's TryBlock.
+  if (OuterTryBlock == &S) {
+    GlobalDecl GD = CurGD;
+    const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+    if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+      size_t OldCleanupStackSize = CleanupEntries.size();
+      EmitCtorPrologue(CD, CurGD.getCtorType());
+      EmitStmt(S.getTryBlock());
+
+      // If any of the member initializers are temporaries bound to references
+      // make sure to emit their destructors.
+      EmitCleanupBlocks(OldCleanupStackSize);
+    } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
+      llvm::BasicBlock *DtorEpilogue  = createBasicBlock("dtor.epilogue");
+      PushCleanupBlock(DtorEpilogue);
+
+      InitializeVtablePtrs(DD->getParent());
+      EmitStmt(S.getTryBlock());
+
+      CleanupBlockInfo Info = PopCleanupBlock();
+
+      assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
+      EmitBlock(DtorEpilogue);
+      EmitDtorEpilogue(DD, GD.getDtorType());
+
+      if (Info.SwitchBlock)
+        EmitBlock(Info.SwitchBlock);
+      if (Info.EndBlock)
+        EmitBlock(Info.EndBlock);
+    } else
+      EmitStmt(S.getTryBlock());
+  } else
+    EmitStmt(S.getTryBlock());
+
+  // Jump to end if there is no exception
+  EmitBranchThroughCleanup(FinallyEnd);
+
+  llvm::BasicBlock *TerminateHandler = getTerminateHandler();
+
+  // Emit the handlers
+  EmitBlock(TryHandler);
+
+  const llvm::IntegerType *Int8Ty;
+  const llvm::PointerType *PtrToInt8Ty;
+  Int8Ty = llvm::Type::getInt8Ty(VMContext);
+  // C string type.  Used in lots of places.
+  PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+  llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+  llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+  llvm::Value *llvm_eh_typeid_for =
+    CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+  // Exception object
+  llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+  llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
+
+  llvm::SmallVector<llvm::Value*, 8> Args;
+  Args.clear();
+  SelectorArgs.push_back(Exc);
+  SelectorArgs.push_back(Personality);
+
+  bool HasCatchAll = false;
+  for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
+    const CXXCatchStmt *C = S.getHandler(i);
+    VarDecl *CatchParam = C->getExceptionDecl();
+    if (CatchParam) {
+      // C++ [except.handle]p3 indicates that top-level cv-qualifiers
+      // are ignored.
+      QualType CaughtType = C->getCaughtType().getNonReferenceType();
+      llvm::Value *EHTypeInfo
+        = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType());
+      SelectorArgs.push_back(EHTypeInfo);
+    } else {
+      // null indicates catch all
+      SelectorArgs.push_back(Null);
+      HasCatchAll = true;
+    }
+  }
+
+  // We use a cleanup unless there was already a catch all.
+  if (!HasCatchAll) {
+    SelectorArgs.push_back(Null);
+  }
+
+  // Find which handler was matched.
+  llvm::Value *Selector
+    = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
+                         SelectorArgs.end(), "selector");
+  for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
+    const CXXCatchStmt *C = S.getHandler(i);
+    VarDecl *CatchParam = C->getExceptionDecl();
+    Stmt *CatchBody = C->getHandlerBlock();
+
+    llvm::BasicBlock *Next = 0;
+
+    if (SelectorArgs[i+2] != Null) {
+      llvm::BasicBlock *Match = createBasicBlock("match");
+      Next = createBasicBlock("catch.next");
+      const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
+      llvm::Value *Id
+        = Builder.CreateCall(llvm_eh_typeid_for,
+                             Builder.CreateBitCast(SelectorArgs[i+2],
+                                                   Int8PtrTy));
+      Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id),
+                           Match, Next);
+      EmitBlock(Match);
+    }
+
+    llvm::BasicBlock *MatchEnd = createBasicBlock("match.end");
+    llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler");
+
+    PushCleanupBlock(MatchEnd);
+    setInvokeDest(MatchHandler);
+
+    llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc);
+
+    {
+      CleanupScope CatchScope(*this);
+      // Bind the catch parameter if it exists.
+      if (CatchParam) {
+        QualType CatchType = CatchParam->getType().getNonReferenceType();
+        setInvokeDest(TerminateHandler);
+        bool WasPointer = true;
+        bool WasPointerReference = false;
+        CatchType = CGM.getContext().getCanonicalType(CatchType);
+        if (CatchType.getTypePtr()->isPointerType()) {
+          if (isa<ReferenceType>(CatchParam->getType()))
+            WasPointerReference = true;
+        } else {
+          if (!isa<ReferenceType>(CatchParam->getType()))
+            WasPointer = false;
+          CatchType = getContext().getPointerType(CatchType);
+        }
+        ExcObject = Builder.CreateBitCast(ExcObject, ConvertType(CatchType));
+        EmitLocalBlockVarDecl(*CatchParam);
+        // FIXME: we need to do this sooner so that the EH region for the
+        // cleanup doesn't start until after the ctor completes, use a decl
+        // init?
+        CopyObject(*this, CatchParam->getType().getNonReferenceType(),
+                   WasPointer, WasPointerReference, ExcObject,
+                   GetAddrOfLocalVar(CatchParam));
+        setInvokeDest(MatchHandler);
+      }
+
+      EmitStmt(CatchBody);
+    }
+
+    EmitBranchThroughCleanup(FinallyEnd);
+
+    EmitBlock(MatchHandler);
+
+    llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+    // We are required to emit this call to satisfy LLVM, even
+    // though we don't use the result.
+    Args.clear();
+    Args.push_back(Exc);
+    Args.push_back(Personality);
+    Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                          0));
+    Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
+    Builder.CreateStore(Exc, RethrowPtr);
+    EmitBranchThroughCleanup(FinallyRethrow);
+
+    CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+
+    EmitBlock(MatchEnd);
+
+    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+    Builder.CreateInvoke(getEndCatchFn(*this),
+                         Cont, TerminateHandler,
+                         Args.begin(), Args.begin());
+    EmitBlock(Cont);
+    if (Info.SwitchBlock)
+      EmitBlock(Info.SwitchBlock);
+    if (Info.EndBlock)
+      EmitBlock(Info.EndBlock);
+
+    Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+    Builder.CreateStore(Exc, RethrowPtr);
+    EmitBranchThroughCleanup(FinallyRethrow);
+
+    if (Next)
+      EmitBlock(Next);
+  }
+  if (!HasCatchAll) {
+    Builder.CreateStore(Exc, RethrowPtr);
+    EmitBranchThroughCleanup(FinallyRethrow);
+  }
+
+  CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+
+  setInvokeDest(PrevLandingPad);
+
+  EmitBlock(FinallyBlock);
+
+  if (Info.SwitchBlock)
+    EmitBlock(Info.SwitchBlock);
+  if (Info.EndBlock)
+    EmitBlock(Info.EndBlock);
+
+  // Branch around the rethrow code.
+  EmitBranch(FinallyEnd);
+
+  EmitBlock(FinallyRethrow);
+  // FIXME: Eventually we can chain the handlers together and just do a call
+  // here.
+  if (getInvokeDest()) {
+    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+    Builder.CreateInvoke(getUnwindResumeOrRethrowFn(*this), Cont,
+                         getInvokeDest(),
+                         Builder.CreateLoad(RethrowPtr));
+    EmitBlock(Cont);
+  } else
+    Builder.CreateCall(getUnwindResumeOrRethrowFn(*this),
+                       Builder.CreateLoad(RethrowPtr));
+
+  Builder.CreateUnreachable();
+
+  EmitBlock(FinallyEnd);
+}
+
+// EHCleanupBlock destructor: closing the scope emits the exception-path
+// version of the collected cleanup code as its own landing pad, followed by
+// a rethrow of the in-flight exception.  The normal path branches around it.
+CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
+  // Normal control flow skips the handler entirely.
+  llvm::BasicBlock *Cont1 = CGF.createBasicBlock("cont");
+  CGF.EmitBranch(Cont1);
+  // Restore the invoke destination that was active before this scope so the
+  // handler code emitted below does not unwind into itself.
+  CGF.setInvokeDest(PreviousInvokeDest);
+
+
+  CGF.EmitBlock(CleanupHandler);
+
+  // The C++ personality routine, cast to i8* for use as an eh.selector
+  // argument.
+  llvm::Constant *Personality =
+    CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
+                                                          (CGF.VMContext),
+                                                          true),
+                                  "__gxx_personality_v0");
+  Personality = llvm::ConstantExpr::getBitCast(Personality, CGF.PtrToInt8Ty);
+  llvm::Value *llvm_eh_exception =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+  llvm::Value *llvm_eh_selector =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+
+  // Fetch the exception pointer at the head of the landing pad.
+  llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+  const llvm::IntegerType *Int8Ty;
+  const llvm::PointerType *PtrToInt8Ty;
+  Int8Ty = llvm::Type::getInt8Ty(CGF.VMContext);
+  // C string type.  Used in lots of places.
+  PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+  llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+  llvm::SmallVector<llvm::Value*, 8> Args;
+  Args.clear();
+  Args.push_back(Exc);
+  Args.push_back(Personality);
+  Args.push_back(Null);
+  // Emit the eh.selector call required by LLVM's EH scheme; its result is
+  // unused.  The null-pointer entry is the same "catch all" marker used for
+  // try statements elsewhere in this file.
+  CGF.Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
+
+  // Splice in the cleanup code that was collected while the scope was open.
+  CGF.EmitBlock(CleanupEntryBB);
+
+  CGF.EmitBlock(Cont1);
+
+  // Resume unwinding with the exception fetched above; use an invoke when an
+  // enclosing landing pad exists so the unwind edge is explicit.
+  if (CGF.getInvokeDest()) {
+    llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+    CGF.Builder.CreateInvoke(getUnwindResumeOrRethrowFn(CGF), Cont,
+                             CGF.getInvokeDest(), Exc);
+    CGF.EmitBlock(Cont);
+  } else
+    CGF.Builder.CreateCall(getUnwindResumeOrRethrowFn(CGF), Exc);
+
+  // The rethrow never returns.
+  CGF.Builder.CreateUnreachable();
+
+  // NOTE(review): this 'Cont' is distinct from the if-scoped 'Cont' above and
+  // is presumably a member of EHCleanupBlock — confirm against the class
+  // declaration.
+  CGF.EmitBlock(Cont);
+  // Route subsequent unwinds in the function to the handler just emitted
+  // while exceptions are enabled.
+  if (CGF.Exceptions)
+    CGF.setInvokeDest(CleanupHandler);
+}
+
+/// getTerminateHandler - Lazily create and return the landing pad that calls
+/// the runtime terminate function and never returns.  The block is cached in
+/// the TerminateHandler member so it is emitted at most once per function.
+llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
+  if (TerminateHandler)
+    return TerminateHandler;
+
+  llvm::BasicBlock *Cont = 0;
+
+  // If we are in the middle of emitting a block, branch around the handler
+  // code and resume emission there afterwards.
+  if (HaveInsertPoint()) {
+    Cont = createBasicBlock("cont");
+    EmitBranch(Cont);
+  }
+
+  // The C++ personality routine, cast to i8* for use as an eh.selector
+  // argument.
+  llvm::Constant *Personality =
+    CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
+                                                      (VMContext),
+                                                      true),
+                              "__gxx_personality_v0");
+  Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+  llvm::Value *llvm_eh_exception =
+    CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+  llvm::Value *llvm_eh_selector =
+    CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+
+  // Set up terminate handler
+  TerminateHandler = createBasicBlock("terminate.handler");
+  EmitBlock(TerminateHandler);
+  llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+  // We are required to emit this call to satisfy LLVM, even
+  // though we don't use the result.
+  llvm::SmallVector<llvm::Value*, 8> Args;
+  Args.push_back(Exc);
+  Args.push_back(Personality);
+  Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                        1));
+  Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
+  // Call the terminate runtime function; mark it noreturn/nothrow so the
+  // optimizer knows control never leaves this block.
+  llvm::CallInst *TerminateCall =
+    Builder.CreateCall(getTerminateFn(*this));
+  TerminateCall->setDoesNotReturn();
+  TerminateCall->setDoesNotThrow();
+  Builder.CreateUnreachable();
+
+  // Clear the insertion point to indicate we are in unreachable code.
+  Builder.ClearInsertionPoint();
+
+  if (Cont)
+    EmitBlock(Cont);
+
+  return TerminateHandler;
+}
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..bb74f9b
--- /dev/null
+++ b/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,1904 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCall.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Intrinsics.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+//                        Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+/// CreateTempAlloca - Create an alloca and insert it into the function's
+/// entry block (before AllocaInsertPt).  The name is dropped entirely when
+/// the builder has been told not to preserve names.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
+                                                    const llvm::Twine &Name) {
+  if (Builder.isNamePreserving())
+    return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
+  return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
+}
+
+/// CreateMemTemp - Create a temporary memory object for a value of type Ty,
+/// with the alloca aligned to the type's ABI alignment.
+llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty, const llvm::Twine &Name) {
+  // FIXME: Should we prefer the preferred type alignment here?
+  CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);
+  llvm::AllocaInst *Tmp = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
+  Tmp->setAlignment(TyAlign.getQuantity());
+  return Tmp;
+}
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+  QualType BoolTy = getContext().BoolTy;
+
+  // Member function pointers are tested by comparing the function-pointer
+  // half of the aggregate against null.
+  if (E->getType()->isMemberFunctionPointerType()) {
+    LValue MemFnLV = EmitAggExprToLValue(E);
+
+    llvm::Value *FnPtrAddr = Builder.CreateStructGEP(MemFnLV.getAddress(), 0,
+                                                     "src.ptr");
+    llvm::Value *FnPtr = Builder.CreateLoad(FnPtrAddr);
+
+    llvm::Value *NullFnPtr = llvm::Constant::getNullValue(FnPtr->getType());
+    return Builder.CreateICmpNE(FnPtr, NullFnPtr, "tobool");
+  }
+
+  // Complex values are compared against zero via the complex-to-scalar path.
+  if (E->getType()->isAnyComplexType())
+    return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),
+                                         BoolTy);
+
+  return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression, which can
+/// have any type.  The result is returned as an RValue struct.  For aggregate
+/// expressions, AggLoc/IsAggLocVolatile indicate where the result should be
+/// stored.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc,
+                                    bool IsAggLocVolatile, bool IgnoreResult,
+                                    bool IsInitializer) {
+  // Scalars are emitted directly.
+  if (!hasAggregateLLVMType(E->getType()))
+    return RValue::get(EmitScalarExpr(E, IgnoreResult));
+
+  // Complex values come back as a (real, imag) pair.
+  if (E->getType()->isAnyComplexType())
+    return RValue::getComplex(EmitComplexExpr(E, false, false,
+                                              IgnoreResult, IgnoreResult));
+
+  // True aggregates are emitted into the provided location.
+  EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer);
+  return RValue::getAggregate(AggLoc, IsAggLocVolatile);
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(), however, the result will
+/// always be accessible even if no aggregate location is provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E,
+                                          bool IsAggLocVolatile,
+                                          bool IsInitializer) {
+  // Only true aggregates need a backing temporary; scalars and complex
+  // values are returned directly in registers.
+  llvm::Value *AggSlot = 0;
+  if (hasAggregateLLVMType(E->getType()) && !E->getType()->isAnyComplexType())
+    AggSlot = CreateTempAlloca(ConvertTypeForMem(E->getType()), "agg.tmp");
+
+  return EmitAnyExpr(E, AggSlot, IsAggLocVolatile, /*IgnoreResult=*/false,
+                     IsInitializer);
+}
+
+/// EmitReferenceBindingToExpr - Emit E and return an RValue whose scalar
+/// value is the address a reference should be bound to.  When IsInitializer
+/// is true the reference initializes a declaration, so a temporary bound to
+/// it may need destructor cleanups registered.
+RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
+                                                   bool IsInitializer) {
+  bool ShouldDestroyTemporaries = false;
+  unsigned OldNumLiveTemporaries = 0;
+
+  // Default arguments are emitted by re-emitting the stored expression.
+  if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
+    E = DAE->getExpr();
+
+  if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
+    ShouldDestroyTemporaries = true;
+    
+    // Keep track of the current cleanup stack depth.
+    OldNumLiveTemporaries = LiveTemporaries.size();
+    
+    E = TE->getSubExpr();
+  }
+  
+  RValue Val;
+  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+    // Emit the expr as an lvalue.
+    LValue LV = EmitLValue(E);
+    if (LV.isSimple()) {
+      // A simple lvalue already has an address; bind directly to it.
+      if (ShouldDestroyTemporaries) {
+        // Pop temporaries.
+        while (LiveTemporaries.size() > OldNumLiveTemporaries)
+          PopCXXTemporary();
+      }
+      
+      return RValue::get(LV.getAddress());
+    }
+    
+    // Non-simple lvalue (bitfield, vector element, ...): load the value and
+    // fall through to materialize a temporary below.
+    Val = EmitLoadOfLValue(LV, E->getType());
+    
+    if (ShouldDestroyTemporaries) {
+      // Pop temporaries.
+      while (LiveTemporaries.size() > OldNumLiveTemporaries)
+        PopCXXTemporary();
+    }      
+  } else {
+    const CXXRecordDecl *BaseClassDecl = 0;
+    const CXXRecordDecl *DerivedClassDecl = 0;
+    
+    // Strip a top-level derived-to-base cast; the base conversion is applied
+    // to the emitted object's address afterwards.
+    if (const CastExpr *CE = 
+          dyn_cast<CastExpr>(E->IgnoreParenNoopCasts(getContext()))) {
+      if (CE->getCastKind() == CastExpr::CK_DerivedToBase) {
+        E = CE->getSubExpr();
+        
+        BaseClassDecl = 
+          cast<CXXRecordDecl>(CE->getType()->getAs<RecordType>()->getDecl());
+        DerivedClassDecl = 
+          cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+      }
+    }
+      
+    Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
+                            IsInitializer);
+
+    if (ShouldDestroyTemporaries) {
+      // Pop temporaries.
+      while (LiveTemporaries.size() > OldNumLiveTemporaries)
+        PopCXXTemporary();
+    }      
+    
+    if (IsInitializer) {
+      // We might have to destroy the temporary variable.
+      if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+        if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+          if (!ClassDecl->hasTrivialDestructor()) {
+            const CXXDestructorDecl *Dtor =
+              ClassDecl->getDestructor(getContext());
+
+            // Register the destructor call on the normal cleanup path...
+            {
+              DelayedCleanupBlock Scope(*this);
+              EmitCXXDestructorCall(Dtor, Dtor_Complete,
+                                    Val.getAggregateAddr());
+              
+              // Make sure to jump to the exit block.
+              EmitBranch(Scope.getCleanupExitBlock());
+            }
+            // ... and on the exception path as well.
+            if (Exceptions) {
+              EHCleanupBlock Cleanup(*this);
+              EmitCXXDestructorCall(Dtor, Dtor_Complete,
+                                    Val.getAggregateAddr());
+            }
+          }
+        }
+      }
+    }
+    
+    // Check if need to perform the derived-to-base cast.
+    if (BaseClassDecl) {
+      llvm::Value *Derived = Val.getAggregateAddr();
+      llvm::Value *Base = 
+        GetAddressOfBaseClass(Derived, DerivedClassDecl, BaseClassDecl, 
+                              /*NullCheckValue=*/false);
+      return RValue::get(Base);
+    }
+  }
+
+  if (Val.isAggregate()) {
+    // Aggregates already live in memory; their address is the bound value.
+    Val = RValue::get(Val.getAggregateAddr());
+  } else {
+    // Create a temporary variable that we can bind the reference to.
+    llvm::Value *Temp = CreateMemTemp(E->getType(), "reftmp");
+    if (Val.isScalar())
+      EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType());
+    else
+      StoreComplexToAddr(Val.getComplexVal(), Temp, false);
+    Val = RValue::get(Temp);
+  }
+
+  return Val;
+}
+
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return
+/// the input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+                                             const llvm::Constant *Elts) {
+  // A zero-initialized element list accesses field 0 for every result.
+  if (isa<llvm::ConstantAggregateZero>(Elts))
+    return 0;
+
+  const llvm::Value *Elt = Elts->getOperand(Idx);
+  return cast<llvm::ConstantInt>(Elt)->getZExtValue();
+}
+
+/// EmitCheck - When CatchUndefined is enabled, emit a runtime check that at
+/// least Size bytes are accessible at Address, branching to the trap block
+/// otherwise.  Uses llvm.objectsize; a result of (size_t)-1 means the size
+/// is unknown and the access is accepted without checking.
+void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
+  if (!CatchUndefined)
+    return;
+
+  const llvm::IntegerType *Size_tTy
+    = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
+  Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
+
+  const llvm::Type *ResType[] = {
+    Size_tTy
+  };
+  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, ResType, 1);
+  const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
+    CGM.getTypes().ConvertType(CGM.getContext().IntTy));
+  // In time, people may want to control this and use a 1 here.
+  llvm::Value *Arg = llvm::ConstantInt::get(IntTy, 0);
+  llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
+  llvm::BasicBlock *Cont = createBasicBlock();
+  llvm::BasicBlock *Check = createBasicBlock();
+  llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
+  // Unknown object size (-1): skip the range check entirely.
+  Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
+    
+  EmitBlock(Check);
+  // Trap unless the known object size covers the access.
+  Builder.CreateCondBr(Builder.CreateICmpUGE(C,
+                                        llvm::ConstantInt::get(Size_tTy, Size)),
+                       Cont, getTrapBB());
+  EmitBlock(Cont);
+}
+
+
+/// EmitScalarPrePostIncDec - Emit a scalar ++/-- (pre or post, increment or
+/// decrement) through the lvalue LV.  Returns the value of the expression:
+/// the updated value for pre forms, the original loaded value for post forms.
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+                        bool isInc, bool isPre) {
+  QualType ValTy = E->getSubExpr()->getType();
+  llvm::Value *InVal = EmitLoadOfLValue(LV, ValTy).getScalarVal();
+  
+  int AmountVal = isInc ? 1 : -1;
+  
+  if (ValTy->isPointerType() &&
+      ValTy->getAs<PointerType>()->isVariableArrayType()) {
+    // The amount of the addition/subtraction needs to account for the VLA size
+    ErrorUnsupported(E, "VLA pointer inc/dec");
+  }
+  
+  llvm::Value *NextVal;
+  // Pointer arithmetic.
+  if (const llvm::PointerType *PT =
+      dyn_cast<llvm::PointerType>(InVal->getType())) {
+    llvm::Constant *Inc =
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
+    if (!isa<llvm::FunctionType>(PT->getElementType())) {
+      QualType PTEE = ValTy->getPointeeType();
+      if (const ObjCInterfaceType *OIT =
+          dyn_cast<ObjCInterfaceType>(PTEE)) {
+        // Handle interface types, which are not represented with a concrete
+        // type.  Step by the interface size over an i8* view of the pointer.
+        int size = getContext().getTypeSize(OIT) / 8;
+        if (!isInc)
+          size = -size;
+        Inc = llvm::ConstantInt::get(Inc->getType(), size);
+        const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+        InVal = Builder.CreateBitCast(InVal, i8Ty);
+        NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr");
+        llvm::Value *lhs = LV.getAddress();
+        lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty));
+        LV = LValue::MakeAddr(lhs, MakeQualifiers(ValTy));
+      } else
+        NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec");
+    } else {
+      // Function pointers: do the arithmetic on an i8* view, since the
+      // pointee has no meaningful size of its own.
+      const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+      NextVal = Builder.CreateBitCast(InVal, i8Ty, "tmp");
+      NextVal = Builder.CreateGEP(NextVal, Inc, "ptrincdec");
+      NextVal = Builder.CreateBitCast(NextVal, InVal->getType());
+    }
+  } else if (InVal->getType() == llvm::Type::getInt1Ty(VMContext) && isInc) {
+    // Bool++ is an interesting case, due to promotion rules, we get:
+    // Bool++ -> Bool = Bool+1 -> Bool = (int)Bool+1 ->
+    // Bool = ((int)Bool+1) != 0
+    // An interesting aspect of this is that increment is always true.
+    // Decrement does not have this property.
+    NextVal = llvm::ConstantInt::getTrue(VMContext);
+  } else if (isa<llvm::IntegerType>(InVal->getType())) {
+    NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal);
+    
+    // Signed integer overflow is undefined behavior.
+    if (ValTy->isSignedIntegerType())
+      NextVal = Builder.CreateNSWAdd(InVal, NextVal, isInc ? "inc" : "dec");
+    else
+      NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec");
+  } else {
+    // Floating point: build a +/-1.0 constant in the value's own format
+    // (float, double, or the target's long double).
+    // Add the inc/dec to the real part.
+    if (InVal->getType()->isFloatTy())
+      NextVal =
+      llvm::ConstantFP::get(VMContext,
+                            llvm::APFloat(static_cast<float>(AmountVal)));
+    else if (InVal->getType()->isDoubleTy())
+      NextVal =
+      llvm::ConstantFP::get(VMContext,
+                            llvm::APFloat(static_cast<double>(AmountVal)));
+    else {
+      llvm::APFloat F(static_cast<float>(AmountVal));
+      bool ignored;
+      F.convert(Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+                &ignored);
+      NextVal = llvm::ConstantFP::get(VMContext, F);
+    }
+    NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec");
+  }
+  
+  // Store the updated result through the lvalue.
+  if (LV.isBitfield())
+    EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, &NextVal);
+  else
+    EmitStoreThroughLValue(RValue::get(NextVal), LV, ValTy);
+  
+  // If this is a postinc, return the value read from memory, otherwise use the
+  // updated value.
+  return isPre ? NextVal : InVal;
+}
+
+
+/// EmitComplexPrePostIncDec - Emit a complex ++/-- through LV: only the real
+/// part is adjusted by +/-1.  Returns the updated pair for pre forms and the
+/// originally loaded pair for post forms.
+CodeGenFunction::ComplexPairTy CodeGenFunction::
+EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+                         bool isInc, bool isPre) {
+  ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
+                                            LV.isVolatileQualified());
+  
+  llvm::Value *NextVal;
+  if (isa<llvm::IntegerType>(InVal.first->getType())) {
+    uint64_t AmountVal = isInc ? 1 : -1;
+    NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+    
+    // Add the inc/dec to the real part.
+    NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+  } else {
+    // Build a +/-1.0 constant with the element type's float semantics.
+    QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+    llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
+    if (!isInc)
+      FVal.changeSign();
+    NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
+    
+    // Add the inc/dec to the real part.
+    NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+  }
+  
+  // The imaginary part is carried over unchanged.
+  ComplexPairTy IncVal(NextVal, InVal.second);
+  
+  // Store the updated result through the lvalue.
+  StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
+  
+  // If this is a postinc, return the value read from memory, otherwise use the
+  // updated value.
+  return isPre ? IncVal : InVal;
+}
+
+
+//===----------------------------------------------------------------------===//
+//                         LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+  if (Ty->isVoidType())
+    return RValue::get(0);
+  
+  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+    const llvm::Type *EltTy = ConvertType(CTy->getElementType());
+    llvm::Value *U = llvm::UndefValue::get(EltTy);
+    return RValue::getComplex(std::make_pair(U, U));
+  }
+  
+  if (hasAggregateLLVMType(Ty)) {
+    const llvm::Type *LTy = llvm::PointerType::getUnqual(ConvertType(Ty));
+    return RValue::getAggregate(llvm::UndefValue::get(LTy));
+  }
+  
+  return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+}
+
+/// EmitUnsupportedRValue - Report that the expression cannot be emitted as an
+/// rvalue, and return an undef rvalue of its type so codegen can continue.
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+                                              const char *Name) {
+  ErrorUnsupported(E, Name);
+  return GetUndefRValue(E->getType());
+}
+
+/// EmitUnsupportedLValue - Report that the expression cannot be emitted as an
+/// lvalue, and return an undef address of the appropriate pointer type so
+/// that codegen can continue.
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+                                              const char *Name) {
+  ErrorUnsupported(E, Name);
+  const llvm::Type *ElemTy = ConvertType(E->getType());
+  llvm::Value *UndefAddr =
+    llvm::UndefValue::get(llvm::PointerType::getUnqual(ElemTy));
+  return LValue::MakeAddr(UndefAddr, MakeQualifiers(E->getType()));
+}
+
+/// EmitCheckedLValue - Emit an lvalue and, for simple non-bitfield accesses
+/// that are not plain declaration references, guard it with an object-size
+/// check (EmitCheck is a no-op unless CatchUndefined is enabled).
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
+  LValue LV = EmitLValue(E);
+  bool NeedsCheck = !isa<DeclRefExpr>(E) && !LV.isBitfield() && LV.isSimple();
+  if (NeedsCheck)
+    EmitCheck(LV.getAddress(), getContext().getTypeSize(E->getType()) / 8);
+  return LV;
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield reference.
+/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
+/// an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of the
+/// LLVM value is known: For example, it may not be a pointer to an integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed size,
+/// this method guarantees that the returned pointer type will point to an LLVM
+/// type of the same size of the lvalue's type.  If the lvalue has a variable
+/// length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+  // Dispatch purely on the statement class; each case forwards to a
+  // specialized emitter.
+  switch (E->getStmtClass()) {
+  // Anything not handled below is diagnosed and lowered to an undef address.
+  default: return EmitUnsupportedLValue(E, "l-value expression");
+
+  case Expr::ObjCIsaExprClass:
+    return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
+  case Expr::BinaryOperatorClass:
+    return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+  case Expr::CallExprClass:
+  case Expr::CXXMemberCallExprClass:
+  case Expr::CXXOperatorCallExprClass:
+    return EmitCallExprLValue(cast<CallExpr>(E));
+  case Expr::VAArgExprClass:
+    return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+  case Expr::DeclRefExprClass:
+    return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+  // Parens are transparent for lvalue purposes.
+  case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+  case Expr::PredefinedExprClass:
+    return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+  case Expr::StringLiteralClass:
+    return EmitStringLiteralLValue(cast<StringLiteral>(E));
+  case Expr::ObjCEncodeExprClass:
+    return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+
+  case Expr::BlockDeclRefExprClass:
+    return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E));
+
+  // C++ construction, temporaries, and related expressions.
+  case Expr::CXXTemporaryObjectExprClass:
+  case Expr::CXXConstructExprClass:
+    return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+  case Expr::CXXBindTemporaryExprClass:
+    return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+  case Expr::CXXExprWithTemporariesClass:
+    return EmitCXXExprWithTemporariesLValue(cast<CXXExprWithTemporaries>(E));
+  case Expr::CXXZeroInitValueExprClass:
+    return EmitNullInitializationLValue(cast<CXXZeroInitValueExpr>(E));
+  case Expr::CXXDefaultArgExprClass:
+    return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+  case Expr::CXXTypeidExprClass:
+    return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
+
+  // Objective-C message sends and property/ivar references.
+  case Expr::ObjCMessageExprClass:
+    return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+  case Expr::ObjCIvarRefExprClass:
+    return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+  case Expr::ObjCPropertyRefExprClass:
+    return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E));
+  case Expr::ObjCImplicitSetterGetterRefExprClass:
+    return EmitObjCKVCRefLValue(cast<ObjCImplicitSetterGetterRefExpr>(E));
+  case Expr::ObjCSuperExprClass:
+    return EmitObjCSuperExprLValue(cast<ObjCSuperExpr>(E));
+
+  case Expr::StmtExprClass:
+    return EmitStmtExprLValue(cast<StmtExpr>(E));
+  case Expr::UnaryOperatorClass:
+    return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+  case Expr::ArraySubscriptExprClass:
+    return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+  case Expr::ExtVectorElementExprClass:
+    return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+  case Expr::MemberExprClass:
+    return EmitMemberExpr(cast<MemberExpr>(E));
+  case Expr::CompoundLiteralExprClass:
+    return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+  case Expr::ConditionalOperatorClass:
+    return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+  case Expr::ChooseExprClass:
+    return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+  // Casts of every flavor funnel into one entry point.
+  case Expr::ImplicitCastExprClass:
+  case Expr::CStyleCastExprClass:
+  case Expr::CXXFunctionalCastExprClass:
+  case Expr::CXXStaticCastExprClass:
+  case Expr::CXXDynamicCastExprClass:
+  case Expr::CXXReinterpretCastExprClass:
+  case Expr::CXXConstCastExprClass:
+    return EmitCastLValue(cast<CastExpr>(E));
+  }
+}
+
+/// EmitLoadOfScalar - Load a scalar value of type Ty from Addr, honoring
+/// volatility and normalizing bool to its i1 register form.
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+                                               QualType Ty) {
+  llvm::LoadInst *Load = Builder.CreateLoad(Addr, "tmp");
+  Load->setVolatile(Volatile);
+
+  // Bool can have different representation in memory than in registers:
+  // truncate to i1 when the loaded value is wider.
+  llvm::Value *Result = Load;
+  if (Ty->isBooleanType() &&
+      Result->getType() != llvm::Type::getInt1Ty(VMContext))
+    Result = Builder.CreateTrunc(Result, llvm::Type::getInt1Ty(VMContext),
+                                 "tobool");
+
+  return Result;
+}
+
+/// EmitStoreOfScalar - Store the scalar Value to Addr, honoring volatility
+/// and converting bool from its i1 register form to its in-memory form.
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+                                        bool Volatile, QualType Ty) {
+  // Bool can have different representation in memory than in registers:
+  // widen to the pointee type before storing.
+  if (Ty->isBooleanType()) {
+    const llvm::Type *MemTy =
+      cast<llvm::PointerType>(Addr->getType())->getElementType();
+    Value = Builder.CreateIntCast(Value, MemTy, false);
+  }
+
+  Builder.CreateStore(Value, Addr, Volatile);
+}
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.  Dispatches on the kind of lvalue (simple address,
+/// vector element, ext-vector subset, bitfield, ObjC property/KVC ref).
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) {
+  if (LV.isObjCWeak()) {
+    // load of a __weak object.
+    llvm::Value *AddrWeakObj = LV.getAddress();
+    return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+                                                             AddrWeakObj));
+  }
+
+  if (LV.isSimple()) {
+    llvm::Value *Ptr = LV.getAddress();
+    const llvm::Type *EltTy =
+      cast<llvm::PointerType>(Ptr->getType())->getElementType();
+
+    // Simple scalar l-value.
+    //
+    // FIXME: We shouldn't have to use isSingleValueType here.
+    if (EltTy->isSingleValueType())
+      return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(),
+                                          ExprType));
+
+    // The only remaining simple case is a function, whose "value" is its
+    // address.
+    assert(ExprType->isFunctionType() && "Unknown scalar value");
+    return RValue::get(Ptr);
+  }
+
+  if (LV.isVectorElt()) {
+    // Load the whole vector, then extract the referenced element.
+    llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(),
+                                          LV.isVolatileQualified(), "tmp");
+    return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(),
+                                                    "vecext"));
+  }
+
+  // If this is a reference to a subset of the elements of a vector, either
+  // shuffle the input or extract/insert them as appropriate.
+  if (LV.isExtVectorElt())
+    return EmitLoadOfExtVectorElementLValue(LV, ExprType);
+
+  if (LV.isBitfield())
+    return EmitLoadOfBitfieldLValue(LV, ExprType);
+
+  if (LV.isPropertyRef())
+    return EmitLoadOfPropertyRefLValue(LV, ExprType);
+
+  assert(LV.isKVCRef() && "Unknown LValue type!");
+  return EmitLoadOfKVCRefLValue(LV, ExprType);
+}
+
+/// EmitLoadOfBitfieldLValue - Load a bit-field value, possibly assembled from
+/// two adjacent storage units, and widen it to the expression type.
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
+                                                 QualType ExprType) {
+  unsigned StartBit = LV.getBitfieldStartBit();
+  unsigned BitfieldSize = LV.getBitfieldSize();
+  llvm::Value *Ptr = LV.getBitfieldAddr();
+
+  const llvm::Type *EltTy =
+    cast<llvm::PointerType>(Ptr->getType())->getElementType();
+  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+
+  // In some cases the bitfield may straddle two memory locations.  Currently we
+  // load the entire bitfield, then do the magic to sign-extend it if
+  // necessary. This results in somewhat more code than necessary for the common
+  // case (one load), since two shifts accomplish both the masking and sign
+  // extension.
+  //
+  // LowBits = bits of the field that fit in the first storage unit starting
+  // at StartBit; the remainder (if any) spills into the next unit.
+  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
+  llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp");
+
+  // Shift to proper location.
+  if (StartBit)
+    Val = Builder.CreateLShr(Val, StartBit, "bf.lo");
+
+  // Mask off unused bits.
+  llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext,
+                                llvm::APInt::getLowBitsSet(EltTySize, LowBits));
+  Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared");
+
+  // Fetch the high bits if necessary.
+  if (LowBits < BitfieldSize) {
+    unsigned HighBits = BitfieldSize - LowBits;
+    // The spill-over bits sit at the bottom of the next storage unit.
+    llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
+                            llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
+    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+                                              LV.isVolatileQualified(),
+                                              "tmp");
+
+    // Mask off unused bits.
+    llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext,
+                               llvm::APInt::getLowBitsSet(EltTySize, HighBits));
+    HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared");
+
+    // Shift to proper location and or in to bitfield value.
+    HighVal = Builder.CreateShl(HighVal, LowBits);
+    Val = Builder.CreateOr(Val, HighVal, "bf.val");
+  }
+
+  // Sign extend if necessary: shifting left then arithmetic-shifting right
+  // replicates the field's top bit across the unused high bits.
+  if (LV.isBitfieldSigned()) {
+    llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy,
+                                                    EltTySize - BitfieldSize);
+    Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits),
+                             ExtraBits, "bf.val.sext");
+  }
+
+  // The bitfield type and the normal type differ when the storage sizes differ
+  // (currently just _Bool).
+  Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp");
+
+  return RValue::get(Val);
+}
+
+/// EmitLoadOfPropertyRefLValue - Read an Objective-C property by emitting a
+/// getter message send.
+RValue CodeGenFunction::EmitLoadOfPropertyRefLValue(LValue LV,
+                                                    QualType ExprType) {
+  return EmitObjCPropertyGet(LV.getPropertyRefExpr());
+}
+
+/// EmitLoadOfKVCRefLValue - Read a key-value-coding reference by emitting a
+/// getter message send.
+RValue CodeGenFunction::EmitLoadOfKVCRefLValue(LValue LV,
+                                               QualType ExprType) {
+  return EmitObjCPropertyGet(LV.getKVCRefExpr());
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
+                                                         QualType ExprType) {
+  // Load the whole underlying vector; the access list then selects lanes.
+  llvm::Value *V = Builder.CreateLoad(LV.getExtVectorAddr(),
+                                      LV.isVolatileQualified(), "tmp");
+
+  const llvm::Constant *AccessElts = LV.getExtVectorElts();
+
+  // A non-vector result type means exactly one element is being accessed;
+  // emit a plain extractelement for that lane.
+  const VectorType *ExprVT = ExprType->getAs<VectorType>();
+  if (!ExprVT) {
+    llvm::Value *LaneIdx = llvm::ConstantInt::get(
+        llvm::Type::getInt32Ty(VMContext), getAccessedFieldNo(0, AccessElts));
+    return RValue::get(Builder.CreateExtractElement(V, LaneIdx, "tmp"));
+  }
+
+  // Always use shuffle vector to try to retain the original program structure
+  llvm::SmallVector<llvm::Constant*, 4> ShuffleMask;
+  for (unsigned i = 0, e = ExprVT->getNumElements(); i != e; ++i)
+    ShuffleMask.push_back(llvm::ConstantInt::get(
+        llvm::Type::getInt32Ty(VMContext), getAccessedFieldNo(i, AccessElts)));
+
+  llvm::Value *MaskV = llvm::ConstantVector::get(&ShuffleMask[0],
+                                                 ShuffleMask.size());
+  return RValue::get(Builder.CreateShuffleVector(
+      V, llvm::UndefValue::get(V->getType()), MaskV, "tmp"));
+}
+
+
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to the have the same type, and that type
+/// is 'Ty'.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+                                             QualType Ty) {
+  if (!Dst.isSimple()) {
+    // A single vector element: read/modify/write the containing vector.
+    if (Dst.isVectorElt()) {
+      llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(),
+                                            Dst.isVolatileQualified(), "tmp");
+      Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+                                        Dst.getVectorIdx(), "vecins");
+      Builder.CreateStore(Vec, Dst.getVectorAddr(),
+                          Dst.isVolatileQualified());
+      return;
+    }
+
+    // An update of extended vector elements: insert them as appropriate.
+    if (Dst.isExtVectorElt())
+      return EmitStoreThroughExtVectorComponentLValue(Src, Dst, Ty);
+
+    if (Dst.isBitfield())
+      return EmitStoreThroughBitfieldLValue(Src, Dst, Ty);
+
+    // Property and KVC references store via setter message sends.
+    if (Dst.isPropertyRef())
+      return EmitStoreThroughPropertyRefLValue(Src, Dst, Ty);
+
+    assert(Dst.isKVCRef() && "Unknown LValue type");
+    return EmitStoreThroughKVCRefLValue(Src, Dst, Ty);
+  }
+
+  // Store into a __weak object: must go through the ObjC runtime.
+  if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+    llvm::Value *LvalueDst = Dst.getAddress();
+    llvm::Value *src = Src.getScalarVal();
+    CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+    return;
+  }
+
+  // Store into a __strong object: emit the appropriate GC write barrier.
+  if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+    llvm::Value *LvalueDst = Dst.getAddress();
+    llvm::Value *src = Src.getScalarVal();
+    if (Dst.isObjCIvar()) {
+      // The ivar-assign entry point needs the byte offset of the stored
+      // slot from the start of the containing object.
+      assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
+      const llvm::Type *ResultType = ConvertType(getContext().LongTy);
+      llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
+      llvm::Value *dst = RHS;
+      RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+      llvm::Value *LHS =
+        Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+      llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
+      CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst, BytesBetween);
+    } else if (Dst.isGlobalObjCRef()) {
+      CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst);
+    } else {
+      CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+    }
+    return;
+  }
+
+  assert(Src.isScalar() && "Can't emit an agg store with this method");
+  EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(),
+                    Dst.isVolatileQualified(), Ty);
+}
+
+/// EmitStoreThroughBitfieldLValue - Store into a bit-field, possibly split
+/// over two storage units.  If \p Result is non-null it receives the value of
+/// the bit-field after the store, cast back to the source type.
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+                                                     QualType Ty,
+                                                     llvm::Value **Result) {
+  unsigned StartBit = Dst.getBitfieldStartBit();
+  unsigned BitfieldSize = Dst.getBitfieldSize();
+  llvm::Value *Ptr = Dst.getBitfieldAddr();
+
+  const llvm::Type *EltTy =
+    cast<llvm::PointerType>(Ptr->getType())->getElementType();
+  unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy);
+
+  // Get the new value, cast to the appropriate type and masked to exactly the
+  // size of the bit-field.
+  llvm::Value *SrcVal = Src.getScalarVal();
+  llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp");
+  llvm::Constant *Mask = llvm::ConstantInt::get(VMContext,
+                           llvm::APInt::getLowBitsSet(EltTySize, BitfieldSize));
+  NewVal = Builder.CreateAnd(NewVal, Mask, "bf.value");
+
+  // Return the new value of the bit-field, if requested.
+  if (Result) {
+    // Cast back to the proper type for result.
+    const llvm::Type *SrcTy = SrcVal->getType();
+    llvm::Value *SrcTrunc = Builder.CreateIntCast(NewVal, SrcTy, false,
+                                                  "bf.reload.val");
+
+    // Sign extend if necessary: shl/ashr replicates the field's top bit
+    // across the unused high bits of the source type.
+    if (Dst.isBitfieldSigned()) {
+      unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy);
+      llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy,
+                                                      SrcTySize - BitfieldSize);
+      SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits),
+                                    ExtraBits, "bf.reload.sext");
+    }
+
+    *Result = SrcTrunc;
+  }
+
+  // In some cases the bitfield may straddle two memory locations.  Emit the low
+  // part first and check to see if the high needs to be done.
+  unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit);
+  llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(),
+                                           "bf.prev.low");
+
+  // Compute the mask for zero-ing the low part of this bitfield.
+  llvm::Constant *InvMask =
+    llvm::ConstantInt::get(VMContext,
+             ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits));
+
+  // Compute the new low part as
+  //   LowVal = (LowVal & InvMask) | (NewVal << StartBit),
+  // with the shift of NewVal implicitly stripping the high bits.
+  llvm::Value *NewLowVal =
+    Builder.CreateShl(NewVal, StartBit, "bf.value.lo");
+  LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared");
+  LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo");
+
+  // Write back.
+  Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified());
+
+  // If the low part doesn't cover the bitfield emit a high part.
+  if (LowBits < BitfieldSize) {
+    unsigned HighBits = BitfieldSize - LowBits;
+    // The spill-over bits sit at the bottom of the next storage unit.
+    llvm::Value *HighPtr =  Builder.CreateGEP(Ptr, llvm::ConstantInt::get(
+                            llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi");
+    llvm::Value *HighVal = Builder.CreateLoad(HighPtr,
+                                              Dst.isVolatileQualified(),
+                                              "bf.prev.hi");
+
+    // Compute the mask for zero-ing the high part of this bitfield.
+    llvm::Constant *InvMask =
+      llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize,
+                               HighBits));
+
+    // Compute the new high part as
+    //   HighVal = (HighVal & InvMask) | (NewVal lshr LowBits),
+    // where the high bits of NewVal have already been cleared and the
+    // shift stripping the low bits.
+    llvm::Value *NewHighVal =
+      Builder.CreateLShr(NewVal, LowBits, "bf.value.high");
+    HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared");
+    HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi");
+
+    // Write back.
+    Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified());
+  }
+}
+
+/// EmitStoreThroughPropertyRefLValue - Store to an Objective-C property by
+/// emitting a setter message send.
+void CodeGenFunction::EmitStoreThroughPropertyRefLValue(RValue Src,
+                                                        LValue Dst,
+                                                        QualType Ty) {
+  EmitObjCPropertySet(Dst.getPropertyRefExpr(), Src);
+}
+
+/// EmitStoreThroughKVCRefLValue - Store to a key-value-coding reference by
+/// emitting a setter message send.
+void CodeGenFunction::EmitStoreThroughKVCRefLValue(RValue Src,
+                                                   LValue Dst,
+                                                   QualType Ty) {
+  EmitObjCPropertySet(Dst.getKVCRefExpr(), Src);
+}
+
+/// EmitStoreThroughExtVectorComponentLValue - Store into a subset of a
+/// vector's elements (e.g. V.xy = ...) as a read/modify/write of the whole
+/// vector.
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+                                                               LValue Dst,
+                                                               QualType Ty) {
+  // This access turns into a read/modify/write of the vector.  Load the input
+  // value now.
+  llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(),
+                                        Dst.isVolatileQualified(), "tmp");
+  const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+  llvm::Value *SrcVal = Src.getScalarVal();
+
+  if (const VectorType *VTy = Ty->getAs<VectorType>()) {
+    unsigned NumSrcElts = VTy->getNumElements();
+    unsigned NumDstElts =
+       cast<llvm::VectorType>(Vec->getType())->getNumElements();
+    if (NumDstElts == NumSrcElts) {
+      // Use shuffle vector if the src and destination are the same number of
+      // elements and restore the vector mask since it is on the side it will be
+      // stored.
+      llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+      for (unsigned i = 0; i != NumSrcElts; ++i) {
+        // Mask[accessed lane] = source lane, i.e. the inverse permutation.
+        unsigned InIdx = getAccessedFieldNo(i, Elts);
+        Mask[InIdx] = llvm::ConstantInt::get(
+                                          llvm::Type::getInt32Ty(VMContext), i);
+      }
+
+      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+      Vec = Builder.CreateShuffleVector(SrcVal,
+                                        llvm::UndefValue::get(Vec->getType()),
+                                        MaskV, "tmp");
+    } else if (NumDstElts > NumSrcElts) {
+      // Extended the source vector to the same length and then shuffle it
+      // into the destination.
+      // FIXME: since we're shuffling with undef, can we just use the indices
+      //        into that?  This could be simpler.
+      llvm::SmallVector<llvm::Constant*, 4> ExtMask;
+      const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+      unsigned i;
+      for (i = 0; i != NumSrcElts; ++i)
+        ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+      for (; i != NumDstElts; ++i)
+        ExtMask.push_back(llvm::UndefValue::get(Int32Ty));
+      llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0],
+                                                        ExtMask.size());
+      llvm::Value *ExtSrcVal =
+        Builder.CreateShuffleVector(SrcVal,
+                                    llvm::UndefValue::get(SrcVal->getType()),
+                                    ExtMaskV, "tmp");
+      // build identity
+      llvm::SmallVector<llvm::Constant*, 4> Mask;
+      for (unsigned i = 0; i != NumDstElts; ++i)
+        Mask.push_back(llvm::ConstantInt::get(Int32Ty, i));
+
+      // modify when what gets shuffled in
+      // (indices >= NumDstElts select from the second shuffle operand,
+      // i.e. the widened source vector).
+      for (unsigned i = 0; i != NumSrcElts; ++i) {
+        unsigned Idx = getAccessedFieldNo(i, Elts);
+        Mask[Idx] = llvm::ConstantInt::get(Int32Ty, i+NumDstElts);
+      }
+      llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
+      Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV, "tmp");
+    } else {
+      // We should never shorten the vector
+      assert(0 && "unexpected shorten vector length");
+    }
+  } else {
+    // If the Src is a scalar (not a vector) it must be updating one element.
+    unsigned InIdx = getAccessedFieldNo(0, Elts);
+    const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+    Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
+  }
+
+  Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified());
+}
+
+// setObjCGCLValueClass - sets class of the lvalue for the purpose of
+// generating the write-barrier API. It is currently a global, ivar,
+// or neither.
+static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
+                                 LValue &LV) {
+  if (Ctx.getLangOptions().getGCMode() == LangOptions::NonGC)
+    return;
+  
+  if (isa<ObjCIvarRefExpr>(E)) {
+    LV.SetObjCIvar(LV, true);
+    ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
+    LV.setBaseIvarExp(Exp->getBase());
+    LV.SetObjCArray(LV, E->getType()->isArrayType());
+    return;
+  }
+  
+  if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
+    if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
+      // Non-local block vars and file-scope vars get the global barrier.
+      if ((VD->isBlockVarDecl() && !VD->hasLocalStorage()) ||
+          VD->isFileVarDecl())
+        LV.SetGlobalObjCRef(LV, true);
+    }
+    LV.SetObjCArray(LV, E->getType()->isArrayType());
+    return;
+  }
+  
+  // For wrapper expressions, classify based on the wrapped subexpression.
+  if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
+    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+    return;
+  }
+  
+  if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
+    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+    if (LV.isObjCIvar()) {
+      // If cast is to a structure pointer, follow gcc's behavior and make it
+      // a non-ivar write-barrier.
+      QualType ExpTy = E->getType();
+      if (ExpTy->isPointerType())
+        ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+      if (ExpTy->isRecordType())
+        LV.SetObjCIvar(LV, false); 
+    }
+    return;
+  }
+  if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
+    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+    return;
+  }
+  
+  if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
+    setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV);
+    return;
+  }
+  
+  if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+    if (LV.isObjCIvar() && !LV.isObjCArray()) 
+      // Using array syntax to assigning to what an ivar points to is not 
+      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
+      LV.SetObjCIvar(LV, false); 
+    else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
+      // Using array syntax to assigning to what global points to is not 
+      // same as assigning to the global itself. {id *G;} G[i] = 0;
+      LV.SetGlobalObjCRef(LV, false);
+    return;
+  }
+  
+  if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
+    setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+    // We don't know if member is an 'ivar', but this flag is looked at
+    // only in the context of LV.isObjCIvar().
+    LV.SetObjCArray(LV, E->getType()->isArrayType());
+    return;
+  }
+}
+
+// EmitGlobalVarDeclLValue - Form an lvalue for a global (or file-scope)
+// variable, classifying it for ObjC GC write barriers.
+static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
+                                      const Expr *E, const VarDecl *VD) {
+  assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
+         "Var decl must have external storage or be a file var decl!");
+
+  llvm::Value *Addr = CGF.CGM.GetAddrOfGlobalVar(VD);
+  // A reference is represented as a pointer; load it to reach the referent.
+  if (VD->getType()->isReferenceType())
+    Addr = CGF.Builder.CreateLoad(Addr, "tmp");
+  LValue Result = LValue::MakeAddr(Addr, CGF.MakeQualifiers(E->getType()));
+  setObjCGCLValueClass(CGF.getContext(), E, Result);
+  return Result;
+}
+
+// EmitFunctionDeclLValue - Form an lvalue whose address is the given
+// function, adjusting the type for prototype-less (K&R) functions.
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+                                      const Expr *E, const FunctionDecl *FD) {
+  llvm::Value *FnPtr = CGF.CGM.GetAddrOfFunction(FD);
+
+  const FunctionProtoType *Proto = 0;
+  if (!FD->hasPrototype())
+    Proto = FD->getType()->getAs<FunctionProtoType>();
+
+  if (Proto) {
+    // Ugly case: for a K&R-style definition, the type of the definition
+    // isn't the same as the type of a use.  Correct for this with a
+    // bitcast.
+    QualType NoProtoType =
+        CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
+    NoProtoType = CGF.getContext().getPointerType(NoProtoType);
+    FnPtr = CGF.Builder.CreateBitCast(FnPtr, CGF.ConvertType(NoProtoType),
+                                      "tmp");
+  }
+
+  return LValue::MakeAddr(FnPtr, CGF.MakeQualifiers(E->getType()));
+}
+
+/// EmitDeclRefLValue - Form an lvalue for a reference to a declaration:
+/// a variable (global or local, possibly __block or a reference), a
+/// function, or a qualified field (pointer-to-data-member).
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+  const NamedDecl *ND = E->getDecl();
+
+  if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+    
+    // Check if this is a global variable.
+    if (VD->hasExternalStorage() || VD->isFileVarDecl()) 
+      return EmitGlobalVarDeclLValue(*this, E, VD);
+
+    // Locals without the blocks attribute never need GC write barriers.
+    bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>();
+
+    llvm::Value *V = LocalDeclMap[VD];
+    assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+
+    Qualifiers Quals = MakeQualifiers(E->getType());
+    // local variables do not get their gc attribute set.
+    // local static?
+    if (NonGCable) Quals.removeObjCGCAttr();
+
+    if (VD->hasAttr<BlocksAttr>()) {
+      // __block variables live in a byref struct; chase the forwarding
+      // pointer to the current copy, then index to the value field.
+      V = Builder.CreateStructGEP(V, 1, "forwarding");
+      V = Builder.CreateLoad(V);
+      V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
+                                  VD->getNameAsString());
+    }
+    // A reference is represented as a pointer; load it to reach the referent.
+    if (VD->getType()->isReferenceType())
+      V = Builder.CreateLoad(V, "tmp");
+    LValue LV = LValue::MakeAddr(V, Quals);
+    LValue::SetObjCNonGC(LV, NonGCable);
+    setObjCGCLValueClass(getContext(), E, LV);
+    return LV;
+  }
+  
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+    return EmitFunctionDeclLValue(*this, E, FD);
+  
+  // FIXME: the qualifier check does not seem sufficient here
+  if (E->getQualifier()) {
+    // A qualified reference to a field yields a pointer-to-data-member.
+    const FieldDecl *FD = cast<FieldDecl>(ND);
+    llvm::Value *V = CGM.EmitPointerToDataMember(FD);
+
+    return LValue::MakeAddr(V, MakeQualifiers(FD->getType()));
+  }
+  
+  assert(false && "Unhandled DeclRefExpr");
+  
+  // an invalid LValue, but the assert will
+  // ensure that this point is never reached.
+  return LValue();
+}
+
+/// EmitBlockDeclRefLValue - Form an lvalue for a variable captured by a
+/// block; the address comes from the block machinery.
+LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) {
+  return LValue::MakeAddr(GetAddrOfBlockDecl(E), MakeQualifiers(E->getType()));
+}
+
+/// EmitUnaryOpLValue - Form an lvalue for a unary operator that yields one:
+/// dereference, __real/__imag, pre-increment/decrement, and __extension__.
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+  // __extension__ doesn't affect lvalue-ness.
+  if (E->getOpcode() == UnaryOperator::Extension)
+    return EmitLValue(E->getSubExpr());
+
+  QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+  switch (E->getOpcode()) {
+  default: assert(0 && "Unknown unary operator lvalue!");
+  case UnaryOperator::Deref: {
+    QualType T = E->getSubExpr()->getType()->getPointeeType();
+    assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+    // The resulting lvalue lives in the pointer's address space.
+    Qualifiers Quals = MakeQualifiers(T);
+    Quals.setAddressSpace(ExprTy.getAddressSpace());
+
+    LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), Quals);
+    // We should not generate __weak write barrier on indirect reference
+    // of a pointer to object; as in void foo (__weak id *param); *param = 0;
+    // But, we continue to generate __strong write barrier on indirect write
+    // into a pointer to object.
+    if (getContext().getLangOptions().ObjC1 &&
+        getContext().getLangOptions().getGCMode() != LangOptions::NonGC &&
+        LV.isObjCWeak())
+      LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+    return LV;
+  }
+  case UnaryOperator::Real:
+  case UnaryOperator::Imag: {
+    // _Complex is lowered as a two-element struct: field 0 is the real part,
+    // field 1 the imaginary part.
+    LValue LV = EmitLValue(E->getSubExpr());
+    unsigned Idx = E->getOpcode() == UnaryOperator::Imag;
+    return LValue::MakeAddr(Builder.CreateStructGEP(LV.getAddress(),
+                                                    Idx, "idx"),
+                            MakeQualifiers(ExprTy));
+  }
+  case UnaryOperator::PreInc:
+  case UnaryOperator::PreDec: {
+    // Pre-inc/dec: update in place and yield the same lvalue.
+    LValue LV = EmitLValue(E->getSubExpr());
+    bool isInc = E->getOpcode() == UnaryOperator::PreInc;
+    
+    if (E->getType()->isAnyComplexType())
+      EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
+    else
+      EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
+    return LV;
+  }
+  }
+}
+
+/// EmitStringLiteralLValue - String literals are emitted as constant
+/// globals; no qualifiers apply.
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromLiteral(E),
+                          Qualifiers());
+}
+
+/// EmitObjCEncodeExprLValue - @encode() strings are emitted as constant
+/// globals, just like string literals.
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+  return LValue::MakeAddr(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+                          Qualifiers());
+}
+
+
+/// EmitPredefinedFunctionName - Form an lvalue for __func__, __FUNCTION__,
+/// or __PRETTY_FUNCTION__ as a constant C string global.
+LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) {
+  // Pick the prefix of the global's name from the identifier kind.
+  const char *Prefix = 0;
+  switch (Type) {
+  default: assert(0 && "Invalid type");
+  case PredefinedExpr::Func:
+    Prefix = "__func__.";
+    break;
+  case PredefinedExpr::Function:
+    Prefix = "__FUNCTION__.";
+    break;
+  case PredefinedExpr::PrettyFunction:
+    Prefix = "__PRETTY_FUNCTION__.";
+    break;
+  }
+
+  // Drop the \01 marker some mangled names carry.
+  llvm::StringRef FnName = CurFn->getName();
+  if (FnName.startswith("\01"))
+    FnName = FnName.substr(1);
+
+  std::string GlobalVarName = Prefix;
+  GlobalVarName += FnName;
+
+  std::string FunctionName =
+    PredefinedExpr::ComputeName(getContext(), (PredefinedExpr::IdentType)Type,
+                                CurCodeDecl);
+
+  llvm::Constant *C =
+    CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+  return LValue::MakeAddr(C, Qualifiers());
+}
+
+/// EmitPredefinedLValue - Dispatch the supported predefined identifier kinds
+/// to EmitPredefinedFunctionName; everything else is unsupported.
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+  unsigned IdentType = E->getIdentType();
+  if (IdentType == PredefinedExpr::Func ||
+      IdentType == PredefinedExpr::Function ||
+      IdentType == PredefinedExpr::PrettyFunction)
+    return EmitPredefinedFunctionName(IdentType);
+  return EmitUnsupportedLValue(E, "predefined expression");
+}
+
+/// getTrapBB - Return a basic block that calls llvm.trap, creating it (and a
+/// continuation block for the current insert point) if needed.
+llvm::BasicBlock *CodeGenFunction::getTrapBB() {
+  const CodeGenOptions &GCO = CGM.getCodeGenOpts();
+
+  // If we are not optimizing, don't collapse all calls to trap in the function
+  // to the same call, that way, in the debugger they can see which operation
+  // did in fact fail.  If we are optimizing, we collapse all calls to trap
+  // down to just one per function to save on codesize.
+  if (GCO.OptimizationLevel
+      && TrapBB)
+    return TrapBB;
+
+  // Branch around the trap block so the normal path continues afterwards.
+  llvm::BasicBlock *Cont = 0;
+  if (HaveInsertPoint()) {
+    Cont = createBasicBlock("cont");
+    EmitBranch(Cont);
+  }
+  TrapBB = createBasicBlock("trap");
+  EmitBlock(TrapBB);
+
+  // llvm.trap neither returns nor throws; the block ends in unreachable.
+  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap, 0, 0);
+  llvm::CallInst *TrapCall = Builder.CreateCall(F);
+  TrapCall->setDoesNotReturn();
+  TrapCall->setDoesNotThrow();
+  Builder.CreateUnreachable();
+
+  if (Cont)
+    EmitBlock(Cont);
+  return TrapBB;
+}
+
+/// EmitArraySubscriptExpr - Form an lvalue for E1[E2], handling vector
+/// element access, VLA element types, Objective-C interface element types,
+/// and the optional -fcatch-undefined-behavior bounds check.
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+  // The index must always be an integer, which is not an aggregate.  Emit it.
+  llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+  QualType IdxTy  = E->getIdx()->getType();
+  bool IdxSigned = IdxTy->isSignedIntegerType();
+
+  // If the base is a vector type, then we are forming a vector element lvalue
+  // with this subscript.
+  if (E->getBase()->getType()->isVectorType()) {
+    // Emit the vector as an lvalue to get its address.
+    LValue LHS = EmitLValue(E->getBase());
+    assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+    Idx = Builder.CreateIntCast(Idx,
+                          llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
+    return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+                                 E->getBase()->getType().getCVRQualifiers());
+  }
+
+  // The base must be a pointer, which is not an aggregate.  Emit it.
+  llvm::Value *Base = EmitScalarExpr(E->getBase());
+
+  // Extend or truncate the index type to 32 or 64-bits.
+  unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+  if (IdxBitwidth != LLVMPointerWidth)
+    Idx = Builder.CreateIntCast(Idx,
+                            llvm::IntegerType::get(VMContext, LLVMPointerWidth),
+                                IdxSigned, "idxprom");
+
+  // FIXME: As llvm implements the object size checking, this can come out.
+  if (CatchUndefined) {
+    // For direct subscripts of constant arrays, trap on out-of-range
+    // indices.  Note the comparison is ULE, so forming the one-past-the-end
+    // address is still permitted.
+    if (const ImplicitCastExpr *ICE=dyn_cast<ImplicitCastExpr>(E->getBase())) {
+      if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
+        if (ICE->getCastKind() == CastExpr::CK_ArrayToPointerDecay) {
+          if (const ConstantArrayType *CAT
+              = getContext().getAsConstantArrayType(DRE->getType())) {
+            llvm::APInt Size = CAT->getSize();
+            llvm::BasicBlock *Cont = createBasicBlock("cont");
+            Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
+                                  llvm::ConstantInt::get(Idx->getType(), Size)),
+                                 Cont, getTrapBB());
+            EmitBlock(Cont);
+          }
+        }
+      }
+    }
+  }
+
+  // We know that the pointer points to a type of the correct size, unless the
+  // size is a VLA or Objective-C interface.
+  llvm::Value *Address = 0;
+  if (const VariableArrayType *VAT =
+        getContext().getAsVariableArrayType(E->getType())) {
+    // The GEP below is over the base element type, so scale the index by
+    // the VLA element count, then divide out the base element size.
+    llvm::Value *VLASize = GetVLASize(VAT);
+
+    Idx = Builder.CreateMul(Idx, VLASize);
+
+    QualType BaseType = getContext().getBaseElementType(VAT);
+
+    CharUnits BaseTypeSize = getContext().getTypeSizeInChars(BaseType);
+    Idx = Builder.CreateUDiv(Idx,
+                             llvm::ConstantInt::get(Idx->getType(),
+                                 BaseTypeSize.getQuantity()));
+    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+  } else if (const ObjCInterfaceType *OIT =
+             dyn_cast<ObjCInterfaceType>(E->getType())) {
+    // Interface types have no usable LLVM size; do the arithmetic on i8*
+    // using the interface size from the AST context.
+    llvm::Value *InterfaceSize =
+      llvm::ConstantInt::get(Idx->getType(),
+          getContext().getTypeSizeInChars(OIT).getQuantity());
+
+    Idx = Builder.CreateMul(Idx, InterfaceSize);
+
+    const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+    Address = Builder.CreateGEP(Builder.CreateBitCast(Base, i8PTy),
+                                Idx, "arrayidx");
+    Address = Builder.CreateBitCast(Address, Base->getType());
+  } else {
+    Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+  }
+
+  QualType T = E->getBase()->getType()->getPointeeType();
+  assert(!T.isNull() &&
+         "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
+  // The result lvalue lives in the base pointer's address space.
+  Qualifiers Quals = MakeQualifiers(T);
+  Quals.setAddressSpace(E->getBase()->getType().getAddressSpace());
+
+  LValue LV = LValue::MakeAddr(Address, Quals);
+  if (getContext().getLangOptions().ObjC1 &&
+      getContext().getLangOptions().getGCMode() != LangOptions::NonGC) {
+    LValue::SetObjCNonGC(LV, !E->isOBJCGCCandidate(getContext()));
+    setObjCGCLValueClass(getContext(), E, LV);
+  }
+  return LV;
+}
+
+// GenerateConstantVector - Build an i32 constant vector whose lanes hold the
+// given element indices.
+static
+llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
+                                       llvm::SmallVector<unsigned, 4> &Elts) {
+  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+
+  llvm::SmallVector<llvm::Constant*, 4> CElts;
+  CElts.reserve(Elts.size());
+  for (unsigned i = 0, e = Elts.size(); i != e; ++i)
+    CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
+
+  return llvm::ConstantVector::get(&CElts[0], CElts.size());
+}
+
+/// EmitExtVectorElementExpr - Form an lvalue for an ext-vector element
+/// access (e.g. V.xy), composing access lists when the base is itself an
+/// ext-vector element lvalue.
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+
+  // Emit the base vector as an l-value.
+  LValue Base;
+
+  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+  if (E->isArrow()) {
+    // If it is a pointer to a vector, emit the address and form an lvalue with
+    // it.
+    llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+    const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
+    Qualifiers Quals = MakeQualifiers(PT->getPointeeType());
+    Quals.removeObjCGCAttr();
+    Base = LValue::MakeAddr(Ptr, Quals);
+  } else if (E->getBase()->isLvalue(getContext()) == Expr::LV_Valid) {
+    // Otherwise, if the base is an lvalue ( as in the case of foo.x.x),
+    // emit the base as an lvalue.
+    assert(E->getBase()->getType()->isVectorType());
+    Base = EmitLValue(E->getBase());
+  } else {
+    // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
+    assert(E->getBase()->getType()->getAs<VectorType>() &&
+           "Result must be a vector");
+    llvm::Value *Vec = EmitScalarExpr(E->getBase());
+    
+    // Store the vector to memory (because LValue wants an address).
+    llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
+    Builder.CreateStore(Vec, VecMem);
+    Base = LValue::MakeAddr(VecMem, Qualifiers());
+  }
+  
+  // Encode the element access list into a vector of unsigned indices.
+  llvm::SmallVector<unsigned, 4> Indices;
+  E->getEncodedElementAccess(Indices);
+
+  if (Base.isSimple()) {
+    llvm::Constant *CV = GenerateConstantVector(VMContext, Indices);
+    return LValue::MakeExtVectorElt(Base.getAddress(), CV,
+                                    Base.getVRQualifiers());
+  }
+  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+  // The base is itself an ext-vector element lvalue (foo.xyz.yx): compose
+  // the two access lists by indexing the base's list with our indices.
+  llvm::Constant *BaseElts = Base.getExtVectorElts();
+  llvm::SmallVector<llvm::Constant *, 4> CElts;
+
+  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
+    if (isa<llvm::ConstantAggregateZero>(BaseElts))
+      CElts.push_back(llvm::ConstantInt::get(Int32Ty, 0));
+    else
+      CElts.push_back(cast<llvm::Constant>(BaseElts->getOperand(Indices[i])));
+  }
+  llvm::Constant *CV = llvm::ConstantVector::get(&CElts[0], CElts.size());
+  return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV,
+                                  Base.getVRQualifiers());
+}
+
+/// EmitMemberExpr - Emit an l-value for a member access ("base.member" or
+/// "base->member").  The base address is computed first, then the result is
+/// formed according to the kind of member declaration referenced.
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+  bool isNonGC = false;
+  Expr *BaseExpr = E->getBase();
+  llvm::Value *BaseValue = NULL;
+  Qualifiers BaseQuals;
+
+  // If this is s.x, emit s as an lvalue.  If it is s->x, emit s as a scalar.
+  if (E->isArrow()) {
+    BaseValue = EmitScalarExpr(BaseExpr);
+    const PointerType *PTy =
+      BaseExpr->getType()->getAs<PointerType>();
+    BaseQuals = PTy->getPointeeType().getQualifiers();
+  } else if (isa<ObjCPropertyRefExpr>(BaseExpr->IgnoreParens()) ||
+             isa<ObjCImplicitSetterGetterRefExpr>(
+               BaseExpr->IgnoreParens())) {
+    // An ObjC property access as the base must be read via its getter; the
+    // aggregate result supplies the base address.
+    RValue RV = EmitObjCPropertyGet(BaseExpr);
+    BaseValue = RV.getAggregateAddr();
+    BaseQuals = BaseExpr->getType().getQualifiers();
+  } else {
+    LValue BaseLV = EmitLValue(BaseExpr);
+    if (BaseLV.isNonGC())
+      isNonGC = true;
+    // FIXME: this isn't right for bitfields.
+    BaseValue = BaseLV.getAddress();
+    QualType BaseTy = BaseExpr->getType();
+    BaseQuals = BaseTy.getQualifiers();
+  }
+
+  NamedDecl *ND = E->getMemberDecl();
+  if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
+    LValue LV = EmitLValueForField(BaseValue, Field, 
+                                   BaseQuals.getCVRQualifiers());
+    // Propagate GC classification from the base onto the field l-value.
+    LValue::SetObjCNonGC(LV, isNonGC);
+    setObjCGCLValueClass(getContext(), E, LV);
+    return LV;
+  }
+  
+  // A VarDecl member is presumably a static data member -- emitted like a
+  // global; a FunctionDecl member is a member function reference.
+  if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+    return EmitGlobalVarDeclLValue(*this, E, VD);
+
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+    return EmitFunctionDeclLValue(*this, E, FD);
+
+  assert(false && "Unhandled member declaration!");
+  return LValue();
+}
+
+/// EmitLValueForBitfield - Form a bit-field l-value from the base object's
+/// address plus the start/size information recorded by record layout.
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
+                                              const FieldDecl* Field,
+                                              unsigned CVRQualifiers) {
+  CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
+
+  // FIXME: CodeGenTypes should expose a method to get the appropriate type for
+  // FieldTy (the appropriate type is ABI-dependent).
+  const llvm::Type *FieldTy =
+    CGM.getTypes().ConvertTypeForMem(Field->getType());
+  const llvm::PointerType *BaseTy =
+  cast<llvm::PointerType>(BaseValue->getType());
+  unsigned AS = BaseTy->getAddressSpace();
+  // Re-point the base at the field's storage type, preserving address space.
+  BaseValue = Builder.CreateBitCast(BaseValue,
+                                    llvm::PointerType::get(FieldTy, AS),
+                                    "tmp");
+
+  // Index by the layout's field number; a flat GEP is correct because the
+  // base was just cast to a pointer-to-field-type.
+  llvm::Value *Idx =
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo);
+  llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp");
+
+  // The bit-field l-value carries start/size/signedness so loads and stores
+  // can mask and shift appropriately.
+  return LValue::MakeBitfield(V, Info.Start, Info.Size,
+                              Field->getType()->isSignedIntegerType(),
+                            Field->getType().getCVRQualifiers()|CVRQualifiers);
+}
+
+/// EmitLValueForField - Emit the address of a non-static data member given
+/// the address of its containing object.  Bit-fields take a separate path.
+LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
+                                           const FieldDecl* Field,
+                                           unsigned CVRQualifiers) {
+  if (Field->isBitField())
+    return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
+
+  unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+  llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
+
+  // Match union field type.  The LLVM struct has a single field type, so
+  // cast the member address to the requested member's own type.
+  if (Field->getParent()->isUnion()) {
+    const llvm::Type *FieldTy =
+      CGM.getTypes().ConvertTypeForMem(Field->getType());
+    const llvm::PointerType * BaseTy =
+      cast<llvm::PointerType>(BaseValue->getType());
+    unsigned AS = BaseTy->getAddressSpace();
+    V = Builder.CreateBitCast(V,
+                              llvm::PointerType::get(FieldTy, AS),
+                              "tmp");
+  }
+  // A reference member denotes its referent: load the stored pointer.
+  if (Field->getType()->isReferenceType())
+    V = Builder.CreateLoad(V, "tmp");
+
+  // Combine the field's qualifiers with those inherited from the base.
+  Qualifiers Quals = MakeQualifiers(Field->getType());
+  Quals.addCVRQualifiers(CVRQualifiers);
+  // __weak attribute on a field is ignored.
+  if (Quals.getObjCGCAttr() == Qualifiers::Weak)
+    Quals.removeObjCGCAttr();
+  
+  return LValue::MakeAddr(V, Quals);
+}
+
+/// EmitLValueForFieldInitialization - Like EmitLValueForField, but used when
+/// the field is being initialized: for a reference member we want the
+/// address of the reference slot itself, not of its (not-yet-bound) referent.
+LValue 
+CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue, 
+                                                  const FieldDecl* Field,
+                                                  unsigned CVRQualifiers) {
+  const QualType FieldType = Field->getType();
+
+  if (FieldType->isReferenceType()) {
+    assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+    const unsigned FieldIdx = CGM.getTypes().getLLVMFieldNo(Field);
+    llvm::Value *Slot = Builder.CreateStructGEP(BaseValue, FieldIdx, "tmp");
+    return LValue::MakeAddr(Slot, MakeQualifiers(FieldType));
+  }
+
+  // Every other field initializes exactly like an ordinary member access.
+  return EmitLValueForField(BaseValue, Field, CVRQualifiers);
+}
+
+/// EmitCompoundLiteralLValue - A compound literal denotes an unnamed object;
+/// give it storage, emit its initializer into that storage, and return the
+/// object's address as an l-value.
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
+  llvm::Value *Addr = CreateTempAlloca(ConvertTypeForMem(E->getType()),
+                                       ".compoundliteral");
+
+  const Expr *Init = E->getInitializer();
+  LValue LV = LValue::MakeAddr(Addr, MakeQualifiers(E->getType()));
+
+  // Dispatch on the literal's type class: complex and aggregate values are
+  // emitted directly into the storage; scalars go through a store.
+  if (E->getType()->isComplexType()) {
+    EmitComplexExprIntoAddr(Init, Addr, false);
+  } else if (hasAggregateLLVMType(E->getType())) {
+    EmitAnyExpr(Init, Addr, false);
+  } else {
+    EmitStoreThroughLValue(EmitAnyExpr(Init), LV, E->getType());
+  }
+
+  return LV;
+}
+
+/// EmitConditionalOperatorLValue - Emit "cond ? lhs : rhs" in an l-value
+/// context.  For a true l-value ?: the selected arm's address is funneled
+/// through a temporary; otherwise the result must be an aggregate r-value.
+LValue 
+CodeGenFunction::EmitConditionalOperatorLValue(const ConditionalOperator* E) {
+  if (E->isLvalue(getContext()) == Expr::LV_Valid) {
+    // If the condition constant-folds, emit only the live arm (a nonzero
+    // fold result presumably means "known"; 1 selects the LHS).
+    if (int Cond = ConstantFoldsToSimpleInteger(E->getCond())) {
+      Expr *Live = Cond == 1 ? E->getLHS() : E->getRHS();
+      if (Live)
+        return EmitLValue(Live);
+    }
+
+    // A null LHS is presumably the GNU "a ?: b" extension -- unsupported.
+    if (!E->getLHS())
+      return EmitUnsupportedLValue(E, "conditional operator with missing LHS");
+
+    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+    llvm::BasicBlock *ContBlock = createBasicBlock("cond.end");
+    
+    EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+    
+    // Any temporaries created here are conditional.
+    BeginConditionalBranch();
+    EmitBlock(LHSBlock);
+    LValue LHS = EmitLValue(E->getLHS());
+    EndConditionalBranch();
+    
+    if (!LHS.isSimple())
+      return EmitUnsupportedLValue(E, "conditional operator");
+
+    // The chosen arm stores its address into Temp (a poor man's phi node).
+    // FIXME: We shouldn't need an alloca for this.
+    llvm::Value *Temp = CreateTempAlloca(LHS.getAddress()->getType(),"condtmp");
+    Builder.CreateStore(LHS.getAddress(), Temp);
+    EmitBranch(ContBlock);
+    
+    // Any temporaries created here are conditional.
+    BeginConditionalBranch();
+    EmitBlock(RHSBlock);
+    LValue RHS = EmitLValue(E->getRHS());
+    EndConditionalBranch();
+    if (!RHS.isSimple())
+      return EmitUnsupportedLValue(E, "conditional operator");
+
+    Builder.CreateStore(RHS.getAddress(), Temp);
+    EmitBranch(ContBlock);
+
+    EmitBlock(ContBlock);
+    
+    // Reload the selected arm's address to form the result l-value.
+    Temp = Builder.CreateLoad(Temp, "lv");
+    return LValue::MakeAddr(Temp, MakeQualifiers(E->getType()));
+  }
+  
+  // ?: here should be an aggregate.
+  assert((hasAggregateLLVMType(E->getType()) &&
+          !E->getType()->isAnyComplexType()) &&
+         "Unexpected conditional operator!");
+
+  return EmitAggExprToLValue(E);
+}
+
+/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
+/// If the cast is a dynamic_cast, we can have the usual lvalue result,
+/// otherwise if a cast is needed by the code generator in an lvalue context,
+/// then it must mean that we need the address of an aggregate in order to
+/// access one of its fields.  This can happen for all the reasons that casts
+/// are permitted with aggregate result, including noop aggregate casts, and
+/// cast from scalar to union.
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+  switch (E->getCastKind()) {
+  default:
+    return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+
+  case CastExpr::CK_Dynamic: {
+    // dynamic_cast: emit the operand's address and apply the runtime cast.
+    LValue LV = EmitLValue(E->getSubExpr());
+    llvm::Value *V = LV.getAddress();
+    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
+    return LValue::MakeAddr(EmitDynamicCast(V, DCE),
+                            MakeQualifiers(E->getType()));
+  }
+
+  // These casts don't change the address; emit the operand's l-value as-is.
+  case CastExpr::CK_NoOp:
+  case CastExpr::CK_ConstructorConversion:
+  case CastExpr::CK_UserDefinedConversion:
+  case CastExpr::CK_AnyPointerToObjCPointerCast:
+    return EmitLValue(E->getSubExpr());
+  
+  case CastExpr::CK_DerivedToBase: {
+    const RecordType *DerivedClassTy = 
+      E->getSubExpr()->getType()->getAs<RecordType>();
+    CXXRecordDecl *DerivedClassDecl = 
+      cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+    const RecordType *BaseClassTy = E->getType()->getAs<RecordType>();
+    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());
+    
+    LValue LV = EmitLValue(E->getSubExpr());
+    
+    // Perform the derived-to-base conversion
+    llvm::Value *Base = 
+      GetAddressOfBaseClass(LV.getAddress(), DerivedClassDecl, 
+                            BaseClassDecl, /*NullCheckValue=*/false);
+    
+    return LValue::MakeAddr(Base, MakeQualifiers(E->getType()));
+  }
+  case CastExpr::CK_ToUnion:
+    // Scalar-to-union (the "cast from scalar to union" case above): build
+    // the union object in a temporary.
+    return EmitAggExprToLValue(E);
+  case CastExpr::CK_BaseToDerived: {
+    const RecordType *BaseClassTy = 
+      E->getSubExpr()->getType()->getAs<RecordType>();
+    CXXRecordDecl *BaseClassDecl = 
+      cast<CXXRecordDecl>(BaseClassTy->getDecl());
+    
+    const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
+    CXXRecordDecl *DerivedClassDecl = 
+      cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+    
+    LValue LV = EmitLValue(E->getSubExpr());
+    
+    // Perform the base-to-derived conversion
+    llvm::Value *Derived = 
+      GetAddressOfDerivedClass(LV.getAddress(), BaseClassDecl, 
+                               DerivedClassDecl, /*NullCheckValue=*/false);
+    
+    return LValue::MakeAddr(Derived, MakeQualifiers(E->getType()));
+  }
+  case CastExpr::CK_BitCast: {
+    // This must be a reinterpret_cast (or c-style equivalent).
+    const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
+    
+    LValue LV = EmitLValue(E->getSubExpr());
+    // Reinterpret the operand's address as a pointer to the written-to type.
+    llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+                                           ConvertType(CE->getTypeAsWritten()));
+    return LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+  }
+  }
+}
+
+/// EmitNullInitializationLValue - Materialize a zero-initialized temporary of
+/// the expression's type and return an l-value referring to it.
+LValue CodeGenFunction::EmitNullInitializationLValue(
+                                              const CXXZeroInitValueExpr *E) {
+  const QualType Ty = E->getType();
+  llvm::Value *Mem = CreateMemTemp(Ty);
+  EmitMemSetToZero(Mem, Ty);
+  return LValue::MakeAddr(Mem, MakeQualifiers(Ty));
+}
+
+//===--------------------------------------------------------------------===//
+//                             Expression Emission
+//===--------------------------------------------------------------------===//
+
+
+/// EmitCallExpr - Emit a call expression as an r-value, dispatching to the
+/// specialized emitters for blocks, C++ member calls, builtins, operator
+/// member calls, and pseudo-destructor calls before falling back to an
+/// ordinary indirect call.
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E, 
+                                     ReturnValueSlot ReturnValue) {
+  // Builtins never have block type.
+  if (E->getCallee()->getType()->isBlockPointerType())
+    return EmitBlockCallExpr(E, ReturnValue);
+
+  if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
+    return EmitCXXMemberCallExpr(CE, ReturnValue);
+
+  // Peek through the implicit function-to-pointer cast to find the callee
+  // declaration; builtins are dispatched straight to EmitBuiltinExpr.
+  const Decl *TargetDecl = 0;
+  if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+    if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+      TargetDecl = DRE->getDecl();
+      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(TargetDecl))
+        if (unsigned builtinID = FD->getBuiltinID())
+          return EmitBuiltinExpr(FD, builtinID, E);
+    }
+  }
+
+  // An overloaded operator implemented as a member function gets the member
+  // call path (TargetDecl was found above).
+  if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
+    if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+      return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
+
+  if (isa<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+    // C++ [expr.pseudo]p1:
+    //   The result shall only be used as the operand for the function call
+    //   operator (), and the result of such a call has type void. The only
+    //   effect is the evaluation of the postfix-expression before the dot or
+    //   arrow.
+    EmitScalarExpr(E->getCallee());
+    return RValue::get(0);
+  }
+
+  // Ordinary call: evaluate the callee to a function pointer and emit.
+  llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+  return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+                  E->arg_begin(), E->arg_end(), TargetDecl);
+}
+
+/// EmitBinaryOperatorLValue - Emit an l-value for the binary operators that
+/// can appear in l-value position: comma, pointer-to-member access, and
+/// (aggregate or reference-style) simple assignment.
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+  // Comma expressions just emit their LHS then their RHS as an l-value.
+  if (E->getOpcode() == BinaryOperator::Comma) {
+    EmitAnyExpr(E->getLHS());
+    // NOTE(review): EnsureInsertPoint presumably guards against the LHS
+    // having terminated the current block -- confirm against its definition.
+    EnsureInsertPoint();
+    return EmitLValue(E->getRHS());
+  }
+
+  if (E->getOpcode() == BinaryOperator::PtrMemD ||
+      E->getOpcode() == BinaryOperator::PtrMemI)
+    return EmitPointerToDataMemberBinaryExpr(E);
+  
+  // Can only get l-value for binary operator expressions which are a
+  // simple assignment of aggregate type.
+  if (E->getOpcode() != BinaryOperator::Assign)
+    return EmitUnsupportedLValue(E, "binary l-value expression");
+
+  if (!hasAggregateLLVMType(E->getType())) {
+    // Emit the LHS as an l-value.
+    LValue LV = EmitLValue(E->getLHS());
+    
+    // Perform the store, then return the LHS as the result l-value.
+    llvm::Value *RHS = EmitScalarExpr(E->getRHS());
+    EmitStoreOfScalar(RHS, LV.getAddress(), LV.isVolatileQualified(), 
+                      E->getType());
+    return LV;
+  }
+  
+  // Aggregate assignment: emit into a temporary-backed l-value.
+  return EmitAggExprToLValue(E);
+}
+
+/// EmitCallExprLValue - Emit a call whose result is used as an l-value.
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+  RValue RV = EmitCallExpr(E);
+
+  // A scalar result is only legal here when the callee returns a reference;
+  // the scalar value is then the referent's address.
+  if (RV.isScalar()) {
+    assert(E->getCallReturnType()->isReferenceType() &&
+           "Can't have a scalar return unless the return type is a "
+           "reference type!");
+    return LValue::MakeAddr(RV.getScalarVal(), MakeQualifiers(E->getType()));
+  }
+
+  // Aggregate results already live in memory; use that address directly.
+  return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
+}
+
+/// EmitVAArgExprLValue - Materialize the va_arg result in a temporary and
+/// return its address.
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+  // FIXME: This shouldn't require another copy.
+  LValue ArgLV = EmitAggExprToLValue(E);
+  return ArgLV;
+}
+
+/// EmitCXXConstructLValue - Construct the object into a fresh temporary and
+/// return the temporary's address as an l-value.
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+  llvm::Value *Slot = CreateMemTemp(E->getType(), "tmp");
+  EmitCXXConstructExpr(Slot, E);
+  return LValue::MakeAddr(Slot, MakeQualifiers(E->getType()));
+}
+
+/// EmitCXXTypeidLValue - Wrap the pointer produced by EmitCXXTypeidExpr as
+/// an l-value of the expression's type.
+LValue
+CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
+  llvm::Value *TypeInfo = EmitCXXTypeidExpr(E);
+  return LValue::MakeAddr(TypeInfo, MakeQualifiers(E->getType()));
+}
+
+/// EmitCXXBindTemporaryLValue - Emit the wrapped expression, then register
+/// the bound temporary so its cleanup is scheduled.
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+  LValue Result = EmitLValue(E->getSubExpr());
+  PushCXXTemporary(E->getTemporary(), Result.getAddress());
+  return Result;
+}
+
+/// EmitObjCMessageExprLValue - A message send used as an l-value; only valid
+/// when the send produces an aggregate, whose address becomes the l-value.
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+  RValue Result = EmitObjCMessageExpr(E);
+  // FIXME: can this be volatile?
+  return LValue::MakeAddr(Result.getAggregateAddr(),
+                          MakeQualifiers(E->getType()));
+}
+
+/// EmitIvarOffset - Ivar offsets are runtime-specific; delegate entirely to
+/// the configured Objective-C runtime.
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+                                             const ObjCIvarDecl *Ivar) {
+  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+  return Runtime.EmitIvarOffset(*this, Interface, Ivar);
+}
+
+/// EmitLValueForIvar - Ivar layout is runtime-specific; ask the Objective-C
+/// runtime to form the l-value.
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+                                          llvm::Value *BaseValue,
+                                          const ObjCIvarDecl *Ivar,
+                                          unsigned CVRQualifiers) {
+  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+  return Runtime.EmitObjCValueForIvar(*this, ObjectTy, BaseValue, Ivar,
+                                      CVRQualifiers);
+}
+
+/// EmitObjCIvarRefLValue - Emit an l-value for an Objective-C ivar access,
+/// computing the base object's address and delegating placement to the
+/// runtime.
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+  // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+  llvm::Value *BaseValue = 0;
+  const Expr *BaseExpr = E->getBase();
+  Qualifiers BaseQuals;
+  QualType ObjectTy;
+  if (E->isArrow()) {
+    // obj->ivar: the base is a pointer; evaluate it as a scalar.
+    BaseValue = EmitScalarExpr(BaseExpr);
+    ObjectTy = BaseExpr->getType()->getPointeeType();
+    BaseQuals = ObjectTy.getQualifiers();
+  } else {
+    // obj.ivar: the base is an object l-value; use its address.
+    LValue BaseLV = EmitLValue(BaseExpr);
+    // FIXME: this isn't right for bitfields.
+    BaseValue = BaseLV.getAddress();
+    ObjectTy = BaseExpr->getType();
+    BaseQuals = ObjectTy.getQualifiers();
+  }
+
+  // The runtime positions the ivar; then classify the result for GC.
+  LValue LV = 
+    EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
+                      BaseQuals.getCVRQualifiers());
+  setObjCGCLValueClass(getContext(), E, LV);
+  return LV;
+}
+
+LValue
+CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) {
+  // This is a special l-value that just issues sends when we load or store
+  // through it.
+  unsigned CVR = E->getType().getCVRQualifiers();
+  return LValue::MakePropertyRef(E, CVR);
+}
+
+LValue CodeGenFunction::EmitObjCKVCRefLValue(
+                                const ObjCImplicitSetterGetterRefExpr *E) {
+  // This is a special l-value that just issues sends when we load or store
+  // through it.
+  unsigned CVR = E->getType().getCVRQualifiers();
+  return LValue::MakeKVCRef(E, CVR);
+}
+
+/// EmitObjCSuperExprLValue - Taking 'super' as an l-value is not supported;
+/// route through the unsupported-l-value path.
+LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
+  LValue Unsupported = EmitUnsupportedLValue(E, "use of super");
+  return Unsupported;
+}
+
+/// EmitStmtExprLValue - A GNU statement expression used as an l-value; the
+/// result must be an aggregate, emitted into a temporary whose address is
+/// returned.
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+  RValue Result = EmitAnyExprToTemp(E);
+  // FIXME: can this be volatile?
+  return LValue::MakeAddr(Result.getAggregateAddr(),
+                          MakeQualifiers(E->getType()));
+}
+
+/// EmitCall - Emit a call through the given function-pointer value,
+/// evaluating the arguments and computing the ABI-specific function info.
+RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+                                 ReturnValueSlot ReturnValue,
+                                 CallExpr::const_arg_iterator ArgBeg,
+                                 CallExpr::const_arg_iterator ArgEnd,
+                                 const Decl *TargetDecl) {
+  // Get the actual function type. The callee type will always be a pointer to
+  // function type or a block pointer type.
+  assert(CalleeType->isFunctionPointerType() &&
+         "Call must have function pointer type!");
+
+  CalleeType = getContext().getCanonicalType(CalleeType);
+
+  const FunctionType *FnType
+    = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+
+  // Evaluate the arguments.  NOTE(review): dyn_cast yields null for
+  // no-prototype (K&R-style) function types, which EmitCallArgs accepts.
+  CallArgList Args;
+  EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
+
+  // (Removed a dead local: ResultType was computed but never used.)
+  return EmitCall(CGM.getTypes().getFunctionInfo(Args, FnType),
+                  Callee, ReturnValue, Args, TargetDecl);
+}
+
+/// EmitPointerToDataMemberBinaryExpr - Emit "LHS .* RHS" / "LHS ->* RHS"
+/// where RHS is a pointer to data member.  The member pointer's value is
+/// used as a byte offset from the object's address.
+LValue CodeGenFunction::
+EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
+  llvm::Value *BaseV;
+  if (E->getOpcode() == BinaryOperator::PtrMemI)
+    BaseV = EmitScalarExpr(E->getLHS());  // ->* : LHS is already a pointer.
+  else
+    BaseV = EmitLValue(E->getLHS()).getAddress();  // .* : take LHS's address.
+  // Do the arithmetic on i8* so the GEP advances by raw bytes.
+  const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(getLLVMContext());
+  BaseV = Builder.CreateBitCast(BaseV, i8Ty);
+  llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
+  llvm::Value *AddV = Builder.CreateInBoundsGEP(BaseV, OffsetV, "add.ptr");
+
+  QualType Ty = E->getRHS()->getType();
+  Ty = Ty->getAs<MemberPointerType>()->getPointeeType();
+  
+  // Cast the byte address back to a pointer to the member's type.
+  const llvm::Type *PType = ConvertType(getContext().getPointerType(Ty));
+  AddV = Builder.CreateBitCast(AddV, PType);
+  return LValue::MakeAddr(AddV, MakeQualifiers(Ty));
+}
+
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..97455c7
--- /dev/null
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,767 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+//                        Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace  {
+/// AggExprEmitter - Statement visitor that emits an expression of aggregate
+/// type, writing the result into DestPtr when one is provided.
+class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+  CodeGenFunction &CGF;
+  CGBuilderTy &Builder;
+  // Destination address for the aggregate result.  May be null when the
+  // result is ignored; EmitFinalDestCopy then materializes a temporary only
+  // if the source is volatile.
+  llvm::Value *DestPtr;
+  // True when the destination is volatile-qualified.
+  bool VolatileDest;
+  // True when the result is unused, allowing the final copy to be skipped.
+  bool IgnoreResult;
+  // True when emitting an initializer; threaded through to sub-emissions.
+  bool IsInitializer;
+  // True when the final copy must use the ObjC GC collectable-memmove path.
+  bool RequiresGCollection;
+public:
+  AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
+                 bool ignore, bool isinit, bool requiresGCollection)
+    : CGF(cgf), Builder(CGF.Builder),
+      DestPtr(destPtr), VolatileDest(v), IgnoreResult(ignore),
+      IsInitializer(isinit), RequiresGCollection(requiresGCollection) {
+  }
+
+  //===--------------------------------------------------------------------===//
+  //                               Utilities
+  //===--------------------------------------------------------------------===//
+
+  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+  /// represents a value lvalue, this method emits the address of the lvalue,
+  /// then loads the result into DestPtr.
+  void EmitAggLoadOfLValue(const Expr *E);
+
+  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+  void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
+  void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+
+  //===--------------------------------------------------------------------===//
+  //                            Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  // Fallback: any aggregate expression kind without a visitor below is
+  // reported as unsupported.
+  void VisitStmt(Stmt *S) {
+    CGF.ErrorUnsupported(S, "aggregate expression");
+  }
+  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+
+  // l-values.
+  void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); }
+  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+    EmitAggLoadOfLValue(E);
+  }
+  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+    EmitAggLoadOfLValue(E);
+  }
+  void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+    EmitAggLoadOfLValue(E);
+  }
+  void VisitPredefinedExpr(const PredefinedExpr *E) {
+    EmitAggLoadOfLValue(E);
+  }
+
+  // Operators.
+  void VisitCastExpr(CastExpr *E);
+  void VisitCallExpr(const CallExpr *E);
+  void VisitStmtExpr(const StmtExpr *E);
+  void VisitBinaryOperator(const BinaryOperator *BO);
+  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
+  void VisitBinAssign(const BinaryOperator *E);
+  void VisitBinComma(const BinaryOperator *E);
+  void VisitUnaryAddrOf(const UnaryOperator *E);
+
+  void VisitObjCMessageExpr(ObjCMessageExpr *E);
+  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+    EmitAggLoadOfLValue(E);
+  }
+  void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
+  void VisitObjCImplicitSetterGetterRefExpr(ObjCImplicitSetterGetterRefExpr *E);
+
+  void VisitConditionalOperator(const ConditionalOperator *CO);
+  void VisitChooseExpr(const ChooseExpr *CE);
+  void VisitInitListExpr(InitListExpr *E);
+  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+    Visit(DAE->getExpr());
+  }
+  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+  void VisitCXXConstructExpr(const CXXConstructExpr *E);
+  void VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E);
+  void VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+
+  void VisitVAArgExpr(VAArgExpr *E);
+
+  void EmitInitializationToLValue(Expr *E, LValue Address, QualType T);
+  void EmitNullInitializationToLValue(LValue Address, QualType T);
+  //  case Expr::ChooseExprClass:
+  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+};
+}  // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+//                                Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents a value lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+  EmitFinalDestCopy(E, CGF.EmitLValue(E));
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+/// When Ignore is true (and IgnoreResult is set), even a volatile source
+/// does not force a copy.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
+  assert(Src.isAggregate() && "value must be aggregate value!");
+
+  // If the result is ignored, don't copy from the value.
+  if (DestPtr == 0) {
+    if (!Src.isVolatileQualified() || (IgnoreResult && Ignore))
+      return;
+    // If the source is volatile, we must read from it; to do that, we need
+    // some place to put it.
+    DestPtr = CGF.CreateMemTemp(E->getType(), "agg.tmp");
+  }
+
+  // Under ObjC GC, collectable aggregates must be moved by the runtime.
+  if (RequiresGCollection) {
+    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
+                                              DestPtr, Src.getAggregateAddr(),
+                                              E->getType());
+    return;
+  }
+  // If the result of the assignment is used, copy the LHS there also.
+  // FIXME: Pass VolatileDest as well.  I think we also need to merge volatile
+  // from the source as well, as we can't eliminate it if either operand
+  // is volatile, unless copy has volatile for both source and destination..
+  CGF.EmitAggregateCopy(DestPtr, Src.getAggregateAddr(), E->getType(),
+                        VolatileDest|Src.isVolatileQualified());
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
+  assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
+
+  // Repackage the simple l-value as an aggregate r-value and defer to the
+  // RValue overload.
+  RValue SrcRV = RValue::getAggregate(Src.getAddress(),
+                                      Src.isVolatileQualified());
+  EmitFinalDestCopy(E, SrcRV, Ignore);
+}
+
+//===----------------------------------------------------------------------===//
+//                            Visitor Methods
+//===----------------------------------------------------------------------===//
+
+/// VisitCastExpr - Emit an aggregate-typed cast, dispatching on cast kind.
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+  switch (E->getCastKind()) {
+  default: assert(0 && "Unhandled cast kind!");
+
+  case CastExpr::CK_ToUnion: {
+    // GCC union extension
+    // Reinterpret the destination as a pointer to the active member's type
+    // and initialize through it.
+    QualType PtrTy =
+    CGF.getContext().getPointerType(E->getSubExpr()->getType());
+    llvm::Value *CastPtr = Builder.CreateBitCast(DestPtr,
+                                                 CGF.ConvertType(PtrTy));
+    EmitInitializationToLValue(E->getSubExpr(),
+                               LValue::MakeAddr(CastPtr, Qualifiers()), 
+                               E->getType());
+    break;
+  }
+
+  // FIXME: Remove the CK_Unknown check here.
+  case CastExpr::CK_Unknown:
+  case CastExpr::CK_NoOp:
+  case CastExpr::CK_UserDefinedConversion:
+  case CastExpr::CK_ConstructorConversion:
+    // Value-preserving casts: just emit the operand into the destination.
+    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+                                                   E->getType()) &&
+           "Implicit cast types must be compatible");
+    Visit(E->getSubExpr());
+    break;
+
+  case CastExpr::CK_NullToMemberPointer: {
+    // A null member pointer is the {ptr, adj} pair both zeroed.
+    const llvm::Type *PtrDiffTy = 
+      CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+    llvm::Value *NullValue = llvm::Constant::getNullValue(PtrDiffTy);
+    llvm::Value *Ptr = Builder.CreateStructGEP(DestPtr, 0, "ptr");
+    Builder.CreateStore(NullValue, Ptr, VolatileDest);
+    
+    llvm::Value *Adj = Builder.CreateStructGEP(DestPtr, 1, "adj");
+    Builder.CreateStore(NullValue, Adj, VolatileDest);
+
+    break;
+  }
+      
+  case CastExpr::CK_BitCast: {
+    // This must be a member function pointer cast.
+    Visit(E->getSubExpr());
+    break;
+  }
+
+  case CastExpr::CK_DerivedToBaseMemberPointer:
+  case CastExpr::CK_BaseToDerivedMemberPointer: {
+    // Emit the source member pointer into a temporary, then copy its
+    // {ptr, adj} fields to the destination, fixing up the adjustment.
+    QualType SrcType = E->getSubExpr()->getType();
+    
+    llvm::Value *Src = CGF.CreateMemTemp(SrcType, "tmp");
+    CGF.EmitAggExpr(E->getSubExpr(), Src, SrcType.isVolatileQualified());
+    
+    llvm::Value *SrcPtr = Builder.CreateStructGEP(Src, 0, "src.ptr");
+    SrcPtr = Builder.CreateLoad(SrcPtr);
+    
+    llvm::Value *SrcAdj = Builder.CreateStructGEP(Src, 1, "src.adj");
+    SrcAdj = Builder.CreateLoad(SrcAdj);
+    
+    llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
+    Builder.CreateStore(SrcPtr, DstPtr, VolatileDest);
+    
+    llvm::Value *DstAdj = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
+    
+    // Now see if we need to update the adjustment.
+    const CXXRecordDecl *BaseDecl = 
+      cast<CXXRecordDecl>(SrcType->getAs<MemberPointerType>()->
+                          getClass()->getAs<RecordType>()->getDecl());
+    const CXXRecordDecl *DerivedDecl = 
+      cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
+                          getClass()->getAs<RecordType>()->getDecl());
+    if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+      std::swap(DerivedDecl, BaseDecl);
+
+    // Shift the adjustment by the base class offset: subtract when casting
+    // toward the base, add when casting toward the derived class.
+    if (llvm::Constant *Adj = 
+          CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, BaseDecl)) {
+      if (E->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+        SrcAdj = Builder.CreateSub(SrcAdj, Adj, "adj");
+      else
+        SrcAdj = Builder.CreateAdd(SrcAdj, Adj, "adj");
+    }
+    
+    Builder.CreateStore(SrcAdj, DstAdj, VolatileDest);
+    break;
+  }
+  }
+}
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+  if (E->getCallReturnType()->isReferenceType()) {
+    EmitAggLoadOfLValue(E);
+    return;
+  }
+
+  // If the struct doesn't require GC, we can just pass the destination
+  // directly to EmitCall.
+  if (!RequiresGCollection) {
+    CGF.EmitCallExpr(E, ReturnValueSlot(DestPtr, VolatileDest));
+    return;
+  }
+  
+  RValue RV = CGF.EmitCallExpr(E);
+  EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+  RValue RV = CGF.EmitObjCMessageExpr(E);
+  EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+  RValue RV = CGF.EmitObjCPropertyGet(E);
+  EmitFinalDestCopy(E, RV);
+}
+
+void AggExprEmitter::VisitObjCImplicitSetterGetterRefExpr(
+                                   ObjCImplicitSetterGetterRefExpr *E) {
+  RValue RV = CGF.EmitObjCPropertyGet(E);
+  EmitFinalDestCopy(E, RV);
+}
+
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  // Emit the LHS purely for its side effects (null destination,
  // IgnoreResult = true); its value is discarded.
  CGF.EmitAnyExpr(E->getLHS(), 0, false, true);
  // The comma expression's value is the RHS; emit it into our destination.
  CGF.EmitAggExpr(E->getRHS(), DestPtr, VolatileDest,
                  /*IgnoreResult=*/false, IsInitializer);
}
+
+void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
+  // We have a member function pointer.
+  const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+  (void) MPT;
+  assert(MPT->getPointeeType()->isFunctionProtoType() &&
+         "Unexpected member pointer type!");
+  
+  const DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
+  const CXXMethodDecl *MD = 
+    cast<CXXMethodDecl>(DRE->getDecl())->getCanonicalDecl();
+
+  const llvm::Type *PtrDiffTy = 
+    CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+  llvm::Value *DstPtr = Builder.CreateStructGEP(DestPtr, 0, "dst.ptr");
+  llvm::Value *FuncPtr;
+  
+  if (MD->isVirtual()) {
+    int64_t Index = 
+      CGF.CGM.getVtableInfo().getMethodVtableIndex(MD);
+    
+    // Itanium C++ ABI 2.3:
+    //   For a non-virtual function, this field is a simple function pointer. 
+    //   For a virtual function, it is 1 plus the virtual table offset 
+    //   (in bytes) of the function, represented as a ptrdiff_t. 
+    FuncPtr = llvm::ConstantInt::get(PtrDiffTy, (Index * 8) + 1);
+  } else {
+    const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+    const llvm::Type *Ty =
+      CGF.CGM.getTypes().GetFunctionType(CGF.CGM.getTypes().getFunctionInfo(MD),
+                                         FPT->isVariadic());
+    llvm::Constant *Fn = CGF.CGM.GetAddrOfFunction(MD, Ty);
+    FuncPtr = llvm::ConstantExpr::getPtrToInt(Fn, PtrDiffTy);
+  }
+  Builder.CreateStore(FuncPtr, DstPtr, VolatileDest);
+
+  llvm::Value *AdjPtr = Builder.CreateStructGEP(DestPtr, 1, "dst.adj");
+  
+  // The adjustment will always be 0.
+  Builder.CreateStore(llvm::ConstantInt::get(PtrDiffTy, 0), AdjPtr,
+                      VolatileDest);
+}
+
// A GNU statement expression of aggregate type: the value of the last
// statement of the compound statement is emitted directly into DestPtr.
void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest);
}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+  if (E->getOpcode() == BinaryOperator::PtrMemD ||
+      E->getOpcode() == BinaryOperator::PtrMemI)
+    VisitPointerToDataMemberBinaryOperator(E);
+  else
+    CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
+                                                    const BinaryOperator *E) {
+  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
+  EmitFinalDestCopy(E, LV);
+}
+
// Emit an aggregate assignment.  The RHS is emitted directly into the LHS
// storage when possible; Objective-C property/KVC setters need a temporary.
void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");
  LValue LHS = CGF.EmitLValue(E->getLHS());

  // We have to special case property setters, otherwise we must have
  // a simple lvalue (no aggregates inside vectors, bitfields).
  if (LHS.isPropertyRef()) {
    // Evaluate the RHS into DestPtr (or a fresh temporary), then pass that
    // aggregate to the property setter.
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else if (LHS.isKVCRef()) {
    // Same pattern for key-value-coding references.
    llvm::Value *AggLoc = DestPtr;
    if (!AggLoc)
      AggLoc = CGF.CreateMemTemp(E->getRHS()->getType());
    CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
    CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
                            RValue::getAggregate(AggLoc, VolatileDest));
  } else {
    // Under the NeXT runtime, assigning a struct that contains object
    // members requires a GC write barrier for the copy.
    bool RequiresGCollection = false;
    if (CGF.getContext().getLangOptions().NeXTRuntime) {
      QualType LHSTy = E->getLHS()->getType();
      if (const RecordType *FDTTy = LHSTy.getTypePtr()->getAs<RecordType>())
        RequiresGCollection = FDTTy->getDecl()->hasObjectMember();
    }
    // Codegen the RHS so that it stores directly into the LHS.
    CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
                    false, false, RequiresGCollection);
    // The assignment expression's own value is the LHS; propagate it to our
    // destination if one was requested.
    EmitFinalDestCopy(E, LHS, true);
  }
}
+
// Emit ?: of aggregate type by branching and emitting each arm directly
// into the shared destination; no PHI is needed for aggregates.
void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) {
  // The GNU extension that omits the LHS (cond ?: rhs) is not supported
  // for aggregates.
  if (!E->getLHS()) {
    CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
    return;
  }

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Each arm is emitted inside a conditional-branch region so that
  // temporaries created in it are handled correctly.
  CGF.BeginConditionalBranch();
  CGF.EmitBlock(LHSBlock);

  // The GNU missing-LHS form was rejected above.
  assert(E->getLHS() && "Must have LHS for aggregate value");

  Visit(E->getLHS());
  CGF.EndConditionalBranch();
  CGF.EmitBranch(ContBlock);

  CGF.BeginConditionalBranch();
  CGF.EmitBlock(RHSBlock);

  Visit(E->getRHS());
  CGF.EndConditionalBranch();
  CGF.EmitBranch(ContBlock);

  CGF.EmitBlock(ContBlock);
}
+
// __builtin_choose_expr: emit whichever arm was selected at compile time.
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr(CGF.getContext()));
}
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+  if (!ArgPtr) {
+    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+    return;
+  }
+
+  EmitFinalDestCopy(VE, LValue::MakeAddr(ArgPtr, Qualifiers()));
+}
+
// Emit a C++ temporary of aggregate type and register it for destruction
// (unless it is being used as an initializer).
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  llvm::Value *Val = DestPtr;

  if (!Val) {
    // Create a temporary variable.
    Val = CGF.CreateMemTemp(E->getType(), "tmp");

    // FIXME: volatile
    CGF.EmitAggExpr(E->getSubExpr(), Val, false);
  } else
    // A destination already exists; emit the subexpression straight into it.
    Visit(E->getSubExpr());

  // Don't make this a live temporary if we're emitting an initializer expr.
  if (!IsInitializer)
    CGF.PushCXXTemporary(E->getTemporary(), Val);
}
+
// Emit a constructor call whose result is this aggregate expression's value.
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  llvm::Value *Val = DestPtr;

  if (!Val) {
    // Create a temporary variable.
    Val = CGF.CreateMemTemp(E->getType(), "tmp");
  }

  // Zero the storage first when value-initialization semantics require it
  // (e.g. T() where T has an implicitly-defined constructor).
  if (E->requiresZeroInitialization())
    EmitNullInitializationToLValue(LValue::MakeAddr(Val, 
                                                    // FIXME: Qualifiers()?
                                                 E->getType().getQualifiers()),
                                   E->getType());

  CGF.EmitCXXConstructExpr(Val, E);
}
+
+void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+  llvm::Value *Val = DestPtr;
+
+  if (!Val) {
+    // Create a temporary variable.
+    Val = CGF.CreateMemTemp(E->getType(), "tmp");
+  }
+  CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
+}
+
+void AggExprEmitter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+  llvm::Value *Val = DestPtr;
+
+  if (!Val) {
+    // Create a temporary variable.
+    Val = CGF.CreateMemTemp(E->getType(), "tmp");
+  }
+  LValue LV = LValue::MakeAddr(Val, Qualifiers());
+  EmitNullInitializationToLValue(LV, E->getType());
+}
+
+void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+  llvm::Value *Val = DestPtr;
+
+  if (!Val) {
+    // Create a temporary variable.
+    Val = CGF.CreateMemTemp(E->getType(), "tmp");
+  }
+  LValue LV = LValue::MakeAddr(Val, Qualifiers());
+  EmitNullInitializationToLValue(LV, E->getType());
+}
+
// EmitInitializationToLValue - Emit initializer expression E of type T into
// the storage designated by LV, dispatching on the kind of value T is.
void 
AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV, QualType T) {
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (isa<ImplicitValueInitExpr>(E)) {
    // An implicit value-initializer: zero-fill.
    EmitNullInitializationToLValue(LV, T);
  } else if (T->isReferenceType()) {
    // Reference members: bind the reference, then store the pointer.
    RValue RV = CGF.EmitReferenceBindingToExpr(E, /*IsInitializer=*/false);
    CGF.EmitStoreThroughLValue(RV, LV, T);
  } else if (T->isAnyComplexType()) {
    CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
  } else if (CGF.hasAggregateLLVMType(T)) {
    // Nested aggregate: emit straight into the field's storage.
    CGF.EmitAnyExpr(E, LV.getAddress(), false);
  } else {
    // Scalar: evaluate and store.
    CGF.EmitStoreThroughLValue(CGF.EmitAnyExpr(E), LV, T);
  }
}
+
// EmitNullInitializationToLValue - Default-initialize the storage at LV to
// a null/zero value of type T.
void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
  if (!CGF.hasAggregateLLVMType(T)) {
    // For non-aggregates, we can store zero
    llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
    CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
  } else {
    // Otherwise, just memset the whole thing to zero.  This is legal
    // because in LLVM, all default initializers are guaranteed to have a
    // bit pattern of all zeros.
    // FIXME: That isn't true for member pointers!
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitMemSetToZero(LV.getAddress(), T);
  }
}
+
// Emit an aggregate initializer list, element by element, into DestPtr.
// Elements not covered by the list are default-initialized to null.
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E, LValue::MakeAddr(GV, Qualifiers()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator()) {
    CGF.ErrorUnsupported(E, "GNU array range designator extension");
  }

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    const llvm::PointerType *APType =
      cast<llvm::PointerType>(DestPtr->getType());
    const llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    uint64_t NumInitElements = E->getNumInits();

    // A single initializer of the array's own type (e.g. a string literal
    // initializing a char array) initializes the whole array by copy.
    if (E->getNumInits() > 0) {
      QualType T1 = E->getType();
      QualType T2 = E->getInit(0)->getType();
      if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
        EmitAggLoadOfLValue(E->getInit(0));
        return;
      }
    }

    uint64_t NumArrayElements = AType->getNumElements();
    QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
    ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();

    // FIXME: were we intentionally ignoring address spaces and GC attributes?
    Qualifiers Quals = CGF.MakeQualifiers(ElementType);

    // Initialize every array element; elements past the end of the list are
    // default-initialized to null.
    for (uint64_t i = 0; i != NumArrayElements; ++i) {
      llvm::Value *NextVal = Builder.CreateStructGEP(DestPtr, i, ".array");
      if (i < NumInitElements)
        EmitInitializationToLValue(E->getInit(i),
                                   LValue::MakeAddr(NextVal, Quals), 
                                   ElementType);
      else
        EmitNullInitializationToLValue(LValue::MakeAddr(NextVal, Quals),
                                       ElementType);
    }
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
  unsigned CurInitVal = 0;

  if (E->getType()->isUnionType()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = SD->field_begin(),
                                   FieldEnd = SD->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);

    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc, Field->getType());
    } else {
      // Default-initialize to null
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }

    return;
  }

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  for (RecordDecl::field_iterator Field = SD->field_begin(),
                               FieldEnd = SD->field_end();
       Field != FieldEnd; ++Field) {
    // We're done once we hit the flexible array member
    if (Field->getType()->isIncompleteArrayType())
      break;

    if (Field->isUnnamedBitfield())
      continue;

    // FIXME: volatility
    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, *Field, 0);
    // We never generate write-barriers for initialized fields.
    LValue::SetObjCNonGC(FieldLoc, true);
    if (CurInitVal < NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(CurInitVal++), FieldLoc, 
                                 Field->getType());
    } else {
      // We're out of initializers; default-initialize to null
      EmitNullInitializationToLValue(FieldLoc, Field->getType());
    }
  }
}
+
+//===----------------------------------------------------------------------===//
+//                        Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type.  The result is computed into DestPtr.  Note that if DestPtr is null,
+/// the value of the aggregate expression is not needed.  If VolatileDest is
+/// true, DestPtr cannot be 0.
+//
+// FIXME: Take Qualifiers object.
void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
                                  bool VolatileDest, bool IgnoreResult,
                                  bool IsInitializer,
                                  bool RequiresGCollection) {
  assert(E && hasAggregateLLVMType(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert ((DestPtr != 0 || VolatileDest == false)
          && "volatile aggregate can't be 0");

  // Delegate to AggExprEmitter, which dispatches on the expression kind.
  AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult, IsInitializer,
                 RequiresGCollection)
    .Visit(const_cast<Expr*>(E));
}
+
+LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
+  assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
+  Qualifiers Q = MakeQualifiers(E->getType());
+  llvm::Value *Temp = CreateMemTemp(E->getType());
+  EmitAggExpr(E, Temp, Q.hasVolatile());
+  return LValue::MakeAddr(Temp, Q);
+}
+
/// EmitAggregateClear - Zero-fill the aggregate storage at DestPtr.
/// Complex values are handled by the complex emitter, not here.
void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  EmitMemSetToZero(DestPtr, Ty);
}
+
/// EmitAggregateCopy - Copy an aggregate of type Ty from SrcPtr to DestPtr
/// with llvm.memcpy, using the type's size and alignment.
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the
  // first object, then the overlap shall be exact and the two objects shall
  // have qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.
  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
  if (SrcPtr->getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");

  // Get size and alignment info for this aggregate.
  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);

  // FIXME: Handle variable sized types.
  const llvm::Type *IntPtr =
          llvm::IntegerType::get(VMContext, LLVMPointerWidth);

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.
  Builder.CreateCall4(CGM.getMemCpyFn(),
                      DestPtr, SrcPtr,
                      // TypeInfo.first describes size in bits.
                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
                      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
                                             TypeInfo.second/8));
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
new file mode 100644
index 0000000..0328621
--- /dev/null
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -0,0 +1,926 @@
+//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
/// EmitCXXMemberCall - Emit a call to instance method MD through Callee with
/// the given 'this' pointer.  VTT, if non-null, is passed as the hidden VTT
/// parameter after 'this'.  [ArgBeg, ArgEnd) are the explicit arguments.
RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
                                          llvm::Value *Callee,
                                          ReturnValueSlot ReturnValue,
                                          llvm::Value *This,
                                          llvm::Value *VTT,
                                          CallExpr::const_arg_iterator ArgBeg,
                                          CallExpr::const_arg_iterator ArgEnd) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  CallArgList Args;

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This),
                                MD->getThisType(getContext())));

  // If there is a VTT parameter, emit it.
  if (VTT) {
    QualType T = getContext().getPointerType(getContext().VoidPtrTy);
    Args.push_back(std::make_pair(RValue::get(VTT), T));
  }
  
  // And the rest of the call args
  EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);

  QualType ResultType = FPT->getResultType();
  return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
                                                 FPT->getCallConv(),
                                                 FPT->getNoReturnAttr()), Callee, 
                  ReturnValue, Args, MD);
}
+
+/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
+/// expr can be devirtualized.
+static bool canDevirtualizeMemberFunctionCalls(const Expr *Base) {
+  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+    if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      // This is a record decl. We know the type and can devirtualize it.
+      return VD->getType()->isRecordType();
+    }
+    
+    return false;
+  }
+  
+  // We can always devirtualize calls on temporary object expressions.
+  if (isa<CXXConstructExpr>(Base))
+    return true;
+  
+  // And calls on bound temporaries.
+  if (isa<CXXBindTemporaryExpr>(Base))
+    return true;
+  
+  // Check if this is a call expr that returns a record type.
+  if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+    return CE->getCallReturnType()->isRecordType();
+  
+  // We can't devirtualize the call.
+  return false;
+}
+
// Emit a C++ member call expression, handling static methods, trivial copy
// assignment, trivial destructors, and virtual dispatch/devirtualization.
RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
                                              ReturnValueSlot ReturnValue) {
  // A BinaryOperator callee means (obj.*ptr)() / (obj->*ptr)().
  if (isa<BinaryOperator>(CE->getCallee()->IgnoreParens())) 
    return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
      
  const MemberExpr *ME = cast<MemberExpr>(CE->getCallee()->IgnoreParens());
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());

  if (MD->isStatic()) {
    // The method is static, emit it as we would a regular call.
    llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
    return EmitCall(getContext().getPointerType(MD->getType()), Callee,
                    ReturnValue, CE->arg_begin(), CE->arg_end());
  }
  
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());
  llvm::Value *This;

  // Compute 'this': a scalar pointer for '->', the base lvalue for '.'.
  if (ME->isArrow())
    This = EmitScalarExpr(ME->getBase());
  else {
    LValue BaseLV = EmitLValue(ME->getBase());
    This = BaseLV.getAddress();
  }

  if (MD->isCopyAssignment() && MD->isTrivial()) {
    // We don't like to generate the trivial copy assignment operator when
    // it isn't necessary; just produce the proper effect here.
    llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
    EmitAggregateCopy(This, RHS, CE->getType());
    return RValue::get(This);
  }

  // C++ [class.virtual]p12:
  //   Explicit qualification with the scope operator (5.1) suppresses the
  //   virtual call mechanism.
  //
  // We also don't emit a virtual call if the base expression has a record type
  // because then we know what the type is.
  llvm::Value *Callee;
  if (const CXXDestructorDecl *Destructor
             = dyn_cast<CXXDestructorDecl>(MD)) {
    // Trivial destructors generate no code at all.
    if (Destructor->isTrivial())
      return RValue::get(0);
    if (MD->isVirtual() && !ME->hasQualifier() && 
        !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
      Callee = BuildVirtualCall(Destructor, Dtor_Complete, This, Ty); 
    } else {
      Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
    }
  } else if (MD->isVirtual() && !ME->hasQualifier() && 
             !canDevirtualizeMemberFunctionCalls(ME->getBase())) {
    Callee = BuildVirtualCall(MD, This, Ty); 
  } else {
    Callee = CGM.GetAddrOfFunction(MD, Ty);
  }

  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           CE->arg_begin(), CE->arg_end());
}
+
// Emit a call through a pointer to member function, (obj.*ptr)(...) or
// (obj->*ptr)(...).  Per the Itanium ABI a member function pointer is the
// aggregate { ptrdiff_t ptr, ptrdiff_t adj }: 'adj' is added to 'this', and
// the low bit of 'ptr' selects virtual (vtable offset + 1) vs. non-virtual
// (plain function address) dispatch.
RValue
CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  const BinaryOperator *BO =
      cast<BinaryOperator>(E->getCallee()->IgnoreParens());
  const Expr *BaseExpr = BO->getLHS();
  const Expr *MemFnExpr = BO->getRHS();
  
  const MemberPointerType *MPT = 
    MemFnExpr->getType()->getAs<MemberPointerType>();
  const FunctionProtoType *FPT = 
    MPT->getPointeeType()->getAs<FunctionProtoType>();
  const CXXRecordDecl *RD = 
    cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());

  const llvm::FunctionType *FTy = 
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(RD, FPT),
                                   FPT->isVariadic());

  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);

  // Get the member function pointer.
  llvm::Value *MemFnPtr = CreateMemTemp(MemFnExpr->getType(), "mem.fn");
  EmitAggExpr(MemFnExpr, MemFnPtr, /*VolatileDest=*/false);

  // Emit the 'this' pointer.
  llvm::Value *This;
  
  if (BO->getOpcode() == BinaryOperator::PtrMemI)
    This = EmitScalarExpr(BaseExpr);
  else 
    This = EmitLValue(BaseExpr).getAddress();
  
  // Adjust it: add the 'adj' field (in bytes) to 'this'.
  llvm::Value *Adj = Builder.CreateStructGEP(MemFnPtr, 1);
  Adj = Builder.CreateLoad(Adj, "mem.fn.adj");
  
  llvm::Value *Ptr = Builder.CreateBitCast(This, Int8PtrTy, "ptr");
  Ptr = Builder.CreateGEP(Ptr, Adj, "adj");
  
  This = Builder.CreateBitCast(Ptr, This->getType(), "this");
  
  llvm::Value *FnPtr = Builder.CreateStructGEP(MemFnPtr, 0, "mem.fn.ptr");
  
  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());

  llvm::Value *FnAsInt = Builder.CreateLoad(FnPtr, "fn");
  
  // If the LSB in the function pointer is 1, the function pointer points to
  // a virtual function.
  llvm::Value *IsVirtual 
    = Builder.CreateAnd(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1),
                        "and");
  
  IsVirtual = Builder.CreateTrunc(IsVirtual,
                                  llvm::Type::getInt1Ty(VMContext));
  
  llvm::BasicBlock *FnVirtual = createBasicBlock("fn.virtual");
  llvm::BasicBlock *FnNonVirtual = createBasicBlock("fn.nonvirtual");
  llvm::BasicBlock *FnEnd = createBasicBlock("fn.end");
  
  Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
  EmitBlock(FnVirtual);
  
  const llvm::Type *VtableTy = 
    FTy->getPointerTo()->getPointerTo();

  // Virtual case: load the vtable pointer from the (adjusted) object, then
  // index it by (ptr - 1) bytes to find the function entry.
  llvm::Value *Vtable = Builder.CreateBitCast(This, VtableTy->getPointerTo());
  Vtable = Builder.CreateLoad(Vtable);
  
  Vtable = Builder.CreateBitCast(Vtable, Int8PtrTy);
  llvm::Value *VtableOffset = 
    Builder.CreateSub(FnAsInt, llvm::ConstantInt::get(PtrDiffTy, 1));
  
  Vtable = Builder.CreateGEP(Vtable, VtableOffset, "fn");
  Vtable = Builder.CreateBitCast(Vtable, VtableTy);
  
  llvm::Value *VirtualFn = Builder.CreateLoad(Vtable, "virtualfn");
  
  EmitBranch(FnEnd);
  EmitBlock(FnNonVirtual);
  
  // If the function is not virtual, just load the pointer.
  llvm::Value *NonVirtualFn = Builder.CreateLoad(FnPtr, "fn");
  NonVirtualFn = Builder.CreateIntToPtr(NonVirtualFn, FTy->getPointerTo());
  
  EmitBlock(FnEnd);

  // Merge the two callees with a PHI.
  llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo());
  Callee->reserveOperandSpace(2);
  Callee->addIncoming(VirtualFn, FnVirtual);
  Callee->addIncoming(NonVirtualFn, FnNonVirtual);

  CallArgList Args;

  QualType ThisType = 
    getContext().getPointerType(getContext().getTagDeclType(RD));

  // Push the this ptr.
  Args.push_back(std::make_pair(RValue::get(This), ThisType));
  
  // And the rest of the call args
  EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
  const FunctionType *BO_FPT = BO->getType()->getAs<FunctionProtoType>();
  return EmitCall(CGM.getTypes().getFunctionInfo(Args, BO_FPT), Callee, 
                  ReturnValue, Args);
}
+
// Emit an overloaded-operator call that resolves to an instance method.
// arg(0) is the object; the remaining args are the operator's operands.
RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
                                               const CXXMethodDecl *MD,
                                               ReturnValueSlot ReturnValue) {
  assert(MD->isInstance() &&
         "Trying to emit a member call expr on a static method!");

  if (MD->isCopyAssignment()) {
    const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
    if (ClassDecl->hasTrivialCopyAssignment()) {
      // Trivial copy assignment: emit as a plain aggregate copy instead of
      // a call, and return 'this' as the result.
      assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
             "EmitCXXOperatorMemberCallExpr - user declared copy assignment");
      llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
      llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
      QualType Ty = E->getType();
      EmitAggregateCopy(This, Src, Ty);
      return RValue::get(This);
    }
  }

  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
  const llvm::Type *Ty =
    CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
                                   FPT->isVariadic());

  llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();

  // Use a virtual call unless the object's dynamic type is statically known.
  llvm::Value *Callee;
  if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
    Callee = BuildVirtualCall(MD, This, Ty);
  else
    Callee = CGM.GetAddrOfFunction(MD, Ty);

  // Skip arg(0): it became 'this'.
  return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
                           E->arg_begin() + 1, E->arg_end());
}
+
// Emit a CXXConstructExpr into the given destination, handling trivial
// constructors, constructor elision, and array construction.
void
CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
                                      const CXXConstructExpr *E) {
  assert(Dest && "Must have a destination!");
  const CXXConstructorDecl *CD = E->getConstructor();
  const ConstantArrayType *Array =
  getContext().getAsConstantArrayType(E->getType());
  // For a copy constructor, even if it is trivial, must fall thru so
  // its argument is code-gen'ed.
  if (!CD->isCopyConstructor()) {
    QualType InitType = E->getType();
    if (Array)
      InitType = getContext().getBaseElementType(Array);
    const CXXRecordDecl *RD =
    cast<CXXRecordDecl>(InitType->getAs<RecordType>()->getDecl());
    // A trivial non-copy constructor does nothing; emit no code.
    if (RD->hasTrivialConstructor())
      return;
  }
  // Code gen optimization to eliminate copy constructor and return
  // its first argument instead.
  if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
    const Expr *Arg = E->getArg(0);
    
    // Strip the wrappers (no-op casts, functional casts, temporary
    // bindings) so the source aggregate is emitted directly into Dest.
    if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
      assert((ICE->getCastKind() == CastExpr::CK_NoOp ||
              ICE->getCastKind() == CastExpr::CK_ConstructorConversion ||
              ICE->getCastKind() == CastExpr::CK_UserDefinedConversion) &&
             "Unknown implicit cast kind in constructor elision");
      Arg = ICE->getSubExpr();
    }
    
    if (const CXXFunctionalCastExpr *FCE = dyn_cast<CXXFunctionalCastExpr>(Arg))
      Arg = FCE->getSubExpr();
    
    if (const CXXBindTemporaryExpr *BindExpr = 
        dyn_cast<CXXBindTemporaryExpr>(Arg))
      Arg = BindExpr->getSubExpr();
    
    EmitAggExpr(Arg, Dest, false);
    return;
  }
  if (Array) {
    // Construct every element of the array through the base element type.
    QualType BaseElementTy = getContext().getBaseElementType(Array);
    const llvm::Type *BasePtr = ConvertType(BaseElementTy);
    BasePtr = llvm::PointerType::getUnqual(BasePtr);
    llvm::Value *BaseAddrPtr =
    Builder.CreateBitCast(Dest, BasePtr);
    
    EmitCXXAggrConstructorCall(CD, Array, BaseAddrPtr, 
                               E->arg_begin(), E->arg_end());
  }
  else
    // Call the constructor.
    EmitCXXConstructorCall(CD, 
                           E->isBaseInitialization()? Ctor_Base : Ctor_Complete, 
                           Dest,
                           E->arg_begin(), E->arg_end());
}
+
+/// CalculateCookiePadding - Return the number of bytes of padding that must
+/// precede the first element of an array allocation of ElementType so that
+/// the element count can be stashed in an array cookie; CharUnits::Zero()
+/// means no cookie is required.
+static CharUnits CalculateCookiePadding(ASTContext &Ctx, QualType ElementType) {
+  // Only class types can ever require a cookie.
+  const RecordType *RT = ElementType->getAs<RecordType>();
+  if (!RT)
+    return CharUnits::Zero();
+
+  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+  if (!RD)
+    return CharUnits::Zero();
+
+  if (RD->hasTrivialDestructor()) {
+    // With a trivial destructor, the only remaining reason to keep a cookie
+    // is that the usual operator delete[] wants the allocation size passed
+    // as a second argument.
+    const CXXMethodDecl *UsualDelete = 0;
+
+    DeclarationName DeleteName =
+      Ctx.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
+    DeclContext::lookup_const_iterator I, IEnd;
+    for (llvm::tie(I, IEnd) = RD->lookup(DeleteName); I != IEnd; ++I) {
+      const CXXMethodDecl *Candidate = cast<CXXMethodDecl>(*I);
+      if (Candidate->isUsualDeallocationFunction()) {
+        UsualDelete = Candidate;
+        break;
+      }
+    }
+
+    // No usual deallocation function at all: no cookie needed.
+    if (!UsualDelete)
+      return CharUnits::Zero();
+
+    // A one-parameter usual operator delete[] never sees the size, so no
+    // cookie is needed either.
+    if (UsualDelete->getNumParams() == 1)
+      return CharUnits::Zero();
+
+    assert(UsualDelete->getNumParams() == 2 && 
+           "Unexpected deallocation function type!");
+  }
+
+  // Padding is the maximum of sizeof(size_t) and alignof(ElementType).
+  return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+                  Ctx.getTypeAlignInChars(ElementType));
+}
+
+/// CalculateCookiePadding - Overload for a whole new-expression: scalar news
+/// and the global placement form ::operator new[](size_t, void*) never get a
+/// cookie; otherwise defer to the element-type overload.
+static CharUnits CalculateCookiePadding(ASTContext &Ctx, const CXXNewExpr *E) {
+  if (!E->isArray())
+    return CharUnits::Zero();
+
+  // No cookie is required if the new operator being used is
+  // ::operator new[](size_t, void*).
+  const FunctionDecl *AllocFn = E->getOperatorNew();
+  bool IsGlobal = AllocFn->getDeclContext()->getLookupContext()->isFileContext();
+  if (IsGlobal && AllocFn->getNumParams() == 2) {
+    CanQualType SecondParamTy =
+      Ctx.getCanonicalType(AllocFn->getParamDecl(1)->getType());
+    if (SecondParamTy == Ctx.VoidPtrTy)
+      return CharUnits::Zero();
+  }
+
+  return CalculateCookiePadding(Ctx, E->getAllocatedType());
+}
+
+/// EmitCXXNewAllocSize - Compute the number of bytes to request from the
+/// allocation function for new-expression E.  For array news the element
+/// count is additionally returned through NumElements (as a size_t value);
+/// NumElements is left untouched for non-array news.
+static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF, 
+                                        const CXXNewExpr *E,
+                                        llvm::Value *& NumElements) {
+  QualType Type = E->getAllocatedType();
+  CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(Type);
+  const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+  
+  // Scalar new: the request is simply sizeof(T).
+  if (!E->isArray())
+    return llvm::ConstantInt::get(SizeTy, TypeSize.getQuantity());
+
+  // Array new may need extra space in front for the array cookie.
+  CharUnits CookiePadding = CalculateCookiePadding(CGF.getContext(), E);
+  
+  // If the element count folds to an integer constant (with no side
+  // effects), compute the entire allocation size at compile time.
+  Expr::EvalResult Result;
+  if (E->getArraySize()->Evaluate(Result, CGF.getContext()) &&
+      !Result.HasSideEffects && Result.Val.isInt()) {
+
+    CharUnits AllocSize = 
+      Result.Val.getInt().getZExtValue() * TypeSize + CookiePadding;
+    
+    NumElements = 
+      llvm::ConstantInt::get(SizeTy, Result.Val.getInt().getZExtValue());
+    
+    return llvm::ConstantInt::get(SizeTy, AllocSize.getQuantity());
+  }
+  
+  // Emit the array size expression.
+  NumElements = CGF.EmitScalarExpr(E->getArraySize());
+  
+  // Multiply with the type size.
+  // NOTE(review): no overflow check is performed on this multiply or on the
+  // cookie add below -- a huge element count can wrap the requested size.
+  llvm::Value *V = 
+    CGF.Builder.CreateMul(NumElements, 
+                          llvm::ConstantInt::get(SizeTy, 
+                                                 TypeSize.getQuantity()));
+
+  // And add the cookie padding if necessary.
+  if (!CookiePadding.isZero())
+    V = CGF.Builder.CreateAdd(V, 
+        llvm::ConstantInt::get(SizeTy, CookiePadding.getQuantity()));
+  
+  return V;
+}
+
+/// EmitNewInitializer - Run the initialization (constructor call or POD
+/// store) for the storage a new-expression just allocated at NewPtr.
+static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+                               llvm::Value *NewPtr,
+                               llvm::Value *NumElements) {
+  // Array case: only a constructor (if any) runs; POD arrays are left
+  // uninitialized.
+  if (E->isArray()) {
+    if (CXXConstructorDecl *Ctor = E->getConstructor())
+      CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr, 
+                                     E->constructor_arg_begin(), 
+                                     E->constructor_arg_end());
+    return;
+  }
+
+  // Scalar case with a constructor: emit the complete-object constructor.
+  if (CXXConstructorDecl *Ctor = E->getConstructor()) {
+    CGF.EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
+                               E->constructor_arg_begin(),
+                               E->constructor_arg_end());
+    return;
+  }
+
+  // We have a POD type; at most one initializer expression is allowed.
+  if (E->getNumConstructorArgs() == 0)
+    return;
+
+  assert(E->getNumConstructorArgs() == 1 &&
+         "Can only have one argument to initializer of POD type.");
+
+  const Expr *Init = E->getConstructorArg(0);
+  QualType AllocType = E->getAllocatedType();
+
+  // Dispatch on the LLVM-level representation of the allocated type.
+  if (!CGF.hasAggregateLLVMType(AllocType)) 
+    CGF.EmitStoreOfScalar(CGF.EmitScalarExpr(Init), NewPtr,
+                          AllocType.isVolatileQualified(), AllocType);
+  else if (AllocType->isAnyComplexType())
+    CGF.EmitComplexExprIntoAddr(Init, NewPtr, 
+                                AllocType.isVolatileQualified());
+  else
+    CGF.EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
+}
+
+/// EmitCXXNewExpr - Emit a new-expression: compute the allocation size, call
+/// the allocation function, optionally store an array cookie and null-check
+/// the result, then run the initializer on the allocated storage.  Returns
+/// the (possibly null) pointer result of the expression.
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+  QualType AllocType = E->getAllocatedType();
+  FunctionDecl *NewFD = E->getOperatorNew();
+  const FunctionProtoType *NewFTy = NewFD->getType()->getAs<FunctionProtoType>();
+
+  CallArgList NewArgs;
+
+  // The allocation size is the first argument.
+  QualType SizeTy = getContext().getSizeType();
+
+  llvm::Value *NumElements = 0;
+  llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements);
+  
+  NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
+
+  // Emit the rest of the arguments.
+  // FIXME: Ideally, this should just use EmitCallArgs.
+  CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
+
+  // First, use the types from the function type.
+  // We start at 1 here because the first argument (the allocation size)
+  // has already been emitted.
+  for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
+    QualType ArgType = NewFTy->getArgType(i);
+
+    assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+           getTypePtr() ==
+           getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+           "type mismatch in call argument!");
+
+    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+                                     ArgType));
+
+  }
+
+  // Either we've emitted all the call args, or we have a call to a
+  // variadic function.
+  assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
+         "Extra arguments in non-variadic function!");
+
+  // If we still have any arguments, emit them using the type of the argument.
+  for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
+       NewArg != NewArgEnd; ++NewArg) {
+    QualType ArgType = NewArg->getType();
+    NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+                                     ArgType));
+  }
+
+  // Emit the call to new.
+  RValue RV =
+    EmitCall(CGM.getTypes().getFunctionInfo(NewArgs, NewFTy),
+             CGM.GetAddrOfFunction(NewFD), ReturnValueSlot(), NewArgs, NewFD);
+
+  // If an allocation function is declared with an empty exception specification
+  // it returns null to indicate failure to allocate storage. [expr.new]p13.
+  // (We don't need to check for null when there's no new initializer and
+  // we're allocating a POD type).
+  bool NullCheckResult = NewFTy->hasEmptyExceptionSpec() &&
+    !(AllocType->isPODType() && !E->hasInitializer());
+
+  llvm::BasicBlock *NewNull = 0;
+  llvm::BasicBlock *NewNotNull = 0;
+  llvm::BasicBlock *NewEnd = 0;
+
+  llvm::Value *NewPtr = RV.getScalarVal();
+
+  if (NullCheckResult) {
+    // Branch so the cookie store and initializer only run when the
+    // allocation succeeded.
+    NewNull = createBasicBlock("new.null");
+    NewNotNull = createBasicBlock("new.notnull");
+    NewEnd = createBasicBlock("new.end");
+
+    llvm::Value *IsNull =
+      Builder.CreateICmpEQ(NewPtr,
+                           llvm::Constant::getNullValue(NewPtr->getType()),
+                           "isnull");
+
+    Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
+    EmitBlock(NewNotNull);
+  }
+  
+  // If this array allocation needs a cookie, store the element count just
+  // before the first element and advance NewPtr past the padding.
+  CharUnits CookiePadding = CalculateCookiePadding(getContext(), E);
+  if (!CookiePadding.isZero()) {
+    CharUnits CookieOffset = 
+      CookiePadding - getContext().getTypeSizeInChars(SizeTy);
+    
+    llvm::Value *NumElementsPtr = 
+      Builder.CreateConstInBoundsGEP1_64(NewPtr, CookieOffset.getQuantity());
+    
+    NumElementsPtr = Builder.CreateBitCast(NumElementsPtr, 
+                                           ConvertType(SizeTy)->getPointerTo());
+    Builder.CreateStore(NumElements, NumElementsPtr);
+
+    // Now add the padding to the new ptr.
+    NewPtr = Builder.CreateConstInBoundsGEP1_64(NewPtr, 
+                                                CookiePadding.getQuantity());
+  }
+  
+  NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+
+  EmitNewInitializer(*this, E, NewPtr, NumElements);
+
+  if (NullCheckResult) {
+    Builder.CreateBr(NewEnd);
+    // The initializer may have opened new blocks; the block we are in now
+    // (which still contains the branch just emitted) is the real
+    // predecessor of new.end on the non-null path.
+    NewNotNull = Builder.GetInsertBlock();
+    EmitBlock(NewNull);
+    Builder.CreateBr(NewEnd);
+    EmitBlock(NewEnd);
+
+    // Merge the successful-allocation pointer with null from the failure
+    // path.
+    llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
+    PHI->reserveOperandSpace(2);
+    PHI->addIncoming(NewPtr, NewNotNull);
+    PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
+
+    NewPtr = PHI;
+  }
+
+  return NewPtr;
+}
+
+/// GetAllocatedObjectPtrAndNumElements - Given a pointer to the first array
+/// element of a cookied allocation, recover both the start of the underlying
+/// allocation and the element count stored in the cookie.
+static std::pair<llvm::Value *, llvm::Value *>
+GetAllocatedObjectPtrAndNumElements(CodeGenFunction &CGF,
+                                    llvm::Value *Ptr, QualType DeleteTy) {
+  QualType SizeTy = CGF.getContext().getSizeType();
+  const llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+  // Recompute the cookie layout exactly as the allocation side did:
+  // padding = max(sizeof(size_t), alignof(T)).
+  CharUnits DeleteTypeAlign = CGF.getContext().getTypeAlignInChars(DeleteTy);
+  CharUnits CookiePadding =
+    std::max(CGF.getContext().getTypeSizeInChars(SizeTy), DeleteTypeAlign);
+  assert(!CookiePadding.isZero() && "CookiePadding should not be 0.");
+
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+  CharUnits CookieOffset =
+    CookiePadding - CGF.getContext().getTypeSizeInChars(SizeTy);
+
+  // Step back over the padding to the start of the allocation.
+  llvm::Value *AllocStart = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+  AllocStart =
+    CGF.Builder.CreateConstInBoundsGEP1_64(AllocStart,
+                                           -CookiePadding.getQuantity());
+
+  // The element count lives at CookieOffset from the allocation start.
+  llvm::Value *CountPtr =
+    CGF.Builder.CreateConstInBoundsGEP1_64(AllocStart,
+                                           CookieOffset.getQuantity());
+  CountPtr = CGF.Builder.CreateBitCast(CountPtr, SizeLTy->getPointerTo());
+
+  llvm::Value *Count = CGF.Builder.CreateLoad(CountPtr);
+  Count = CGF.Builder.CreateIntCast(Count, SizeLTy, /*isSigned=*/false);
+
+  return std::make_pair(AllocStart, Count);
+}
+
+/// EmitDeleteCall - Emit a call to deallocation function DeleteFD for the
+/// storage at Ptr, which holds (an array of) DeleteTy.  Handles the optional
+/// size_t second argument and rewinds over the array cookie when operator
+/// delete[] is called for a cookied allocation.
+void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
+                                     llvm::Value *Ptr,
+                                     QualType DeleteTy) {
+  const FunctionProtoType *DeleteFTy =
+    DeleteFD->getType()->getAs<FunctionProtoType>();
+
+  CallArgList DeleteArgs;
+
+  // Check if we need to pass the size to the delete operator.
+  llvm::Value *Size = 0;
+  QualType SizeTy;
+  if (DeleteFTy->getNumArgs() == 2) {
+    SizeTy = DeleteFTy->getArgType(1);
+    CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+    Size = llvm::ConstantInt::get(ConvertType(SizeTy), 
+                                  DeleteTypeSize.getQuantity());
+  }
+  
+  if (DeleteFD->getOverloadedOperator() == OO_Array_Delete &&
+      !CalculateCookiePadding(getContext(), DeleteTy).isZero()) {
+    // We need to get the number of elements in the array from the cookie.
+    llvm::Value *AllocatedObjectPtr;
+    llvm::Value *NumElements;
+    llvm::tie(AllocatedObjectPtr, NumElements) =
+      GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);
+    
+    // Multiply the size with the number of elements.
+    // NOTE(review): the cookie padding itself is not folded into Size here;
+    // confirm whether the two-argument delete should see the full
+    // allocation size including the cookie.
+    if (Size)
+      Size = Builder.CreateMul(NumElements, Size);
+    
+    // Deallocate from the start of the original allocation, not from the
+    // first array element.
+    Ptr = AllocatedObjectPtr;
+  }
+  
+  QualType ArgTy = DeleteFTy->getArgType(0);
+  llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
+  DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
+
+  if (Size)
+    DeleteArgs.push_back(std::make_pair(RValue::get(Size), SizeTy));
+
+  // Emit the call to delete.
+  EmitCall(CGM.getTypes().getFunctionInfo(DeleteArgs, DeleteFTy),
+           CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(), 
+           DeleteArgs, DeleteFD);
+}
+
+/// EmitCXXDeleteExpr - Emit a delete-expression: null-check the pointer, run
+/// destructors if needed (using the array cookie for delete[]), then call
+/// the deallocation function unless a virtual deleting destructor already
+/// freed the storage.
+void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
+  
+  // Get at the argument before we performed the implicit conversion
+  // to void*.
+  const Expr *Arg = E->getArgument();
+  while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+    if (ICE->getCastKind() != CastExpr::CK_UserDefinedConversion &&
+        ICE->getType()->isVoidPointerType())
+      Arg = ICE->getSubExpr();
+    else
+      break;
+  }
+  
+  QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+
+  llvm::Value *Ptr = EmitScalarExpr(Arg);
+
+  // Null check the pointer: deleting a null pointer is a no-op.
+  llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
+  llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
+
+  llvm::Value *IsNull =
+    Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
+                         "isnull");
+
+  Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
+  EmitBlock(DeleteNotNull);
+  
+  bool ShouldCallDelete = true;
+  
+  // Call the destructor if necessary.
+  if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
+    if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+      if (!RD->hasTrivialDestructor()) {
+        const CXXDestructorDecl *Dtor = RD->getDestructor(getContext());
+        if (E->isArrayForm()) {
+          // Recover the element count from the array cookie and destroy
+          // every element.
+          llvm::Value *AllocatedObjectPtr;
+          llvm::Value *NumElements;
+          llvm::tie(AllocatedObjectPtr, NumElements) =
+            GetAllocatedObjectPtrAndNumElements(*this, Ptr, DeleteTy);
+          
+          EmitCXXAggrDestructorCall(Dtor, NumElements, Ptr);
+        } else if (Dtor->isVirtual()) {
+          // Virtual destructor: dispatch to the deleting destructor through
+          // the vtable.
+          const llvm::Type *Ty =
+            CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(Dtor),
+                                           /*isVariadic=*/false);
+          
+          llvm::Value *Callee = BuildVirtualCall(Dtor, Dtor_Deleting, Ptr, Ty);
+          EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
+                            0, 0);
+
+          // The dtor took care of deleting the object.
+          ShouldCallDelete = false;
+        } else 
+          EmitCXXDestructorCall(Dtor, Dtor_Complete, Ptr);
+      }
+    }
+  }
+
+  if (ShouldCallDelete)
+    EmitDeleteCall(E->getOperatorDelete(), Ptr, DeleteTy);
+
+  EmitBlock(DeleteEnd);
+}
+
+/// EmitCXXTypeidExpr - Emit a typeid expression, yielding a pointer to the
+/// type_info object.  For polymorphic class operands the descriptor is read
+/// dynamically out of the vtable; otherwise the static RTTI descriptor is
+/// used.
+llvm::Value * CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+  QualType Ty = E->getType();
+  const llvm::Type *LTy = ConvertType(Ty)->getPointerTo();
+  
+  // typeid(T): always the static descriptor for T.
+  if (E->isTypeOperand()) {
+    llvm::Constant *TypeInfo = 
+      CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
+    return Builder.CreateBitCast(TypeInfo, LTy);
+  }
+  
+  Expr *subE = E->getExprOperand();
+  Ty = subE->getType();
+  CanQualType CanTy = CGM.getContext().getCanonicalType(Ty);
+  Ty = CanTy.getUnqualifiedType().getNonReferenceType();
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+    if (RD->isPolymorphic()) {
+      // FIXME: if subE is an lvalue do
+      LValue Obj = EmitLValue(subE);
+      llvm::Value *This = Obj.getAddress();
+      LTy = LTy->getPointerTo()->getPointerTo();
+      llvm::Value *V = Builder.CreateBitCast(This, LTy);
+      // We need to do a zero check for *p, unless it has NonNullAttr.
+      // FIXME: PointerType->hasAttr<NonNullAttr>()
+      bool CanBeZero = false;
+      if (UnaryOperator *UO = dyn_cast<UnaryOperator>(subE->IgnoreParens()))
+        if (UO->getOpcode() == UnaryOperator::Deref)
+          CanBeZero = true;
+      if (CanBeZero) {
+        // typeid(*p) where p is null must call __cxa_bad_typeid.
+        llvm::BasicBlock *NonZeroBlock = createBasicBlock();
+        llvm::BasicBlock *ZeroBlock = createBasicBlock();
+        
+        llvm::Value *Zero = llvm::Constant::getNullValue(LTy);
+        Builder.CreateCondBr(Builder.CreateICmpNE(V, Zero),
+                             NonZeroBlock, ZeroBlock);
+        EmitBlock(ZeroBlock);
+        /// Call __cxa_bad_typeid
+        const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
+        const llvm::FunctionType *FTy;
+        FTy = llvm::FunctionType::get(ResultType, false);
+        llvm::Value *F = CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
+        Builder.CreateCall(F)->setDoesNotReturn();
+        Builder.CreateUnreachable();
+        EmitBlock(NonZeroBlock);
+      }
+      // Load the type_info pointer stored in the vtable slot at index -1.
+      V = Builder.CreateLoad(V, "vtable");
+      V = Builder.CreateConstInBoundsGEP1_64(V, -1ULL);
+      V = Builder.CreateLoad(V);
+      return V;
+    }
+  }
+  // Non-polymorphic expression operand: static descriptor of its type.
+  return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(Ty), LTy);
+}
+
+/// EmitDynamicCast - Emit a dynamic_cast of V to the type written in DCE.
+/// dynamic_cast<void*> reads the offset-to-top out of the vtable; all other
+/// casts call the __dynamic_cast runtime function, and reference casts
+/// additionally call __cxa_bad_cast on failure.
+llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
+                                              const CXXDynamicCastExpr *DCE) {
+  QualType SrcTy = DCE->getSubExpr()->getType();
+  QualType DestTy = DCE->getTypeAsWritten();
+  QualType InnerType = DestTy->getPointeeType();
+  
+  const llvm::Type *LTy = ConvertType(DCE->getType());
+
+  bool CanBeZero = false;
+  bool ToVoid = false;
+  bool ThrowOnBad = false;
+  if (DestTy->isPointerType()) {
+    // FIXME: if PointerType->hasAttr<NonNullAttr>(), we don't set this
+    CanBeZero = true;
+    if (InnerType->isVoidType())
+      ToVoid = true;
+  } else {
+    // Casting to a reference: failure throws instead of producing null.
+    LTy = LTy->getPointerTo();
+    ThrowOnBad = true;
+  }
+
+  // Strip pointer/reference wrappers and qualifiers from both types.
+  if (SrcTy->isPointerType() || SrcTy->isReferenceType())
+    SrcTy = SrcTy->getPointeeType();
+  SrcTy = SrcTy.getUnqualifiedType();
+
+  if (DestTy->isPointerType() || DestTy->isReferenceType())
+    DestTy = DestTy->getPointeeType();
+  DestTy = DestTy.getUnqualifiedType();
+
+  llvm::BasicBlock *ContBlock = createBasicBlock();
+  llvm::BasicBlock *NullBlock = 0;
+  llvm::BasicBlock *NonZeroBlock = 0;
+  if (CanBeZero) {
+    // A null source pointer simply yields a null result.
+    NonZeroBlock = createBasicBlock();
+    NullBlock = createBasicBlock();
+    Builder.CreateCondBr(Builder.CreateIsNotNull(V), NonZeroBlock, NullBlock);
+    EmitBlock(NonZeroBlock);
+  }
+
+  llvm::BasicBlock *BadCastBlock = 0;
+
+  const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
+
+  // See if this is a dynamic_cast(void*)
+  if (ToVoid) {
+    // Adjust by the offset-to-top stored at vtable slot -2 to reach the
+    // most-derived object.
+    llvm::Value *This = V;
+    V = Builder.CreateBitCast(This, PtrDiffTy->getPointerTo()->getPointerTo());
+    V = Builder.CreateLoad(V, "vtable");
+    V = Builder.CreateConstInBoundsGEP1_64(V, -2ULL);
+    V = Builder.CreateLoad(V, "offset to top");
+    This = Builder.CreateBitCast(This, llvm::Type::getInt8PtrTy(VMContext));
+    V = Builder.CreateInBoundsGEP(This, V);
+    V = Builder.CreateBitCast(V, LTy);
+  } else {
+    /// Call __dynamic_cast
+    const llvm::Type *ResultType = llvm::Type::getInt8PtrTy(VMContext);
+    const llvm::FunctionType *FTy;
+    std::vector<const llvm::Type*> ArgTys;
+    const llvm::Type *PtrToInt8Ty
+      = llvm::Type::getInt8Ty(VMContext)->getPointerTo();
+    ArgTys.push_back(PtrToInt8Ty);
+    ArgTys.push_back(PtrToInt8Ty);
+    ArgTys.push_back(PtrToInt8Ty);
+    ArgTys.push_back(PtrDiffTy);
+    FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
+
+    // FIXME: Calculate better hint.
+    llvm::Value *hint = llvm::ConstantInt::get(PtrDiffTy, -1ULL);
+    
+    assert(SrcTy->isRecordType() && "Src type must be record type!");
+    assert(DestTy->isRecordType() && "Dest type must be record type!");
+    
+    llvm::Value *SrcArg
+      = CGM.GetAddrOfRTTIDescriptor(SrcTy.getUnqualifiedType());
+    llvm::Value *DestArg
+      = CGM.GetAddrOfRTTIDescriptor(DestTy.getUnqualifiedType());
+    
+    V = Builder.CreateBitCast(V, PtrToInt8Ty);
+    V = Builder.CreateCall4(CGM.CreateRuntimeFunction(FTy, "__dynamic_cast"),
+                            V, SrcArg, DestArg, hint);
+    V = Builder.CreateBitCast(V, LTy);
+
+    if (ThrowOnBad) {
+      // Reference cast: __dynamic_cast returning null means bad cast.
+      BadCastBlock = createBasicBlock();
+
+      Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
+      EmitBlock(BadCastBlock);
+      /// Call __cxa_bad_cast
+      ResultType = llvm::Type::getVoidTy(VMContext);
+      const llvm::FunctionType *FBadTy;
+      FBadTy = llvm::FunctionType::get(ResultType, false);
+      llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
+      Builder.CreateCall(F)->setDoesNotReturn();
+      Builder.CreateUnreachable();
+    }
+  }
+  
+  if (CanBeZero) {
+    Builder.CreateBr(ContBlock);
+    EmitBlock(NullBlock);
+    Builder.CreateBr(ContBlock);
+  }
+  EmitBlock(ContBlock);
+  if (CanBeZero) {
+    // Merge the cast result with null from the null-input path.
+    llvm::PHINode *PHI = Builder.CreatePHI(LTy);
+    PHI->reserveOperandSpace(2);
+    PHI->addIncoming(V, NonZeroBlock);
+    PHI->addIncoming(llvm::Constant::getNullValue(LTy), NullBlock);
+    V = PHI;
+  }
+
+  return V;
+}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..5ec336c
--- /dev/null
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,724 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+//                        Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
+
+namespace  {
+/// ComplexExprEmitter - Statement visitor that emits LLVM IR for expressions
+/// of complex type, producing the result as a (real, imag) pair of scalar
+/// llvm::Values (ComplexPairTy).
+class ComplexExprEmitter
+  : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+  CodeGenFunction &CGF;
+  CGBuilderTy &Builder;
+  // True if we should skip emitting the real/imag part of a loaded value.
+  bool IgnoreReal;
+  bool IgnoreImag;
+  // True if we should ignore the real/imag part of the result of a=b.
+  bool IgnoreRealAssign;
+  bool IgnoreImagAssign;
+public:
+  ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false,
+                     bool irn=false, bool iin=false)
+    : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii),
+    IgnoreRealAssign(irn), IgnoreImagAssign(iin) {
+  }
+
+
+  //===--------------------------------------------------------------------===//
+  //                               Utilities
+  //===--------------------------------------------------------------------===//
+
+  // The TestAndClear* helpers read-and-reset an Ignore* flag so it applies
+  // only to the outermost expression currently being visited.
+  bool TestAndClearIgnoreReal() {
+    bool I = IgnoreReal;
+    IgnoreReal = false;
+    return I;
+  }
+  bool TestAndClearIgnoreImag() {
+    bool I = IgnoreImag;
+    IgnoreImag = false;
+    return I;
+  }
+  bool TestAndClearIgnoreRealAssign() {
+    bool I = IgnoreRealAssign;
+    IgnoreRealAssign = false;
+    return I;
+  }
+  bool TestAndClearIgnoreImagAssign() {
+    bool I = IgnoreImagAssign;
+    IgnoreImagAssign = false;
+    return I;
+  }
+
+  /// EmitLoadOfLValue - Given an expression with complex type that represents
+  /// an l-value, this method emits the address of the l-value, then loads
+  /// and returns the result.
+  ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+    LValue LV = CGF.EmitLValue(E);
+    if (LV.isSimple())
+      return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+
+    // Objective-C property and key-value-coding references go through the
+    // getter instead of a plain load.
+    if (LV.isPropertyRef())
+      return CGF.EmitObjCPropertyGet(LV.getPropertyRefExpr()).getComplexVal();
+
+    assert(LV.isKVCRef() && "Unknown LValue type!");
+    return CGF.EmitObjCPropertyGet(LV.getKVCRefExpr()).getComplexVal();
+  }
+
+  /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+  /// the real and imaginary pieces.
+  ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+  /// EmitStoreOfComplex - Store the specified real/imag parts into the
+  /// specified value pointer.
+  void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+  /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+  ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+                                         QualType DestType);
+
+  //===--------------------------------------------------------------------===//
+  //                            Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  ComplexPairTy VisitStmt(Stmt *S) {
+    S->dump(CGF.getContext().getSourceManager());
+    assert(0 && "Stmt can't have complex result type!");
+    return ComplexPairTy();
+  }
+  ComplexPairTy VisitExpr(Expr *S);
+  ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+  ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+
+  // l-values.
+  ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+  ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+    return EmitLoadOfLValue(E);
+  }
+  ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+    return EmitLoadOfLValue(E);
+  }
+  ComplexPairTy VisitObjCImplicitSetterGetterRefExpr(
+                               ObjCImplicitSetterGetterRefExpr *E) {
+    return EmitLoadOfLValue(E);
+  }
+  ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
+    return CGF.EmitObjCMessageExpr(E).getComplexVal();
+  }
+  ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+  ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+
+  // FIXME: CompoundLiteralExpr
+
+  ComplexPairTy EmitCast(Expr *Op, QualType DestTy);
+  ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+    // Unlike for scalars, we don't have to worry about function->ptr demotion
+    // here.
+    return EmitCast(E->getSubExpr(), E->getType());
+  }
+  ComplexPairTy VisitCastExpr(CastExpr *E) {
+    return EmitCast(E->getSubExpr(), E->getType());
+  }
+  ComplexPairTy VisitCallExpr(const CallExpr *E);
+  ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+  // Operators.
+  ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+                                   bool isInc, bool isPre) {
+    LValue LV = CGF.EmitLValue(E->getSubExpr());
+    return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
+  }
+  ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, false, false);
+  }
+  ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, true, false);
+  }
+  ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, false, true);
+  }
+  ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, true, true);
+  }
+  ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+  ComplexPairTy VisitUnaryPlus     (const UnaryOperator *E) {
+    // Unary + is a no-op on the value, but consumes the Ignore* flags.
+    TestAndClearIgnoreReal();
+    TestAndClearIgnoreImag();
+    TestAndClearIgnoreRealAssign();
+    TestAndClearIgnoreImagAssign();
+    return Visit(E->getSubExpr());
+  }
+  ComplexPairTy VisitUnaryMinus    (const UnaryOperator *E);
+  ComplexPairTy VisitUnaryNot      (const UnaryOperator *E);
+  // LNot,Real,Imag never return complex.
+  ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+    return Visit(E->getSubExpr());
+  }
+  ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+    return Visit(DAE->getExpr());
+  }
+  ComplexPairTy VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+    return CGF.EmitCXXExprWithTemporaries(E).getComplexVal();
+  }
+  ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+    assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+    QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+    llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+    return ComplexPairTy(Null, Null);
+  }
+  ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+    assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+    QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+    llvm::Constant *Null =
+                       llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+    return ComplexPairTy(Null, Null);
+  }
+
+  // BinOpInfo - Bundles the emitted operands of a binary operator together
+  // with the type the computation is performed in.
+  struct BinOpInfo {
+    ComplexPairTy LHS;
+    ComplexPairTy RHS;
+    QualType Ty;  // Computation Type.
+  };
+
+  BinOpInfo EmitBinOps(const BinaryOperator *E);
+  ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+                                   ComplexPairTy (ComplexExprEmitter::*Func)
+                                   (const BinOpInfo &));
+
+  ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+  ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+  ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+  ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+  ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+    return EmitBinMul(EmitBinOps(E));
+  }
+  ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+    return EmitBinAdd(EmitBinOps(E));
+  }
+  ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+    return EmitBinSub(EmitBinOps(E));
+  }
+  ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+    return EmitBinDiv(EmitBinOps(E));
+  }
+
+  // Compound assignments.
+  ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+  }
+  ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+  }
+  ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+  }
+  ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+    return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+  }
+
+  // GCC rejects rem/and/or/xor for integer complex.
+  // Logical and/or always return int, never complex.
+
+  // No comparisons produce a complex result.
+  ComplexPairTy VisitBinAssign     (const BinaryOperator *E);
+  ComplexPairTy VisitBinComma      (const BinaryOperator *E);
+
+
+  ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO);
+  ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+  ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+  ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+};
+}  // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+//                                Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given an RValue reference for a complex, emit code to
+/// load the real and imaginary pieces, returning them as Real/Imag.
+///
+/// SrcPtr points to an LLVM {real, imag} two-element struct.  If the
+/// IgnoreReal / IgnoreImag flags are set, the corresponding load is skipped
+/// entirely and that half of the returned pair is null.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+                                                    bool isVolatile) {
+  llvm::Value *Real=0, *Imag=0;
+
+  if (!IgnoreReal) {
+    // Field 0 of the struct holds the real part.
+    llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
+                                                 SrcPtr->getName() + ".realp");
+    Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
+  }
+
+  if (!IgnoreImag) {
+    // Field 1 of the struct holds the imaginary part.
+    llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
+                                                 SrcPtr->getName() + ".imagp");
+    Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
+  }
+  return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+///
+/// Ptr must point to an LLVM {real, imag} struct; both halves of Val are
+/// stored unconditionally (the Ignore* flags do not apply to stores).
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+                                            bool isVolatile) {
+  llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+  llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+  Builder.CreateStore(Val.first, RealPtr, isVolatile);
+  Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+//                            Visitor Methods
+//===----------------------------------------------------------------------===//
+
+/// VisitExpr - Fallback for expression kinds with no dedicated visitor:
+/// report the construct as unsupported and recover with an undef pair of the
+/// complex element type so codegen can continue.
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+  CGF.ErrorUnsupported(E, "complex expression");
+  const llvm::Type *EltTy =
+    CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+  llvm::Value *U = llvm::UndefValue::get(EltTy);
+  return ComplexPairTy(U, U);
+}
+
+/// VisitImaginaryLiteral - An imaginary literal (e.g. 2.0i) evaluates to a
+/// complex value with a zero real part and the scalar subexpression as the
+/// imaginary part.
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+  llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+  return
+        ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+/// VisitCallExpr - A call returning a reference to complex is an lvalue and
+/// must be loaded; a call returning complex by value yields the pair directly.
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+  if (E->getCallReturnType()->isReferenceType())
+    return EmitLoadOfLValue(E);
+
+  return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+/// VisitStmtExpr - GNU statement expression ({ ...; val; }); the value of the
+/// last statement is the complex result (GetLast = true).
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+  return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+/// Both SrcType and DestType must be complex types; the conversion is applied
+/// elementwise to the real and imaginary halves.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+                                                           QualType SrcType,
+                                                           QualType DestType) {
+  // Get the src/dest element type.
+  SrcType = SrcType->getAs<ComplexType>()->getElementType();
+  DestType = DestType->getAs<ComplexType>()->getElementType();
+
+  // C99 6.3.1.6: When a value of complex type is converted to another
+  // complex type, both the real and imaginary parts follow the conversion
+  // rules for the corresponding real types.
+  Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+  Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+  return Val;
+}
+
+/// EmitCast - Emit a cast of Op to the complex type DestTy.  Handles both
+/// complex-to-complex and scalar-to-complex conversions.
+ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) {
+  // Two cases here: cast from (complex to complex) and (scalar to complex).
+  if (Op->getType()->isAnyComplexType())
+    return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+
+  // C99 6.3.1.7: When a value of real type is converted to a complex type, the
+  // real part of the complex result value is determined by the rules of
+  // conversion to the corresponding real type and the imaginary part of the
+  // complex result value is a positive zero or an unsigned zero.
+  llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+  // Convert the input element to the element type of the complex.
+  DestTy = DestTy->getAs<ComplexType>()->getElementType();
+  Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+  // Return (realval, 0).
+  return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+}
+
+/// VisitUnaryMinus - Complex negation: -(a+ib) = (-a) + i(-b).  Both halves
+/// of the operand are needed, so all Ignore* flags are cleared first.
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  ComplexPairTy Op = Visit(E->getSubExpr());
+
+  // Select FP vs. integer negation based on the LLVM element type.
+  llvm::Value *ResR, *ResI;
+  if (Op.first->getType()->isFloatingPoint()) {
+    ResR = Builder.CreateFNeg(Op.first,  "neg.r");
+    ResI = Builder.CreateFNeg(Op.second, "neg.i");
+  } else {
+    ResR = Builder.CreateNeg(Op.first,  "neg.r");
+    ResI = Builder.CreateNeg(Op.second, "neg.i");
+  }
+  return ComplexPairTy(ResR, ResI);
+}
+
+/// VisitUnaryNot - GNU complex conjugate operator '~': negate only the
+/// imaginary part, keeping the real part unchanged.
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  // ~(a+ib) = a + i*-b
+  ComplexPairTy Op = Visit(E->getSubExpr());
+  llvm::Value *ResI;
+  if (Op.second->getType()->isFloatingPoint())
+    ResI = Builder.CreateFNeg(Op.second, "conj.i");
+  else
+    ResI = Builder.CreateNeg(Op.second, "conj.i");
+
+  return ComplexPairTy(Op.first, ResI);
+}
+
+/// EmitBinAdd - Componentwise complex addition:
+/// (a+ib) + (c+id) = (a+c) + i(b+d).
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+  llvm::Value *ResR, *ResI;
+
+  if (Op.LHS.first->getType()->isFloatingPoint()) {
+    ResR = Builder.CreateFAdd(Op.LHS.first,  Op.RHS.first,  "add.r");
+    ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+  } else {
+    ResR = Builder.CreateAdd(Op.LHS.first,  Op.RHS.first,  "add.r");
+    ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+  }
+  return ComplexPairTy(ResR, ResI);
+}
+
+/// EmitBinSub - Componentwise complex subtraction:
+/// (a+ib) - (c+id) = (a-c) + i(b-d).
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+  llvm::Value *ResR, *ResI;
+  if (Op.LHS.first->getType()->isFloatingPoint()) {
+    ResR = Builder.CreateFSub(Op.LHS.first,  Op.RHS.first,  "sub.r");
+    ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+  } else {
+    ResR = Builder.CreateSub(Op.LHS.first,  Op.RHS.first,  "sub.r");
+    ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+  }
+  return ComplexPairTy(ResR, ResI);
+}
+
+
+/// EmitBinMul - Complex multiplication via the textbook formula:
+/// (a+ib) * (c+id) = (ac - bd) + i(bc + ad).
+/// NOTE(review): no special handling of NaN/infinity edge cases here
+/// (C99 Annex G); this is the straightforward four-multiply expansion.
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
+  using llvm::Value;
+  Value *ResR, *ResI;
+
+  if (Op.LHS.first->getType()->isFloatingPoint()) {
+    Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+    Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+    ResR  = Builder.CreateFSub(ResRl, ResRr, "mul.r");      // ac - bd
+
+    Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
+    Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+    ResI  = Builder.CreateFAdd(ResIl, ResIr, "mul.i");      // bc + ad
+  } else {
+    Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+    Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+    ResR  = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+    Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+    Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+    ResI  = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+  }
+  return ComplexPairTy(ResR, ResI);
+}
+
+/// EmitBinDiv - Complex division via the textbook formula (see inline
+/// comments).  NOTE(review): no Smith's-algorithm scaling is applied, so the
+/// intermediate products (cc+dd etc.) can overflow/underflow for large or
+/// tiny components.  Integer complex division selects signed vs. unsigned
+/// division based on the element type.
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+  llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+  llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+
+  llvm::Value *DSTr, *DSTi;
+  if (Op.LHS.first->getType()->isFloatingPoint()) {
+    // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+    llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr, "tmp"); // a*c
+    llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi, "tmp"); // b*d
+    llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+    llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr, "tmp"); // c*c
+    llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi, "tmp"); // d*d
+    llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+    llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr, "tmp"); // b*c
+    llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi, "tmp"); // a*d
+    llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+    DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp");
+    DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp");
+  } else {
+    // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+    llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c
+    llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d
+    llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd
+
+    llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c
+    llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d
+    llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd
+
+    llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c
+    llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d
+    llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad
+
+    if (Op.Ty->getAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
+      DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp");
+      DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp");
+    } else {
+      DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp");
+      DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp");
+    }
+  }
+
+  return ComplexPairTy(DSTr, DSTi);
+}
+
+/// EmitBinOps - Evaluate both operands of a complex binary operator (LHS
+/// before RHS) and record the expression's result type as the computation
+/// type.  The full values are needed, so all Ignore* flags are cleared.
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  BinOpInfo Ops;
+  Ops.LHS = Visit(E->getLHS());
+  Ops.RHS = Visit(E->getRHS());
+  Ops.Ty = E->getType();
+  return Ops;
+}
+
+
+// Compound assignments.
+/// EmitCompoundAssign - Emit a complex compound assignment (+=, -=, *=, /=):
+/// evaluate the RHS, load and promote the LHS to the computation type, apply
+/// Func, truncate back to the LHS type, store, and return a reload of the
+/// LHS.  The Ignore*Assign flags are saved and restored around the operation
+/// so the final reload can still be elided by the caller.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+                   ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  bool ignreal = TestAndClearIgnoreRealAssign();
+  bool ignimag = TestAndClearIgnoreImagAssign();
+  QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType();
+
+  BinOpInfo OpInfo;
+
+  // Load the RHS and LHS operands.
+  // __block variables need to have the rhs evaluated first, plus this should
+  // improve codegen a little.  It is possible for the RHS to be complex or
+  // scalar.
+  OpInfo.Ty = E->getComputationResultType();
+  OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
+
+  LValue LHSLV = CGF.EmitLValue(E->getLHS());
+
+
+  // We know the LHS is a complex lvalue.
+  OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+  OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty);
+
+  // Expand the binary operator.
+  ComplexPairTy Result = (this->*Func)(OpInfo);
+
+  // Truncate the result back to the LHS type.
+  Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+
+  // Store the result value into the LHS lvalue.
+  EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
+  // And now return the LHS
+  IgnoreReal = ignreal;
+  IgnoreImag = ignimag;
+  IgnoreRealAssign = ignreal;
+  IgnoreImagAssign = ignimag;
+  return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
+}
+
+/// VisitBinAssign - Complex '=' operator.  For a simple lvalue the RHS is
+/// stored and the LHS is reloaded as the result (per C, assignment yields the
+/// stored value); for Objective-C property/KVC references the value goes
+/// through the property setter and the RHS value itself is returned since no
+/// reload is possible.
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  bool ignreal = TestAndClearIgnoreRealAssign();
+  bool ignimag = TestAndClearIgnoreImagAssign();
+  assert(CGF.getContext().getCanonicalType(E->getLHS()->getType()) ==
+         CGF.getContext().getCanonicalType(E->getRHS()->getType()) &&
+         "Invalid assignment");
+  // Emit the RHS.
+  ComplexPairTy Val = Visit(E->getRHS());
+
+  // Compute the address to store into.
+  LValue LHS = CGF.EmitLValue(E->getLHS());
+
+  // Store into it, if simple.
+  if (LHS.isSimple()) {
+    EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified());
+
+    // And now return the LHS
+    IgnoreReal = ignreal;
+    IgnoreImag = ignimag;
+    IgnoreRealAssign = ignreal;
+    IgnoreImagAssign = ignimag;
+    return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified());
+  }
+
+  // Otherwise we must have a property setter (no complex vector/bitfields).
+  if (LHS.isPropertyRef())
+    CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val));
+  else
+    CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(), RValue::getComplex(Val));
+
+  // There is no reload after a store through a method, but we need to restore
+  // the Ignore* flags.
+  IgnoreReal = ignreal;
+  IgnoreImag = ignimag;
+  IgnoreRealAssign = ignreal;
+  IgnoreImagAssign = ignimag;
+  return Val;
+}
+
+/// VisitBinComma - Comma operator: emit the LHS purely for its side effects
+/// (as a statement, discarding its value), then evaluate and return the RHS.
+/// EnsureInsertPoint guards against the LHS having terminated the block.
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+  CGF.EmitStmt(E->getLHS());
+  CGF.EnsureInsertPoint();
+  return Visit(E->getRHS());
+}
+
+/// VisitConditionalOperator - Emit 'cond ? lhs : rhs' for complex values:
+/// branch on the condition, evaluate each arm in its own block, and join the
+/// (real, imag) halves with PHI nodes.  The GNU "?:" extension with a missing
+/// LHS is not supported for complex and is diagnosed up front.
+ComplexPairTy ComplexExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+  if (!E->getLHS()) {
+    CGF.ErrorUnsupported(E, "conditional operator with missing LHS");
+    const llvm::Type *EltTy =
+      CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+    llvm::Value *U = llvm::UndefValue::get(EltTy);
+    return ComplexPairTy(U, U);
+  }
+
+  TestAndClearIgnoreReal();
+  TestAndClearIgnoreImag();
+  TestAndClearIgnoreRealAssign();
+  TestAndClearIgnoreImagAssign();
+  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+  CGF.EmitBlock(LHSBlock);
+
+  // The missing-LHS extension was rejected above, so LHS must be present here.
+  assert(E->getLHS() && "Must have LHS for complex value");
+
+  ComplexPairTy LHS = Visit(E->getLHS());
+  // Re-fetch the insert block: Visit may have emitted control flow of its own.
+  LHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBranch(ContBlock);
+
+  CGF.EmitBlock(RHSBlock);
+
+  ComplexPairTy RHS = Visit(E->getRHS());
+  RHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBranch(ContBlock);
+
+  CGF.EmitBlock(ContBlock);
+
+  // Create a PHI node for the real part.
+  llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r");
+  RealPN->reserveOperandSpace(2);
+  RealPN->addIncoming(LHS.first, LHSBlock);
+  RealPN->addIncoming(RHS.first, RHSBlock);
+
+  // Create a PHI node for the imaginary part.  (Real and imaginary halves of
+  // a complex share one element type, so LHS.first's type is correct here.)
+  llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i");
+  ImagPN->reserveOperandSpace(2);
+  ImagPN->addIncoming(LHS.second, LHSBlock);
+  ImagPN->addIncoming(RHS.second, RHSBlock);
+
+  return ComplexPairTy(RealPN, ImagPN);
+}
+
+/// VisitChooseExpr - __builtin_choose_expr: the chosen arm is known at
+/// compile time, so just emit that subexpression.
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+  return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+/// VisitInitListExpr - Emit a braced initializer of complex type.  An empty
+/// list zero-initializes both halves.  NOTE(review): only getInit(0) is
+/// emitted — this assumes the single initializer is itself of complex type;
+/// confirm that two-element {re, im} lists are folded earlier by Sema.
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+    bool Ignore = TestAndClearIgnoreReal();
+    (void)Ignore;
+    assert (Ignore == false && "init list ignored");
+    Ignore = TestAndClearIgnoreImag();
+    (void)Ignore;
+    assert (Ignore == false && "init list ignored");
+  if (E->getNumInits())
+    return Visit(E->getInit(0));
+
+  // Empty init list initializes to null
+  QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
+  const llvm::Type* LTy = CGF.ConvertType(Ty);
+  llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+  return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+/// VisitVAArgExpr - Emit va_arg of complex type.  EmitVAArg returns the
+/// address of the argument in the va_list, or null when the target's ABI
+/// lowering does not support this type, in which case we diagnose and
+/// recover with an undef pair.
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+  llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+  if (!ArgPtr) {
+    CGF.ErrorUnsupported(E, "complex va_arg expression");
+    const llvm::Type *EltTy =
+      CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+    llvm::Value *U = llvm::UndefValue::get(EltTy);
+    return ComplexPairTy(U, U);
+  }
+
+  // FIXME Volatility.
+  return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+//                         Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning its value as a (real, imag) pair.  The Ignore*
+/// flags let callers indicate which parts of the result they will discard so
+/// the emitter can skip emitting them.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+                                               bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) {
+  assert(E && E->getType()->isAnyComplexType() &&
+         "Invalid complex expression to emit");
+
+  return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+                            IgnoreImagAssign)
+    .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.  DestAddr must point
+/// to an LLVM {real, imag} struct of the matching element type.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+                                              llvm::Value *DestAddr,
+                                              bool DestIsVolatile) {
+  assert(E && E->getType()->isAnyComplexType() &&
+         "Invalid complex expression to emit");
+  ComplexExprEmitter Emitter(*this);
+  ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+  Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+/// Thin convenience wrapper over ComplexExprEmitter::EmitStoreOfComplex.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+                                         llvm::Value *DestAddr,
+                                         bool DestIsVolatile) {
+  ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+/// Thin convenience wrapper over ComplexExprEmitter::EmitLoadOfComplex.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+                                                   bool SrcIsVolatile) {
+  return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..5800ce7
--- /dev/null
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,1056 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace  {
+/// ConstStructBuilder - Incrementally lays out a constant struct/union
+/// initializer as a sequence of LLVM constants plus undef padding, tracking
+/// the running byte offset and alignment.  When LLVM's natural field
+/// alignment would disagree with the AST record layout, the whole struct is
+/// converted to a packed (align-1) struct.
+class ConstStructBuilder {
+  CodeGenModule &CGM;
+  CodeGenFunction *CGF;   // May be null when emitting at global scope.
+
+  bool Packed;            // True once we've fallen back to a packed layout.
+
+  unsigned NextFieldOffsetInBytes;   // Byte offset just past the last element.
+
+  unsigned LLVMStructAlignment;      // Max element alignment seen so far.
+  
+  std::vector<llvm::Constant *> Elements;
+
+  ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
+    : CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0),
+    LLVMStructAlignment(1) { }
+
+  /// AppendField - Emit and append the constant for a non-bitfield member at
+  /// the given bit offset, inserting padding first (and repacking the struct
+  /// if natural alignment would overshoot the required offset).  Returns
+  /// false if the initializer cannot be emitted as a constant.
+  bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+                   const Expr *InitExpr) {
+    uint64_t FieldOffsetInBytes = FieldOffset / 8;
+
+    assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
+           && "Field offset mismatch!");
+
+    // Emit the field.
+    llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
+    if (!C)
+      return false;
+
+    unsigned FieldAlignment = getAlignment(C);
+
+    // Round up the field offset to the alignment of the field type.
+    uint64_t AlignedNextFieldOffsetInBytes =
+      llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+
+    if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
+      assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+      // Convert the struct to a packed struct.
+      ConvertStructToPacked();
+      
+      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+    }
+
+    if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+      // We need to append padding.
+      AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
+
+      assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
+             "Did not add enough padding!");
+
+      AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
+    }
+
+    // Add the field.
+    Elements.push_back(C);
+    NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);
+    
+    if (Packed)
+      assert(LLVMStructAlignment == 1 && "Packed struct not byte-aligned!");
+    else
+      LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+
+    return true;
+  }
+
+  /// AppendBitField - Emit a bitfield member's bits, merging them into the
+  /// partially-filled previous byte when the field starts mid-byte and then
+  /// appending whole bytes, honoring target endianness.  Returns false if
+  /// the initializer cannot be emitted as a constant integer.
+  /// NOTE: relies on this LLVM version's APInt API where trunc()/zext()
+  /// resize the value in place (the calls below discard the return value).
+  bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+                      const Expr *InitExpr) {
+    llvm::ConstantInt *CI =
+      cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
+                                                           Field->getType(),
+                                                           CGF));
+    // FIXME: Can this ever happen?
+    if (!CI)
+      return false;
+
+    if (FieldOffset > NextFieldOffsetInBytes * 8) {
+      // We need to add padding.
+      uint64_t NumBytes =
+        llvm::RoundUpToAlignment(FieldOffset -
+                                 NextFieldOffsetInBytes * 8, 8) / 8;
+
+      AppendPadding(NumBytes);
+    }
+
+    uint64_t FieldSize =
+      Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
+
+    llvm::APInt FieldValue = CI->getValue();
+
+    // Promote the size of FieldValue if necessary
+    // FIXME: This should never occur, but currently it can because initializer
+    // constants are cast to bool, and because clang is not enforcing bitfield
+    // width limits.
+    if (FieldSize > FieldValue.getBitWidth())
+      FieldValue.zext(FieldSize);
+
+    // Truncate the size of FieldValue to the bit field size.
+    if (FieldSize < FieldValue.getBitWidth())
+      FieldValue.trunc(FieldSize);
+
+    if (FieldOffset < NextFieldOffsetInBytes * 8) {
+      // Either part of the field or the entire field can go into the previous
+      // byte.
+      assert(!Elements.empty() && "Elements can't be empty!");
+
+      unsigned BitsInPreviousByte =
+        NextFieldOffsetInBytes * 8 - FieldOffset;
+
+      bool FitsCompletelyInPreviousByte =
+        BitsInPreviousByte >= FieldValue.getBitWidth();
+
+      llvm::APInt Tmp = FieldValue;
+
+      if (!FitsCompletelyInPreviousByte) {
+        unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+
+        if (CGM.getTargetData().isBigEndian()) {
+          Tmp = Tmp.lshr(NewFieldWidth);
+          Tmp.trunc(BitsInPreviousByte);
+
+          // We want the remaining high bits.
+          FieldValue.trunc(NewFieldWidth);
+        } else {
+          Tmp.trunc(BitsInPreviousByte);
+
+          // We want the remaining low bits.
+          FieldValue = FieldValue.lshr(BitsInPreviousByte);
+          FieldValue.trunc(NewFieldWidth);
+        }
+      }
+
+      // Position the new bits within a full byte and merge them with the
+      // bits already emitted in the trailing element.
+      Tmp.zext(8);
+      if (CGM.getTargetData().isBigEndian()) {
+        if (FitsCompletelyInPreviousByte)
+          Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+      } else {
+        Tmp = Tmp.shl(8 - BitsInPreviousByte);
+      }
+
+      // Or in the bits that go into the previous byte.
+      if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(Elements.back()))
+        Tmp |= Val->getValue();
+      else
+        assert(isa<llvm::UndefValue>(Elements.back()));
+
+      Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+
+      if (FitsCompletelyInPreviousByte)
+        return true;
+    }
+
+    // Emit the remaining bits one whole byte at a time.
+    while (FieldValue.getBitWidth() > 8) {
+      llvm::APInt Tmp;
+
+      if (CGM.getTargetData().isBigEndian()) {
+        // We want the high bits.
+        Tmp = FieldValue;
+        Tmp = Tmp.lshr(Tmp.getBitWidth() - 8);
+        Tmp.trunc(8);
+      } else {
+        // We want the low bits.
+        Tmp = FieldValue;
+        Tmp.trunc(8);
+
+        FieldValue = FieldValue.lshr(8);
+      }
+
+      Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+      NextFieldOffsetInBytes++;
+
+      FieldValue.trunc(FieldValue.getBitWidth() - 8);
+    }
+
+    assert(FieldValue.getBitWidth() > 0 &&
+           "Should have at least one bit left!");
+    assert(FieldValue.getBitWidth() <= 8 &&
+           "Should not have more than a byte left!");
+
+    // Pad the final partial byte out to 8 bits; big-endian targets keep the
+    // field bits in the high end of the byte.
+    if (FieldValue.getBitWidth() < 8) {
+      if (CGM.getTargetData().isBigEndian()) {
+        unsigned BitWidth = FieldValue.getBitWidth();
+
+        FieldValue.zext(8);
+        FieldValue = FieldValue << (8 - BitWidth);
+      } else
+        FieldValue.zext(8);
+    }
+
+    // Append the last element.
+    Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+                                              FieldValue));
+    NextFieldOffsetInBytes++;
+    return true;
+  }
+
+  /// AppendPadding - Append NumBytes of undef padding (i8 or [N x i8]).
+  void AppendPadding(uint64_t NumBytes) {
+    if (!NumBytes)
+      return;
+
+    const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+    if (NumBytes > 1)
+      Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+    llvm::Constant *C = llvm::UndefValue::get(Ty);
+    Elements.push_back(C);
+    assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
+
+    NextFieldOffsetInBytes += getSizeInBytes(C);
+  }
+
+  /// AppendTailPadding - Pad the layout out to the record's full size
+  /// (RecordSize is in bits).
+  void AppendTailPadding(uint64_t RecordSize) {
+    assert(RecordSize % 8 == 0 && "Invalid record size!");
+
+    uint64_t RecordSizeInBytes = RecordSize / 8;
+    assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+
+    unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+    AppendPadding(NumPadBytes);
+  }
+
+  /// ConvertStructToPacked - Rebuild Elements as an alignment-1 (packed)
+  /// layout, materializing each element's implicit alignment padding as
+  /// explicit undef bytes so byte offsets are preserved.
+  void ConvertStructToPacked() {
+    std::vector<llvm::Constant *> PackedElements;
+    uint64_t ElementOffsetInBytes = 0;
+
+    for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+      llvm::Constant *C = Elements[i];
+
+      unsigned ElementAlign =
+        CGM.getTargetData().getABITypeAlignment(C->getType());
+      uint64_t AlignedElementOffsetInBytes =
+        llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
+
+      if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
+        // We need some padding.
+        uint64_t NumBytes =
+          AlignedElementOffsetInBytes - ElementOffsetInBytes;
+
+        const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
+        if (NumBytes > 1)
+          Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+        llvm::Constant *Padding = llvm::UndefValue::get(Ty);
+        PackedElements.push_back(Padding);
+        ElementOffsetInBytes += getSizeInBytes(Padding);
+      }
+
+      PackedElements.push_back(C);
+      ElementOffsetInBytes += getSizeInBytes(C);
+    }
+
+    assert(ElementOffsetInBytes == NextFieldOffsetInBytes &&
+           "Packing the struct changed its size!");
+
+    Elements = PackedElements;
+    LLVMStructAlignment = 1;
+    Packed = true;
+  }
+                              
+  /// Build - Walk the record's fields in layout order, appending each
+  /// initialized field (skipping non-active union members and unnamed
+  /// bitfields), then fix up trailing padding.  Returns false if any
+  /// initializer is not a constant.
+  bool Build(InitListExpr *ILE) {
+    RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+    unsigned FieldNo = 0;
+    unsigned ElementNo = 0;
+    for (RecordDecl::field_iterator Field = RD->field_begin(),
+         FieldEnd = RD->field_end();
+         ElementNo < ILE->getNumInits() && Field != FieldEnd;
+         ++Field, ++FieldNo) {
+      // For a union, only the single initialized member is emitted.
+      if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+        continue;
+
+      if (Field->isBitField()) {
+        // Unnamed bitfields are padding only; they consume no initializer.
+        if (!Field->getIdentifier())
+          continue;
+
+        if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
+                            ILE->getInit(ElementNo)))
+          return false;
+      } else {
+        if (!AppendField(*Field, Layout.getFieldOffset(FieldNo),
+                         ILE->getInit(ElementNo)))
+          return false;
+      }
+
+      ElementNo++;
+    }
+
+    uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
+
+    if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
+      // If the struct is bigger than the size of the record type,
+      // we must have a flexible array member at the end.
+      assert(RD->hasFlexibleArrayMember() &&
+             "Must have flexible array member if struct is bigger than type!");
+      
+      // No tail padding is necessary.
+      return true;
+    }
+
+    uint64_t LLVMSizeInBytes = llvm::RoundUpToAlignment(NextFieldOffsetInBytes, 
+                                                        LLVMStructAlignment);
+
+    // Check if we need to convert the struct to a packed struct.
+    if (NextFieldOffsetInBytes <= LayoutSizeInBytes && 
+        LLVMSizeInBytes > LayoutSizeInBytes) {
+      assert(!Packed && "Size mismatch!");
+      
+      ConvertStructToPacked();
+      assert(NextFieldOffsetInBytes == LayoutSizeInBytes &&
+             "Converting to packed did not help!");
+    }
+
+    // Append tail padding if necessary.
+    AppendTailPadding(Layout.getSize());
+
+    assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+           "Tail padding mismatch!");
+
+    return true;
+  }
+
+  /// getAlignment - ABI alignment of a laid-out element; always 1 once the
+  /// struct is packed.
+  unsigned getAlignment(const llvm::Constant *C) const {
+    if (Packed)
+      return 1;
+
+    return CGM.getTargetData().getABITypeAlignment(C->getType());
+  }
+
+  /// getSizeInBytes - Allocated (alloc, not store) size of an element.
+  uint64_t getSizeInBytes(const llvm::Constant *C) const {
+    return CGM.getTargetData().getTypeAllocSize(C->getType());
+  }
+
+public:
+  /// BuildStruct - Entry point: build an anonymous LLVM constant struct for
+  /// the given initializer list, or return null if it is not constant.
+  static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+                                     InitListExpr *ILE) {
+    ConstStructBuilder Builder(CGM, CGF);
+
+    if (!Builder.Build(ILE))
+      return 0;
+
+    llvm::Constant *Result =
+      llvm::ConstantStruct::get(CGM.getLLVMContext(),
+                                Builder.Elements, Builder.Packed);
+
+    assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
+                                    Builder.getAlignment(Result)) ==
+           Builder.getSizeInBytes(Result) && "Size mismatch!");
+
+    return Result;
+  }
+};
+
+class ConstExprEmitter :
+  public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+  CodeGenModule &CGM;
+  CodeGenFunction *CGF;
+  llvm::LLVMContext &VMContext;
+public:
+  // CGF may be null when emitting a constant outside any function body.
+  ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+    : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
+  }
+
+  //===--------------------------------------------------------------------===//
+  //                            Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  // Fallback: a null return signals "not a constant we can emit".
+  llvm::Constant *VisitStmt(Stmt *S) {
+    return 0;
+  }
+
+  llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+    return Visit(PE->getSubExpr());
+  }
+
+  llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+    return Visit(E->getInitializer());
+  }
+    
+  llvm::Constant *EmitMemberFunctionPointer(CXXMethodDecl *MD) {
+    assert(MD->isInstance() && "Member function must not be static!");
+    
+    MD = MD->getCanonicalDecl();
+
+    const llvm::Type *PtrDiffTy = 
+      CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+    
+    llvm::Constant *Values[2];
+    
+    // Get the function pointer (or index if this is a virtual function).
+    if (MD->isVirtual()) {
+      uint64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD);
+
+      // Itanium C++ ABI 2.3:
+      //   For a non-virtual function, this field is a simple function pointer. 
+      //   For a virtual function, it is 1 plus the virtual table offset 
+      //   (in bytes) of the function, represented as a ptrdiff_t. 
+      Values[0] = llvm::ConstantInt::get(PtrDiffTy, (Index * 8) + 1);
+    } else {
+      const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+      const llvm::Type *Ty =
+        CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+                                       FPT->isVariadic());
+
+      llvm::Constant *FuncPtr = CGM.GetAddrOfFunction(MD, Ty);
+      Values[0] = llvm::ConstantExpr::getPtrToInt(FuncPtr, PtrDiffTy);
+    } 
+    
+    // The adjustment will always be 0.
+    Values[1] = llvm::ConstantInt::get(PtrDiffTy, 0);
+    
+    return llvm::ConstantStruct::get(CGM.getLLVMContext(),
+                                     Values, 2, /*Packed=*/false);
+  }
+
+  llvm::Constant *VisitUnaryAddrOf(UnaryOperator *E) {
+    if (const MemberPointerType *MPT = 
+        E->getType()->getAs<MemberPointerType>()) {
+      QualType T = MPT->getPointeeType();
+      DeclRefExpr *DRE = cast<DeclRefExpr>(E->getSubExpr());
+
+      NamedDecl *ND = DRE->getDecl();
+      if (T->isFunctionProtoType())
+        return EmitMemberFunctionPointer(cast<CXXMethodDecl>(ND));
+      
+      // We have a pointer to data member.
+      return CGM.EmitPointerToDataMember(cast<FieldDecl>(ND));
+    }
+
+    return 0;
+  }
+    
+  llvm::Constant *VisitBinSub(BinaryOperator *E) {
+    // This must be a pointer/pointer subtraction.  This only happens for
+    // address of label.
+    if (!isa<AddrLabelExpr>(E->getLHS()->IgnoreParenNoopCasts(CGM.getContext())) ||
+       !isa<AddrLabelExpr>(E->getRHS()->IgnoreParenNoopCasts(CGM.getContext())))
+      return 0;
+    
+    llvm::Constant *LHS = CGM.EmitConstantExpr(E->getLHS(),
+                                               E->getLHS()->getType(), CGF);
+    llvm::Constant *RHS = CGM.EmitConstantExpr(E->getRHS(),
+                                               E->getRHS()->getType(), CGF);
+
+    const llvm::Type *ResultType = ConvertType(E->getType());
+    LHS = llvm::ConstantExpr::getPtrToInt(LHS, ResultType);
+    RHS = llvm::ConstantExpr::getPtrToInt(RHS, ResultType);
+        
+    // No need to divide by element size, since addr of label is always void*,
+    // which has size 1 in GNUish.
+    return llvm::ConstantExpr::getSub(LHS, RHS);
+  }
+    
+  llvm::Constant *VisitCastExpr(CastExpr* E) {
+    switch (E->getCastKind()) {
+    case CastExpr::CK_ToUnion: {
+      // GCC cast to union extension
+      assert(E->getType()->isUnionType() &&
+             "Destination type is not union type!");
+      const llvm::Type *Ty = ConvertType(E->getType());
+      Expr *SubExpr = E->getSubExpr();
+
+      llvm::Constant *C =
+        CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+      if (!C)
+        return 0;
+
+      // Build a struct with the union sub-element as the first member,
+      // and padded to the appropriate size
+      std::vector<llvm::Constant*> Elts;
+      std::vector<const llvm::Type*> Types;
+      Elts.push_back(C);
+      Types.push_back(C->getType());
+      unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+      unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
+
+      assert(CurSize <= TotalSize && "Union size mismatch!");
+      if (unsigned NumPadBytes = TotalSize - CurSize) {
+        const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
+        if (NumPadBytes > 1)
+          Ty = llvm::ArrayType::get(Ty, NumPadBytes);
+
+        Elts.push_back(llvm::UndefValue::get(Ty));
+        Types.push_back(Ty);
+      }
+
+      llvm::StructType* STy =
+        llvm::StructType::get(C->getType()->getContext(), Types, false);
+      return llvm::ConstantStruct::get(STy, Elts);
+    }
+    case CastExpr::CK_NullToMemberPointer:
+      return CGM.EmitNullConstant(E->getType());
+      
+    case CastExpr::CK_BaseToDerivedMemberPointer: {
+      Expr *SubExpr = E->getSubExpr();
+
+      const MemberPointerType *SrcTy = 
+        SubExpr->getType()->getAs<MemberPointerType>();
+      const MemberPointerType *DestTy = 
+        E->getType()->getAs<MemberPointerType>();
+      
+      const CXXRecordDecl *BaseClass =
+        cast<CXXRecordDecl>(cast<RecordType>(SrcTy->getClass())->getDecl());
+      const CXXRecordDecl *DerivedClass =
+        cast<CXXRecordDecl>(cast<RecordType>(DestTy->getClass())->getDecl());
+
+      if (SrcTy->getPointeeType()->isFunctionProtoType()) {
+        llvm::Constant *C = 
+          CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
+        if (!C)
+          return 0;
+        
+        llvm::ConstantStruct *CS = cast<llvm::ConstantStruct>(C);
+        
+        // Check if we need to update the adjustment.
+        if (llvm::Constant *Offset = 
+              CGM.GetNonVirtualBaseClassOffset(DerivedClass, BaseClass)) {
+          llvm::Constant *Values[2];
+        
+          Values[0] = CS->getOperand(0);
+          Values[1] = llvm::ConstantExpr::getAdd(CS->getOperand(1), Offset);
+          return llvm::ConstantStruct::get(CGM.getLLVMContext(), Values, 2, 
+                                           /*Packed=*/false);
+        }
+        
+        return CS;
+      }          
+    }
+
+    case CastExpr::CK_BitCast: 
+      // This must be a member function pointer cast.
+      return Visit(E->getSubExpr());
+
+    default: {
+      // FIXME: This should be handled by the CK_NoOp cast kind.
+      // Explicit and implicit no-op casts
+      QualType Ty = E->getType(), SubTy = E->getSubExpr()->getType();
+      if (CGM.getContext().hasSameUnqualifiedType(Ty, SubTy))
+        return Visit(E->getSubExpr());
+
+      // Handle integer->integer casts for address-of-label differences.
+      if (Ty->isIntegerType() && SubTy->isIntegerType() &&
+          CGF) {
+        llvm::Value *Src = Visit(E->getSubExpr());
+        if (Src == 0) return 0;
+        
+        // Use EmitScalarConversion to perform the conversion.
+        return cast<llvm::Constant>(CGF->EmitScalarConversion(Src, SubTy, Ty));
+      }
+      
+      return 0;
+    }
+    }
+  }
+
+  llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+    return Visit(DAE->getExpr());
+  }
+
+  llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+    std::vector<llvm::Constant*> Elts;
+    const llvm::ArrayType *AType =
+        cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+    unsigned NumInitElements = ILE->getNumInits();
+    // FIXME: Check for wide strings
+    // FIXME: Check for NumInitElements exactly equal to 1??
+    if (NumInitElements > 0 &&
+        (isa<StringLiteral>(ILE->getInit(0)) ||
+         isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
+        ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
+      return Visit(ILE->getInit(0));
+    const llvm::Type *ElemTy = AType->getElementType();
+    unsigned NumElements = AType->getNumElements();
+
+    // Initialising an array requires us to automatically
+    // initialise any elements that have not been initialised explicitly
+    unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+    // Copy initializer elements.
+    unsigned i = 0;
+    bool RewriteType = false;
+    for (; i < NumInitableElts; ++i) {
+      Expr *Init = ILE->getInit(i);
+      llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+      if (!C)
+        return 0;
+      RewriteType |= (C->getType() != ElemTy);
+      Elts.push_back(C);
+    }
+
+    // Initialize remaining array elements.
+    // FIXME: This doesn't handle member pointers correctly!
+    for (; i < NumElements; ++i)
+      Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+    if (RewriteType) {
+      // FIXME: Try to avoid packing the array
+      std::vector<const llvm::Type*> Types;
+      for (unsigned i = 0; i < Elts.size(); ++i)
+        Types.push_back(Elts[i]->getType());
+      const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+                                                            Types, true);
+      return llvm::ConstantStruct::get(SType, Elts);
+    }
+
+    return llvm::ConstantArray::get(AType, Elts);
+  }
+
+  llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
+    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+  }
+
+  llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+    return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+  }
+
+  llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+    return CGM.EmitNullConstant(E->getType());
+  }
+
+  llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+    if (ILE->getType()->isScalarType()) {
+      // We have a scalar in braces. Just use the first element.
+      if (ILE->getNumInits() > 0) {
+        Expr *Init = ILE->getInit(0);
+        return CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+      }
+      return CGM.EmitNullConstant(ILE->getType());
+    }
+
+    if (ILE->getType()->isArrayType())
+      return EmitArrayInitialization(ILE);
+
+    if (ILE->getType()->isRecordType())
+      return EmitStructInitialization(ILE);
+
+    if (ILE->getType()->isUnionType())
+      return EmitUnionInitialization(ILE);
+
+    // If ILE was a constant vector, we would have handled it already.
+    if (ILE->getType()->isVectorType())
+      return 0;
+
+    assert(0 && "Unable to handle InitListExpr");
+    // Get rid of control reaches end of void function warning.
+    // Not reached.
+    return 0;
+  }
+
+  llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
+    if (!E->getConstructor()->isTrivial())
+      return 0;
+
+    QualType Ty = E->getType();
+
+    // FIXME: We should not have to call getBaseElementType here.
+    const RecordType *RT = 
+      CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
+    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+    
+    // If the class doesn't have a trivial destructor, we can't emit it as a
+    // constant expr.
+    if (!RD->hasTrivialDestructor())
+      return 0;
+    
+    // Only copy and default constructors can be trivial.
+
+
+    if (E->getNumArgs()) {
+      assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+      assert(E->getConstructor()->isCopyConstructor() &&
+             "trivial ctor has argument but isn't a copy ctor");
+
+      Expr *Arg = E->getArg(0);
+      assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
+             "argument to copy ctor is of wrong type");
+
+      return Visit(Arg);
+    }
+
+    return CGM.EmitNullConstant(Ty);
+  }
+
+  llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+    assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+    // This must be a string initializing an array in a static initializer.
+    // Don't emit it as the address of the string, emit the string data itself
+    // as an inline array.
+    return llvm::ConstantArray::get(VMContext,
+                                    CGM.GetStringForStringLiteral(E), false);
+  }
+
+  llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+    // This must be an @encode initializing an array in a static initializer.
+    // Don't emit it as the address of the string, emit the string data itself
+    // as an inline array.
+    std::string Str;
+    CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+    const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
+
+    // Resize the string to the right size, adding zeros at the end, or
+    // truncating as needed.
+    Str.resize(CAT->getSize().getZExtValue(), '\0');
+    return llvm::ConstantArray::get(VMContext, Str, false);
+  }
+
+  llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+    return Visit(E->getSubExpr());
+  }
+
+  // Utility methods
+  const llvm::Type *ConvertType(QualType T) {
+    return CGM.getTypes().ConvertType(T);
+  }
+
+public:
+  llvm::Constant *EmitLValue(Expr *E) {
+    switch (E->getStmtClass()) {
+    default: break;
+    case Expr::CompoundLiteralExprClass: {
+      // Note that due to the nature of compound literals, this is guaranteed
+      // to be the only use of the variable, so we just generate it here.
+      CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+      llvm::Constant* C = Visit(CLE->getInitializer());
+      // FIXME: "Leaked" on failure.
+      if (C)
+        C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+                                     E->getType().isConstant(CGM.getContext()),
+                                     llvm::GlobalValue::InternalLinkage,
+                                     C, ".compoundliteral", 0, false,
+                                     E->getType().getAddressSpace());
+      return C;
+    }
+    case Expr::DeclRefExprClass: {
+      NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
+      if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+        return CGM.GetAddrOfFunction(FD);
+      if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+        // We can never refer to a variable with local storage.
+        if (!VD->hasLocalStorage()) {
+          if (VD->isFileVarDecl() || VD->hasExternalStorage())
+            return CGM.GetAddrOfGlobalVar(VD);
+          else if (VD->isBlockVarDecl()) {
+            assert(CGF && "Can't access static local vars without CGF");
+            return CGF->GetAddrOfStaticLocalVar(VD);
+          }
+        }
+      }
+      break;
+    }
+    case Expr::StringLiteralClass:
+      return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+    case Expr::ObjCEncodeExprClass:
+      return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+    case Expr::ObjCStringLiteralClass: {
+      ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+      llvm::Constant *C =
+          CGM.getObjCRuntime().GenerateConstantString(SL->getString());
+      return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+    }
+    case Expr::PredefinedExprClass: {
+      unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
+      if (CGF) {
+        LValue Res = CGF->EmitPredefinedFunctionName(Type);
+        return cast<llvm::Constant>(Res.getAddress());
+      } else if (Type == PredefinedExpr::PrettyFunction) {
+        return CGM.GetAddrOfConstantCString("top level", ".tmp");
+      }
+
+      return CGM.GetAddrOfConstantCString("", ".tmp");
+    }
+    case Expr::AddrLabelExprClass: {
+      assert(CGF && "Invalid address of label expression outside function.");
+      llvm::Constant *Ptr =
+        CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+      return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+    }
+    case Expr::CallExprClass: {
+      CallExpr* CE = cast<CallExpr>(E);
+      unsigned builtin = CE->isBuiltinCall(CGM.getContext());
+      if (builtin !=
+            Builtin::BI__builtin___CFStringMakeConstantString &&
+          builtin !=
+            Builtin::BI__builtin___NSStringMakeConstantString)
+        break;
+      const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+      const StringLiteral *Literal = cast<StringLiteral>(Arg);
+      if (builtin ==
+            Builtin::BI__builtin___NSStringMakeConstantString) {
+        return CGM.getObjCRuntime().GenerateConstantString(Literal);
+      }
+      // FIXME: need to deal with UCN conversion issues.
+      return CGM.GetAddrOfConstantCFString(Literal);
+    }
+    case Expr::BlockExprClass: {
+      std::string FunctionName;
+      if (CGF)
+        FunctionName = CGF->CurFn->getName();
+      else
+        FunctionName = "global";
+
+      return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+    }
+    }
+
+    return 0;
+  }
+};
+
+}  // end anonymous namespace.
+
+/// EmitConstantExpr - Try to emit E as an llvm::Constant of memory type
+/// DestType, or return 0 if E is not a constant expression.  First asks the
+/// AST-level evaluator (Expr::Evaluate); if that produces an APValue, it is
+/// translated case-by-case below.  Otherwise falls back to the IR-level
+/// ConstExprEmitter visitor.  \p CGF may be null at global scope.
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+                                                QualType DestType,
+                                                CodeGenFunction *CGF) {
+  Expr::EvalResult Result;
+
+  bool Success = false;
+
+  // A reference is bound to an l-value; everything else evaluates as
+  // an r-value.
+  if (DestType->isReferenceType())
+    Success = E->EvaluateAsLValue(Result, Context);
+  else
+    Success = E->Evaluate(Result, Context);
+
+  if (Success && !Result.HasSideEffects) {
+    switch (Result.Val.getKind()) {
+    case APValue::Uninitialized:
+      assert(0 && "Constant expressions should be initialized.");
+      return 0;
+    case APValue::LValue: {
+      // An l-value result is base + byte offset.
+      const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+      llvm::Constant *Offset =
+        llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+                               Result.Val.getLValueOffset().getQuantity());
+
+      llvm::Constant *C;
+      if (const Expr *LVBase = Result.Val.getLValueBase()) {
+        C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
+
+        // Apply offset if necessary: bitcast to i8*, GEP by the byte offset,
+        // then cast back to the base's original pointer type.
+        if (!Offset->isNullValue()) {
+          const llvm::Type *Type = llvm::Type::getInt8PtrTy(VMContext);
+          llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
+          Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
+          C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+        }
+
+        // Convert to the appropriate type; this could be an lvalue for
+        // an integer.
+        if (isa<llvm::PointerType>(DestTy))
+          return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+        return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+      } else {
+        // No base: the "address" is a bare integer offset (e.g. (int*)5).
+        C = Offset;
+
+        // Convert to the appropriate type; this could be an lvalue for
+        // an integer.
+        if (isa<llvm::PointerType>(DestTy))
+          return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+        // If the types don't match this should only be a truncate.
+        if (C->getType() != DestTy)
+          return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+        return C;
+      }
+    }
+    case APValue::Int: {
+      llvm::Constant *C = llvm::ConstantInt::get(VMContext,
+                                                 Result.Val.getInt());
+
+      // Bools evaluate to i1 but are stored as the memory type (e.g. i8).
+      if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+        const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+        C = llvm::ConstantExpr::getZExt(C, BoolTy);
+      }
+      return C;
+    }
+    case APValue::ComplexInt: {
+      llvm::Constant *Complex[2];
+
+      Complex[0] = llvm::ConstantInt::get(VMContext,
+                                          Result.Val.getComplexIntReal());
+      Complex[1] = llvm::ConstantInt::get(VMContext,
+                                          Result.Val.getComplexIntImag());
+
+      // FIXME: the target may want to specify that this is packed.
+      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+    }
+    case APValue::Float:
+      return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
+    case APValue::ComplexFloat: {
+      llvm::Constant *Complex[2];
+
+      Complex[0] = llvm::ConstantFP::get(VMContext,
+                                         Result.Val.getComplexFloatReal());
+      Complex[1] = llvm::ConstantFP::get(VMContext,
+                                         Result.Val.getComplexFloatImag());
+
+      // FIXME: the target may want to specify that this is packed.
+      return llvm::ConstantStruct::get(VMContext, Complex, 2, false);
+    }
+    case APValue::Vector: {
+      llvm::SmallVector<llvm::Constant *, 4> Inits;
+      unsigned NumElts = Result.Val.getVectorLength();
+
+      for (unsigned i = 0; i != NumElts; ++i) {
+        APValue &Elt = Result.Val.getVectorElt(i);
+        if (Elt.isInt())
+          Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
+        else
+          Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+      }
+      return llvm::ConstantVector::get(&Inits[0], Inits.size());
+    }
+    }
+  }
+
+  // Evaluation failed; fall back to folding the IR directly.
+  llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+  if (C && C->getType() == llvm::Type::getInt1Ty(VMContext)) {
+    const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+    C = llvm::ConstantExpr::getZExt(C, BoolTy);
+  }
+  return C;
+}
+
+/// containsPointerToDataMember - Return whether T is, or (after stripping
+/// arrays) contains a field of, pointer-to-data-member type.  Such types
+/// cannot be zero-initialized with an all-zero value, because the Itanium
+/// ABI represents the null pointer to data member as -1.
+static bool containsPointerToDataMember(CodeGenTypes &Types, QualType T) {
+  // No need to check for member pointers when not compiling C++.
+  if (!Types.getContext().getLangOptions().CPlusPlus)
+    return false;
+
+  // An array contains one iff its element type does.
+  T = Types.getContext().getBaseElementType(T);
+
+  // A member pointer itself: only data-member pointers qualify.
+  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+    return !MPT->getPointeeType()->isFunctionType();
+
+  const RecordType *RT = T->getAs<RecordType>();
+  if (!RT)
+    return false;
+
+  const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+  // FIXME: It would be better if there was a way to explicitly compute the
+  // record layout instead of converting to a type.
+  Types.ConvertTagDeclType(RD);
+
+  return Types.getCGRecordLayout(RD).containsPointerToDataMember();
+}
+
+/// EmitNullConstant - Emit the "zero-initialized" constant for type T.  For
+/// most types this is a plain null value, but types containing pointers to
+/// data members need -1 in those positions (Itanium ABI), so arrays and
+/// records are rebuilt element by element recursively.
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+  // Fast path: no pointer-to-data-member anywhere inside T.
+  if (!containsPointerToDataMember(getTypes(), T))
+    return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
+    
+  if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+
+    QualType ElementTy = CAT->getElementType();
+
+    // One null element, replicated across the whole array.
+    llvm::Constant *Element = EmitNullConstant(ElementTy);
+    unsigned NumElements = CAT->getSize().getZExtValue();
+    std::vector<llvm::Constant *> Array(NumElements);
+    for (unsigned i = 0; i != NumElements; ++i)
+      Array[i] = Element;
+
+    const llvm::ArrayType *ATy =
+      cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+    return llvm::ConstantArray::get(ATy, Array);
+  }
+
+  if (const RecordType *RT = T->getAs<RecordType>()) {
+    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+    assert(!RD->getNumBases() && 
+           "FIXME: Handle zero-initializing structs with bases and "
+           "pointers to data members.");
+    const llvm::StructType *STy =
+      cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
+    unsigned NumElements = STy->getNumElements();
+    std::vector<llvm::Constant *> Elements(NumElements);
+
+    // Emit a null constant for each declared field at its LLVM field index.
+    for (RecordDecl::field_iterator I = RD->field_begin(),
+         E = RD->field_end(); I != E; ++I) {
+      const FieldDecl *FD = *I;
+      
+      unsigned FieldNo = getTypes().getLLVMFieldNo(FD);
+      Elements[FieldNo] = EmitNullConstant(FD->getType());
+    }
+    
+    // Now go through all other fields (e.g. padding) and zero them out.
+    for (unsigned i = 0; i != NumElements; ++i) {
+      if (!Elements[i])
+        Elements[i] = llvm::Constant::getNullValue(STy->getElementType(i));
+    }
+    
+    return llvm::ConstantStruct::get(STy, Elements);
+  }
+
+  // The only remaining case is a pointer to data member itself.
+  assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+         "Should only see pointers to data members here!");
+  
+  // Itanium C++ ABI 2.3:
+  //   A NULL pointer is represented as -1.
+  return llvm::ConstantInt::get(getTypes().ConvertTypeForMem(T), -1, 
+                                /*isSigned=*/true);
+}
+
+/// EmitPointerToDataMember - Emit the constant representing &Class::Field.
+///
+/// Itanium C++ ABI 2.3: a pointer to data member is the offset, in bytes, of
+/// the member from the base address of an object of its class, represented
+/// as a ptrdiff_t.
+llvm::Constant *
+CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) {
+  const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(FD->getParent());
+
+  // Convert the enclosing class to its LLVM struct type so we can query the
+  // target's layout for the field's byte offset.
+  QualType ClassType =
+    getContext().getTypeDeclType(const_cast<CXXRecordDecl *>(ClassDecl));
+  const llvm::StructType *ClassLTy =
+    cast<llvm::StructType>(getTypes().ConvertType(ClassType));
+
+  uint64_t Offset =
+    getTargetData().getStructLayout(ClassLTy)
+      ->getElementOffset(getTypes().getLLVMFieldNo(FD));
+
+  const llvm::Type *PtrDiffTy =
+    getTypes().ConvertType(getContext().getPointerDiffType());
+  return llvm::ConstantInt::get(PtrDiffTy, Offset);
+}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..cb3fb61
--- /dev/null
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,1905 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+//                         Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
+// BinOpInfo - Bundles the two emitted operands, the computation type, and
+// the originating AST node of a binary operator so the individual emitter
+// helpers can share a single signature.
+struct BinOpInfo {
+  Value *LHS;
+  Value *RHS;
+  QualType Ty;  // Computation Type.
+  const BinaryOperator *E;  // Originating expression, kept for diagnostics.
+};
+
+namespace {
+/// ScalarExprEmitter - Emits expressions whose LLVM representation is a
+/// single scalar Value.  Each Visit* method returns the computed Value.
+class ScalarExprEmitter
+  : public StmtVisitor<ScalarExprEmitter, Value*> {
+  CodeGenFunction &CGF;
+  CGBuilderTy &Builder;
+  bool IgnoreResultAssign;  // One-shot flag; see TestAndClearIgnoreResultAssign.
+  llvm::LLVMContext &VMContext;
+public:
+
+  // \p ira requests that the result of the next assignment-like expression
+  // be ignored (the flag is cleared as soon as it is consumed).
+  ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+    : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
+      VMContext(cgf.getLLVMContext()) {
+  }
+
+  //===--------------------------------------------------------------------===//
+  //                               Utilities
+  //===--------------------------------------------------------------------===//
+
+  // Consume the one-shot IgnoreResultAssign flag: return its current value
+  // and reset it to false.
+  bool TestAndClearIgnoreResultAssign() {
+    bool I = IgnoreResultAssign;
+    IgnoreResultAssign = false;
+    return I;
+  }
+
+  // Thin forwarding helpers into CodeGenFunction.
+  const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+  LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+  LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+
+  // Load the scalar value out of an already-computed l-value.
+  Value *EmitLoadOfLValue(LValue LV, QualType T) {
+    return CGF.EmitLoadOfLValue(LV, T).getScalarVal();
+  }
+
+  /// EmitLoadOfLValue - Given an expression with scalar type that represents
+  /// an l-value, this method emits the address of the l-value, then loads
+  /// and returns the result.
+  Value *EmitLoadOfLValue(const Expr *E) {
+    return EmitLoadOfLValue(EmitCheckedLValue(E), E->getType());
+  }
+
+  /// EmitConversionToBool - Convert the specified expression value to a
+  /// boolean (i1) truth value.  This is equivalent to "Val != 0".
+  Value *EmitConversionToBool(Value *Src, QualType DstTy);
+
+  /// EmitScalarConversion - Emit a conversion from the specified type to the
+  /// specified destination type, both of which are LLVM scalar types.
+  Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+
+  /// EmitComplexToScalarConversion - Emit a conversion from the specified
+  /// complex type to the specified destination type, where the destination type
+  /// is an LLVM scalar type.
+  Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+                                       QualType SrcTy, QualType DstTy);
+
+  //===--------------------------------------------------------------------===//
+  //                            Visitor Methods
+  //===--------------------------------------------------------------------===//
+
+  // Default case: dump the offending statement, then abort -- every scalar
+  // statement kind is expected to have an explicit visitor.
+  Value *VisitStmt(Stmt *S) {
+    S->dump(CGF.getContext().getSourceManager());
+    assert(0 && "Stmt can't have complex result type!");
+    return 0;
+  }
+  Value *VisitExpr(Expr *S);
+  
+  Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); }
+
+  // Leaves: literals fold directly to LLVM constants.
+  Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+    return llvm::ConstantInt::get(VMContext, E->getValue());
+  }
+  Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+    return llvm::ConstantFP::get(VMContext, E->getValue());
+  }
+  Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+  }
+  Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+    return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+  }
+  Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
+  }
+  Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
+  }
+  // __builtin_types_compatible_p folds to a constant 0/1 at compile time.
+  Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
+    return llvm::ConstantInt::get(ConvertType(E->getType()),
+                                  CGF.getContext().typesAreCompatible(
+                                    E->getArgType1(), E->getArgType2()));
+  }
+  Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E);
+  // GCC address-of-label extension (&&label).
+  Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+    llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
+    return Builder.CreateBitCast(V, ConvertType(E->getType()));
+  }
+
+  // l-values.
+  Value *VisitDeclRefExpr(DeclRefExpr *E) {
+    // If the declref folds to an integer constant (e.g. an enumerator or a
+    // const integer), emit that directly instead of loading from memory.
+    Expr::EvalResult Result;
+    if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+      assert(!Result.HasSideEffects && "Constant declref with side-effect?!");
+      return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+    }
+    return EmitLoadOfLValue(E);
+  }
+  Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+    return CGF.EmitObjCSelectorExpr(E);
+  }
+  Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+    return CGF.EmitObjCProtocolExpr(E);
+  }
+  Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+    return EmitLoadOfLValue(E);
+  }
+  Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+    return EmitLoadOfLValue(E);
+  }
+  Value *VisitObjCImplicitSetterGetterRefExpr(
+                        ObjCImplicitSetterGetterRefExpr *E) {
+    return EmitLoadOfLValue(E);
+  }
+  Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+    return CGF.EmitObjCMessageExpr(E).getScalarVal();
+  }
+
+  Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
+    LValue LV = CGF.EmitObjCIsaExpr(E);
+    Value *V = CGF.EmitLoadOfLValue(LV, E->getType()).getScalarVal();
+    return V;
+  }
+
+  Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+  Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+  Value *VisitMemberExpr(MemberExpr *E);
+  Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+  Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+    return EmitLoadOfLValue(E);
+  }
+
+  Value *VisitInitListExpr(InitListExpr *E);
+
+  Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
+  }
+  Value *VisitCastExpr(CastExpr *E) {
+    // Make sure to evaluate VLA bounds now so that we have them for later.
+    if (E->getType()->isVariablyModifiedType())
+      CGF.EmitVLASize(E->getType());
+
+    return EmitCastExpr(E);
+  }
+  Value *EmitCastExpr(CastExpr *E);
+
+  Value *VisitCallExpr(const CallExpr *E) {
+    // A call returning a reference yields an l-value; load through it.
+    if (E->getCallReturnType()->isReferenceType())
+      return EmitLoadOfLValue(E);
+
+    return CGF.EmitCallExpr(E).getScalarVal();
+  }
+
+  Value *VisitStmtExpr(const StmtExpr *E);
+
+  Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E);
+
+  // Unary Operators.
+  // Common helper for ++/--: emit the operand as an l-value, then let
+  // CodeGenFunction perform the increment/decrement and pick the pre/post
+  // result value.
+  Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre) {
+    LValue LV = EmitLValue(E->getSubExpr());
+    return CGF.EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+  }
+  Value *VisitUnaryPostDec(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, false, false);
+  }
+  Value *VisitUnaryPostInc(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, true, false);
+  }
+  Value *VisitUnaryPreDec(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, false, true);
+  }
+  Value *VisitUnaryPreInc(const UnaryOperator *E) {
+    return VisitPrePostIncDec(E, true, true);
+  }
+  Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+    return EmitLValue(E->getSubExpr()).getAddress();
+  }
+  Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+  Value *VisitUnaryPlus(const UnaryOperator *E) {
+    // Unary '+' is a no-op on the value; the result is still used, so clear
+    // the pending ignore-result flag before visiting the operand.
+    // This differs from gcc, though, most likely due to a bug in gcc.
+    TestAndClearIgnoreResultAssign();
+    return Visit(E->getSubExpr());
+  }
+  Value *VisitUnaryMinus    (const UnaryOperator *E);
+  Value *VisitUnaryNot      (const UnaryOperator *E);
+  Value *VisitUnaryLNot     (const UnaryOperator *E);
+  Value *VisitUnaryReal     (const UnaryOperator *E);
+  Value *VisitUnaryImag     (const UnaryOperator *E);
+  // __extension__ has no effect on code generation.
+  Value *VisitUnaryExtension(const UnaryOperator *E) {
+    return Visit(E->getSubExpr());
+  }
+  Value *VisitUnaryOffsetOf(const UnaryOperator *E);
+
+  // C++
+  // A defaulted argument just evaluates the stored default expression.
+  Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+    return Visit(DAE->getExpr());
+  }
+  Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+    return CGF.LoadCXXThis();
+  }
+
+  Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
+    return CGF.EmitCXXExprWithTemporaries(E).getScalarVal();
+  }
+  Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+    return CGF.EmitCXXNewExpr(E);
+  }
+  // 'delete' has type void; there is no value to return.
+  Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+    CGF.EmitCXXDeleteExpr(E);
+    return 0;
+  }
+  // Type traits are evaluated by the frontend and folded to an i1 constant.
+  Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+    return llvm::ConstantInt::get(Builder.getInt1Ty(),
+                                  E->EvaluateTrait(CGF.getContext()));
+  }
+
+  Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+    // C++ [expr.pseudo]p1:
+    //   The result shall only be used as the operand for the function call
+    //   operator (), and the result of such a call has type void. The only
+    //   effect is the evaluation of the postfix-expression before the dot or
+    //   arrow.
+    CGF.EmitScalarExpr(E->getBase());
+    return 0;
+  }
+
+  Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+    return llvm::Constant::getNullValue(ConvertType(E->getType()));
+  }
+
+  // 'throw' has type void; emission never produces a value.
+  Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
+    CGF.EmitCXXThrowExpr(E);
+    return 0;
+  }
+
+  // Binary Operators.
+  Value *EmitMul(const BinOpInfo &Ops) {
+    // With -ftrapv-style overflow checking enabled, signed integer multiply
+    // goes through the overflow-checked path.
+    if (CGF.getContext().getLangOptions().OverflowChecking
+        && Ops.Ty->isSignedIntegerType())
+      return EmitOverflowCheckedBinOp(Ops);
+    if (Ops.LHS->getType()->isFPOrFPVector())
+      return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+    return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+  }
+  /// Create a binary op that checks for overflow.
+  /// Currently only supports +, - and *.
+  Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+  Value *EmitDiv(const BinOpInfo &Ops);
+  Value *EmitRem(const BinOpInfo &Ops);
+  Value *EmitAdd(const BinOpInfo &Ops);
+  Value *EmitSub(const BinOpInfo &Ops);
+  Value *EmitShl(const BinOpInfo &Ops);
+  Value *EmitShr(const BinOpInfo &Ops);
+  // The bitwise operators have no signed/unsigned or FP variants.
+  Value *EmitAnd(const BinOpInfo &Ops) {
+    return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+  }
+  Value *EmitXor(const BinOpInfo &Ops) {
+    return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+  }
+  Value *EmitOr (const BinOpInfo &Ops) {
+    return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+  }
+
+  BinOpInfo EmitBinOps(const BinaryOperator *E);
+  Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+                            Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+  // Binary operators and binary compound assignment operators.  For each
+  // operator OP this expands to a VisitBinOP that evaluates both operands
+  // and dispatches to EmitOP, and a VisitBinOPAssign for the compound
+  // 'OP=' form.
+#define HANDLEBINOP(OP) \
+  Value *VisitBin ## OP(const BinaryOperator *E) {                         \
+    return Emit ## OP(EmitBinOps(E));                                      \
+  }                                                                        \
+  Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) {       \
+    return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP);          \
+  }
+  HANDLEBINOP(Mul)
+  HANDLEBINOP(Div)
+  HANDLEBINOP(Rem)
+  HANDLEBINOP(Add)
+  HANDLEBINOP(Sub)
+  HANDLEBINOP(Shl)
+  HANDLEBINOP(Shr)
+  HANDLEBINOP(And)
+  HANDLEBINOP(Xor)
+  HANDLEBINOP(Or)
+#undef HANDLEBINOP
+
+  // Comparisons.  Each comparison supplies its unsigned-int, signed-int,
+  // and (ordered, except !=) floating-point predicate to EmitCompare.
+  Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+                     unsigned SICmpOpc, unsigned FCmpOpc);
+#define VISITCOMP(CODE, UI, SI, FP) \
+    Value *VisitBin##CODE(const BinaryOperator *E) { \
+      return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+                         llvm::FCmpInst::FP); }
+  VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
+  VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
+  VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
+  VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
+  VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
+  VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
+#undef VISITCOMP
+
+  Value *VisitBinAssign     (const BinaryOperator *E);
+
+  Value *VisitBinLAnd       (const BinaryOperator *E);
+  Value *VisitBinLOr        (const BinaryOperator *E);
+  Value *VisitBinComma      (const BinaryOperator *E);
+
+  // Pointer-to-member access (.* and ->*) yields an l-value; load from it.
+  Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
+  Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
+
+  // Other Operators.
+  Value *VisitBlockExpr(const BlockExpr *BE);
+  Value *VisitConditionalOperator(const ConditionalOperator *CO);
+  Value *VisitChooseExpr(ChooseExpr *CE);
+  Value *VisitVAArgExpr(VAArgExpr *VE);
+  Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+    return CGF.EmitObjCStringLiteral(E);
+  }
+};
+}  // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+//                                Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value.  This is equivalent to "Val != 0".
+/// \param Src the scalar value to test; \param SrcType its canonical AST type.
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+  assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
+
+  if (SrcType->isRealFloatingType()) {
+    // Compare against 0.0 for fp scalars.
+    llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+    return Builder.CreateFCmpUNE(Src, Zero, "tobool");
+  }
+
+  if (SrcType->isMemberPointerType()) {
+    // Compare against -1.
+    // NOTE(review): assumes a null member pointer is represented as all-ones
+    // here -- confirm against the member-pointer ABI lowering.
+    llvm::Value *NegativeOne = llvm::Constant::getAllOnesValue(Src->getType());
+    return Builder.CreateICmpNE(Src, NegativeOne, "tobool");
+  }
+
+  assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+         "Unknown scalar type to convert");
+
+  // Because of the type rules of C, we often end up computing a logical value,
+  // then zero extending it to int, then wanting it as a logical value again.
+  // Optimize this common case.
+  if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) {
+    if (ZI->getOperand(0)->getType() ==
+        llvm::Type::getInt1Ty(CGF.getLLVMContext())) {
+      Value *Result = ZI->getOperand(0);
+      // If there aren't any more uses, zap the instruction to save space.
+      // Note that there can be more uses, for example if this
+      // is the result of an assignment.
+      if (ZI->use_empty())
+        ZI->eraseFromParent();
+      return Result;
+    }
+  }
+
+  // Compare against an integer or pointer null.
+  llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType());
+  return Builder.CreateICmpNE(Src, Zero, "tobool");
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+/// Returns 0 when the destination is void; otherwise the converted value.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+                                               QualType DstType) {
+  // Canonicalize so that type equality below ignores typedefs/sugar.
+  SrcType = CGF.getContext().getCanonicalType(SrcType);
+  DstType = CGF.getContext().getCanonicalType(DstType);
+  if (SrcType == DstType) return Src;
+
+  if (DstType->isVoidType()) return 0;
+
+  llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+
+  // Handle conversions to bool first, they are special: comparisons against 0.
+  if (DstType->isBooleanType())
+    return EmitConversionToBool(Src, SrcType);
+
+  const llvm::Type *DstTy = ConvertType(DstType);
+
+  // Ignore conversions like int -> uint.
+  if (Src->getType() == DstTy)
+    return Src;
+
+  // Handle pointer conversions next: pointers can only be converted to/from
+  // other pointers and integers. Check for pointer types in terms of LLVM, as
+  // some native types (like Obj-C id) may map to a pointer type.
+  if (isa<llvm::PointerType>(DstTy)) {
+    // The source value may be an integer, or a pointer.
+    if (isa<llvm::PointerType>(Src->getType()))
+      return Builder.CreateBitCast(Src, DstTy, "conv");
+
+    assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+    // First, convert to the correct width so that we control the kind of
+    // extension.
+    const llvm::Type *MiddleTy =
+          llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+    bool InputSigned = SrcType->isSignedIntegerType();
+    llvm::Value* IntResult =
+        Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+    // Then, cast to pointer.
+    return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+  }
+
+  if (isa<llvm::PointerType>(Src->getType())) {
+    // Must be an ptr to int cast.
+    assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+    return Builder.CreatePtrToInt(Src, DstTy, "conv");
+  }
+
+  // A scalar can be splatted to an extended vector of the same element type
+  if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
+    // Cast the scalar to element type
+    QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
+    llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+
+    // Insert the element in element zero of an undef vector
+    llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+    llvm::Value *Idx =
+        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+    // Splat the element across to all elements with an all-zero shuffle mask.
+    llvm::SmallVector<llvm::Constant*, 16> Args;
+    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    for (unsigned i = 0; i < NumElements; i++)
+      Args.push_back(llvm::ConstantInt::get(
+                                        llvm::Type::getInt32Ty(VMContext), 0));
+
+    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+    return Yay;
+  }
+
+  // Allow bitcast from vector to integer/fp of the same size.
+  if (isa<llvm::VectorType>(Src->getType()) ||
+      isa<llvm::VectorType>(DstTy))
+    return Builder.CreateBitCast(Src, DstTy, "conv");
+
+  // Finally, we have the arithmetic types: real int/float.
+  if (isa<llvm::IntegerType>(Src->getType())) {
+    bool InputSigned = SrcType->isSignedIntegerType();
+    if (isa<llvm::IntegerType>(DstTy))
+      return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+    else if (InputSigned)
+      return Builder.CreateSIToFP(Src, DstTy, "conv");
+    else
+      return Builder.CreateUIToFP(Src, DstTy, "conv");
+  }
+
+  assert(Src->getType()->isFloatingPoint() && "Unknown real conversion");
+  if (isa<llvm::IntegerType>(DstTy)) {
+    // Float -> int: signedness of the AST destination type picks the opcode.
+    if (DstType->isSignedIntegerType())
+      return Builder.CreateFPToSI(Src, DstTy, "conv");
+    else
+      return Builder.CreateFPToUI(Src, DstTy, "conv");
+  }
+
+  assert(DstTy->isFloatingPoint() && "Unknown real conversion");
+  // Float -> float: relies on the FP TypeIDs being ordered from narrower to
+  // wider, so a smaller destination ID means a truncating conversion.
+  if (DstTy->getTypeID() < Src->getType()->getTypeID())
+    return Builder.CreateFPTrunc(Src, DstTy, "conv");
+  else
+    return Builder.CreateFPExt(Src, DstTy, "conv");
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified
+/// complex type to the specified destination type, where the destination type
+/// is an LLVM scalar type.
+Value *ScalarExprEmitter::
+EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+                              QualType SrcTy, QualType DstTy) {
+  // Conversions are defined in terms of the complex operand's element type.
+  QualType EltTy = SrcTy->getAs<ComplexType>()->getElementType();
+
+  // Conversions to bool are special: a complex value is "true" if either
+  // component is nonzero, i.e.  Complex != 0  ->  (Real != 0) | (Imag != 0).
+  if (DstTy->isBooleanType()) {
+    Value *RealBool = EmitScalarConversion(Src.first, EltTy, DstTy);
+    Value *ImagBool = EmitScalarConversion(Src.second, EltTy, DstTy);
+    return Builder.CreateOr(RealBool, ImagBool, "tobool");
+  }
+
+  // C99 6.3.1.7p2: converting a complex value to a real type discards the
+  // imaginary part and converts the real part by the usual real conversion
+  // rules for the corresponding real type.
+  return EmitScalarConversion(Src.first, EltTy, DstTy);
+}
+
+
+//===----------------------------------------------------------------------===//
+//                            Visitor Methods
+//===----------------------------------------------------------------------===//
+
+// Fallback for any scalar expression kind we do not know how to emit: report
+// it as unsupported and produce a dummy value so codegen can keep going.
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+  CGF.ErrorUnsupported(E, "scalar expression");
+  QualType Ty = E->getType();
+  return Ty->isVoidType() ? 0 : llvm::UndefValue::get(CGF.ConvertType(Ty));
+}
+
+// __builtin_shufflevector: operands 0 and 1 are the input vectors, operands
+// 2..N are the constant mask elements.  Note the mask sub-expressions are
+// emitted before the vector operands, preserving the original evaluation
+// order.
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+  llvm::SmallVector<llvm::Constant*, 32> MaskElts;
+  unsigned NumSubExprs = E->getNumSubExprs();
+  for (unsigned Idx = 2; Idx != NumSubExprs; ++Idx)
+    MaskElts.push_back(cast<llvm::Constant>(CGF.EmitScalarExpr(E->getExpr(Idx))));
+
+  Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+  Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+  Value *Mask = llvm::ConstantVector::get(MaskElts.begin(), MaskElts.size());
+  return Builder.CreateShuffleVector(LHS, RHS, Mask, "shuffle");
+}
+// VisitMemberExpr - If the member access folds to an integer constant (e.g.
+// a static const data member), emit the constant -- but the base expression
+// must still be evaluated for its side effects.
+Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
+  Expr::EvalResult Result;
+  if (E->Evaluate(Result, CGF.getContext()) && Result.Val.isInt()) {
+    // 'a->b' evaluates the base as a scalar pointer; 'a.b' as an l-value.
+    if (E->isArrow())
+      CGF.EmitScalarExpr(E->getBase());
+    else
+      EmitLValue(E->getBase());
+    return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+  }
+  return EmitLoadOfLValue(E);
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+  TestAndClearIgnoreResultAssign();
+
+  // Emit subscript expressions in rvalue context's.  For most cases, this just
+  // loads the lvalue formed by the subscript expr.  However, we have to be
+  // careful, because the base of a vector subscript is occasionally an rvalue,
+  // so we can't get it as an lvalue.
+  if (!E->getBase()->getType()->isVectorType())
+    return EmitLoadOfLValue(E);
+
+  // Handle the vector case.  The base must be a vector, the index must be an
+  // integer value.
+  Value *Base = Visit(E->getBase());
+  Value *Idx  = Visit(E->getIdx());
+  bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
+  // extractelement indices are emitted as i32 here; widen or truncate the
+  // index accordingly, respecting its signedness.
+  Idx = Builder.CreateIntCast(Idx,
+                              llvm::Type::getInt32Ty(CGF.getLLVMContext()),
+                              IdxSigned,
+                              "vecidxcast");
+  return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
+
+// getMaskElt - Return element Idx of shuffle SVI's mask as an i32 constant,
+// offset by Off; an undefined (-1) mask slot stays undef.
+static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+                                  unsigned Off, const llvm::Type *I32Ty) {
+  int MaskVal = SVI->getMaskValue(Idx);
+  return MaskVal == -1
+    ? static_cast<llvm::Constant*>(llvm::UndefValue::get(I32Ty))
+    : llvm::ConstantInt::get(I32Ty, Off + MaskVal);
+}
+
+/// VisitInitListExpr - Emit a braced initializer as a scalar.  For vector
+/// types this builds the vector with insertelement/shufflevector, folding
+/// swizzle sources into the shuffles where possible; for non-vector types
+/// the single initializer element is emitted directly.
+Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
+  bool Ignore = TestAndClearIgnoreResultAssign();
+  (void)Ignore;
+  assert (Ignore == false && "init list ignored");
+  unsigned NumInitElements = E->getNumInits();
+  
+  if (E->hadArrayRangeDesignator())
+    CGF.ErrorUnsupported(E, "GNU array range designator extension");
+  
+  const llvm::VectorType *VType =
+    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+  
+  // We have a scalar in braces. Just use the first element.
+  if (!VType)
+    return Visit(E->getInit(0));
+  
+  unsigned ResElts = VType->getNumElements();
+  const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CGF.getLLVMContext());
+  
+  // Loop over initializers collecting the Value for each, and remembering 
+  // whether the source was swizzle (ExtVectorElementExpr).  This will allow
+  // us to fold the shuffle for the swizzle into the shuffle for the vector
+  // initializer, since LLVM optimizers generally do not want to touch
+  // shuffles.
+  unsigned CurIdx = 0;
+  bool VIsUndefShuffle = false;
+  llvm::Value *V = llvm::UndefValue::get(VType);
+  for (unsigned i = 0; i != NumInitElements; ++i) {
+    Expr *IE = E->getInit(i);
+    Value *Init = Visit(IE);
+    llvm::SmallVector<llvm::Constant*, 16> Args;
+    
+    const llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+    
+    // Handle scalar elements.  If the scalar initializer is actually one
+    // element of a different vector of the same width, use shuffle instead of 
+    // extract+insert.
+    if (!VVT) {
+      if (isa<ExtVectorElementExpr>(IE)) {
+        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
+
+        if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
+          Value *LHS = 0, *RHS = 0;
+          if (CurIdx == 0) {
+            // insert into undef -> shuffle (src, undef)
+            Args.push_back(C);
+            for (unsigned j = 1; j != ResElts; ++j)
+              Args.push_back(llvm::UndefValue::get(I32Ty));
+
+            LHS = EI->getVectorOperand();
+            RHS = V;
+            VIsUndefShuffle = true;
+          } else if (VIsUndefShuffle) {
+            // insert into undefshuffle && size match -> shuffle (v, src)
+            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
+            for (unsigned j = 0; j != CurIdx; ++j)
+              Args.push_back(getMaskElt(SVV, j, 0, I32Ty));
+            Args.push_back(llvm::ConstantInt::get(I32Ty, 
+                                                  ResElts + C->getZExtValue()));
+            for (unsigned j = CurIdx + 1; j != ResElts; ++j)
+              Args.push_back(llvm::UndefValue::get(I32Ty));
+            
+            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+            RHS = EI->getVectorOperand();
+            VIsUndefShuffle = false;
+          }
+          if (!Args.empty()) {
+            llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+            V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+            ++CurIdx;
+            continue;
+          }
+        }
+      }
+      // Fallback for scalar initializers: plain insertelement.
+      Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+      V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+      VIsUndefShuffle = false;
+      ++CurIdx;
+      continue;
+    }
+    
+    unsigned InitElts = VVT->getNumElements();
+
+    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's 
+    // input is the same width as the vector being constructed, generate an
+    // optimized shuffle of the swizzle input into the result.
+    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
+    if (isa<ExtVectorElementExpr>(IE)) {
+      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
+      Value *SVOp = SVI->getOperand(0);
+      const llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+      
+      if (OpTy->getNumElements() == ResElts) {
+        for (unsigned j = 0; j != CurIdx; ++j) {
+          // If the current vector initializer is a shuffle with undef, merge
+          // this shuffle directly into it.
+          if (VIsUndefShuffle) {
+            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
+                                      I32Ty));
+          } else {
+            Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+          }
+        }
+        for (unsigned j = 0, je = InitElts; j != je; ++j)
+          Args.push_back(getMaskElt(SVI, j, Offset, I32Ty));
+        for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
+          Args.push_back(llvm::UndefValue::get(I32Ty));
+
+        if (VIsUndefShuffle)
+          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+
+        Init = SVOp;
+      }
+    }
+
+    // Extend init to result vector length, and then shuffle its contribution
+    // to the vector initializer into V.
+    if (Args.empty()) {
+      for (unsigned j = 0; j != InitElts; ++j)
+        Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+      for (unsigned j = InitElts; j != ResElts; ++j)
+        Args.push_back(llvm::UndefValue::get(I32Ty));
+      llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+      Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
+                                         Mask, "vext");
+
+      Args.clear();
+      for (unsigned j = 0; j != CurIdx; ++j)
+        Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+      for (unsigned j = 0; j != InitElts; ++j)
+        Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset));
+      for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
+        Args.push_back(llvm::UndefValue::get(I32Ty));
+    }
+
+    // If V is undef, make sure it ends up on the RHS of the shuffle to aid
+    // merging subsequent shuffles into this one.
+    if (CurIdx == 0)
+      std::swap(V, Init);
+    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
+    V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+    VIsUndefShuffle = isa<llvm::UndefValue>(Init);
+    CurIdx += InitElts;
+  }
+  
+  // FIXME: evaluate codegen vs. shuffling against constant null vector.
+  // Zero-fill any remaining (uninitialized) elements.
+  const llvm::Type *EltTy = VType->getElementType();
+  
+  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
+    Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
+    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+  }
+  return V;
+}
+
+// ShouldNullCheckClassCastValue - Decide whether a class pointer cast needs
+// a runtime null check before the base/derived pointer adjustment.
+static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
+  // We always assume that 'this' is never null.
+  if (isa<CXXThisExpr>(CE->getSubExpr()))
+    return false;
+
+  // Likewise, an lvalue cast can never operate on a null pointer.
+  const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE);
+  if (ICE && ICE->isLvalueCast())
+    return false;
+
+  return true;
+}
+
+// VisitCastExpr - Emit code for an explicit or implicit cast.  Implicit casts
+// have to handle a more broad range of conversions than explicit casts, as they
+// handle things like function to ptr-to-function decay etc.
+Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
+  Expr *E = CE->getSubExpr();
+  QualType DestTy = CE->getType();
+  CastExpr::CastKind Kind = CE->getCastKind();
+  
+  // A cast to non-void uses its result, so clear any pending
+  // ignore-the-result flag before emitting the operand.
+  if (!DestTy->isVoidType())
+    TestAndClearIgnoreResultAssign();
+
+  // Since almost all cast kinds apply to scalars, this switch doesn't have
+  // a default case, so the compiler will warn on a missing case.  The cases
+  // are in the same order as in the CastKind enum.
+  switch (Kind) {
+  case CastExpr::CK_Unknown:
+    // FIXME: All casts should have a known kind!
+    //assert(0 && "Unknown cast kind!");
+    // Fall through to the generic conversion code after the switch.
+    break;
+
+  case CastExpr::CK_AnyPointerToObjCPointerCast:
+  case CastExpr::CK_AnyPointerToBlockPointerCast:
+  case CastExpr::CK_BitCast: {
+    Value *Src = Visit(const_cast<Expr*>(E));
+    return Builder.CreateBitCast(Src, ConvertType(DestTy));
+  }
+  case CastExpr::CK_NoOp:
+  case CastExpr::CK_UserDefinedConversion:
+    return Visit(const_cast<Expr*>(E));
+
+  case CastExpr::CK_BaseToDerived: {
+    // Downcast: adjust the pointer from base to derived, optionally guarded
+    // by a null check when the operand may be null.
+    const CXXRecordDecl *BaseClassDecl = 
+      E->getType()->getCXXRecordDeclForPointerType();
+    const CXXRecordDecl *DerivedClassDecl = 
+      DestTy->getCXXRecordDeclForPointerType();
+    
+    Value *Src = Visit(const_cast<Expr*>(E));
+    
+    bool NullCheckValue = ShouldNullCheckClassCastValue(CE);
+    return CGF.GetAddressOfDerivedClass(Src, BaseClassDecl, DerivedClassDecl, 
+                                        NullCheckValue);
+  }
+  case CastExpr::CK_DerivedToBase: {
+    const RecordType *DerivedClassTy = 
+      E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+    CXXRecordDecl *DerivedClassDecl = 
+      cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+    const RecordType *BaseClassTy = 
+      DestTy->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+    CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseClassTy->getDecl());
+    
+    Value *Src = Visit(const_cast<Expr*>(E));
+
+    bool NullCheckValue = ShouldNullCheckClassCastValue(CE);
+    return CGF.GetAddressOfBaseClass(Src, DerivedClassDecl, BaseClassDecl,
+                                     NullCheckValue);
+  }
+  case CastExpr::CK_Dynamic: {
+    Value *V = Visit(const_cast<Expr*>(E));
+    const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
+    return CGF.EmitDynamicCast(V, DCE);
+  }
+  case CastExpr::CK_ToUnion:
+    // NOTE(review): presumably handled by the aggregate emitter; the scalar
+    // path should never see these kinds.
+    assert(0 && "Should be unreachable!");
+    break;
+
+  case CastExpr::CK_ArrayToPointerDecay: {
+    assert(E->getType()->isArrayType() &&
+           "Array to pointer decay must have array source type!");
+
+    Value *V = EmitLValue(E).getAddress();  // Bitfields can't be arrays.
+
+    // Note that VLA pointers are always decayed, so we don't need to do
+    // anything here.
+    if (!E->getType()->isVariableArrayType()) {
+      assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
+      assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
+                                 ->getElementType()) &&
+             "Expected pointer to array");
+      V = Builder.CreateStructGEP(V, 0, "arraydecay");
+    }
+
+    return V;
+  }
+  case CastExpr::CK_FunctionToPointerDecay:
+    return EmitLValue(E).getAddress();
+
+  case CastExpr::CK_NullToMemberPointer:
+    return CGF.CGM.EmitNullConstant(DestTy);
+
+  case CastExpr::CK_BaseToDerivedMemberPointer:
+  case CastExpr::CK_DerivedToBaseMemberPointer: {
+    Value *Src = Visit(E);
+
+    // See if we need to adjust the pointer.
+    const CXXRecordDecl *BaseDecl = 
+      cast<CXXRecordDecl>(E->getType()->getAs<MemberPointerType>()->
+                          getClass()->getAs<RecordType>()->getDecl());
+    const CXXRecordDecl *DerivedDecl = 
+      cast<CXXRecordDecl>(CE->getType()->getAs<MemberPointerType>()->
+                          getClass()->getAs<RecordType>()->getDecl());
+    if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+      std::swap(DerivedDecl, BaseDecl);
+
+    // Adjust the member pointer by the base class offset, subtracting for
+    // derived-to-base and adding for base-to-derived.
+    if (llvm::Constant *Adj = 
+          CGF.CGM.GetNonVirtualBaseClassOffset(DerivedDecl, BaseDecl)) {
+      if (CE->getCastKind() == CastExpr::CK_DerivedToBaseMemberPointer)
+        Src = Builder.CreateSub(Src, Adj, "adj");
+      else
+        Src = Builder.CreateAdd(Src, Adj, "adj");
+    }
+    return Src;
+  }
+
+  case CastExpr::CK_ConstructorConversion:
+    // NOTE(review): presumably rewritten before codegen; the scalar path
+    // should never see this kind.
+    assert(0 && "Should be unreachable!");
+    break;
+
+  case CastExpr::CK_IntegralToPointer: {
+    Value *Src = Visit(const_cast<Expr*>(E));
+    
+    // First, convert to the correct width so that we control the kind of
+    // extension.
+    const llvm::Type *MiddleTy =
+      llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+    bool InputSigned = E->getType()->isSignedIntegerType();
+    llvm::Value* IntResult =
+      Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+    
+    return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+  }
+  case CastExpr::CK_PointerToIntegral: {
+    Value *Src = Visit(const_cast<Expr*>(E));
+    return Builder.CreatePtrToInt(Src, ConvertType(DestTy));
+  }
+  case CastExpr::CK_ToVoid: {
+    // Evaluate the operand for side effects only; no value results.
+    CGF.EmitAnyExpr(E, 0, false, true);
+    return 0;
+  }
+  case CastExpr::CK_VectorSplat: {
+    const llvm::Type *DstTy = ConvertType(DestTy);
+    Value *Elt = Visit(const_cast<Expr*>(E));
+
+    // Insert the element in element zero of an undef vector
+    llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+    llvm::Value *Idx =
+        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
+
+    // Splat the element across to all elements
+    llvm::SmallVector<llvm::Constant*, 16> Args;
+    unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+    for (unsigned i = 0; i < NumElements; i++)
+      Args.push_back(llvm::ConstantInt::get(
+                                        llvm::Type::getInt32Ty(VMContext), 0));
+
+    llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
+    llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+    return Yay;
+  }
+  case CastExpr::CK_IntegralCast:
+  case CastExpr::CK_IntegralToFloating:
+  case CastExpr::CK_FloatingToIntegral:
+  case CastExpr::CK_FloatingCast:
+    return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+
+  case CastExpr::CK_MemberPointerToBoolean:
+    return CGF.EvaluateExprAsBool(E);
+  }
+
+  // Handle cases where the source is an non-complex type.
+
+  if (!CGF.hasAggregateLLVMType(E->getType())) {
+    Value *Src = Visit(const_cast<Expr*>(E));
+
+    // Use EmitScalarConversion to perform the conversion.
+    return EmitScalarConversion(Src, E->getType(), DestTy);
+  }
+
+  if (E->getType()->isAnyComplexType()) {
+    // Handle cases where the source is a complex type.
+    // Decide which components actually need to be emitted: for a bool result
+    // both parts matter; for a cast to void only side effects matter.
+    bool IgnoreImag = true;
+    bool IgnoreImagAssign = true;
+    bool IgnoreReal = IgnoreResultAssign;
+    bool IgnoreRealAssign = IgnoreResultAssign;
+    if (DestTy->isBooleanType())
+      IgnoreImagAssign = IgnoreImag = false;
+    else if (DestTy->isVoidType()) {
+      IgnoreReal = IgnoreImag = false;
+      IgnoreRealAssign = IgnoreImagAssign = true;
+    }
+    CodeGenFunction::ComplexPairTy V
+      = CGF.EmitComplexExpr(E, IgnoreReal, IgnoreImag, IgnoreRealAssign,
+                            IgnoreImagAssign);
+    return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+  }
+
+  // Okay, this is a cast from an aggregate.  It must be a cast to void.  Just
+  // evaluate the result and return.
+  CGF.EmitAggExpr(E, 0, false, true);
+  return 0;
+}
+
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+  // GNU statement expression: the value is that of the last statement in the
+  // compound statement, unless the whole expression has void type.
+  bool WantResult = !E->getType()->isVoidType();
+  return CGF.EmitCompoundStmt(*E->getSubStmt(), WantResult).getScalarVal();
+}
+
+Value *ScalarExprEmitter::VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
+  // Find the address of the block-captured variable.
+  llvm::Value *Addr = CGF.GetAddrOfBlockDecl(E);
+
+  // __weak ObjC GC variables must be read through the runtime's read barrier.
+  if (E->getType().isObjCGCWeak())
+    return CGF.CGM.getObjCRuntime().EmitObjCWeakRead(CGF, Addr);
+
+  return Builder.CreateLoad(Addr, "tmp");
+}
+
+//===----------------------------------------------------------------------===//
+//                             Unary Operators
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+  TestAndClearIgnoreResultAssign();
+  // Emit the operand and negate it with the instruction matching its type.
+  Value *Operand = Visit(E->getSubExpr());
+  bool IsFloat = Operand->getType()->isFPOrFPVector();
+  return IsFloat ? Builder.CreateFNeg(Operand, "neg")
+                 : Builder.CreateNeg(Operand, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+  TestAndClearIgnoreResultAssign();
+  // Bitwise complement of the (already promoted) operand.
+  Value *SubV = Visit(E->getSubExpr());
+  return Builder.CreateNot(SubV, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+  // Evaluate the operand as an i1 truth value and invert it.
+  // TODO: Could dynamically modify easy computations here.  For example, if
+  // the operand is an icmp ne, turn into icmp eq.
+  Value *Negated = Builder.CreateNot(CGF.EvaluateExprAsBool(E->getSubExpr()),
+                                     "lnot");
+
+  // The result of ! has the expression's result type; zero-extend the i1.
+  return Builder.CreateZExt(Negated, ConvertType(E->getType()), "lnot.ext");
+}
+
+/// VisitSizeOfAlignOfExpr - Return the size or alignment of the type of
+/// argument of the sizeof expression as an integer.
+Value *
+ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) {
+  QualType TypeToSize = E->getTypeOfArgument();
+
+  // sizeof on a variable length array is the one case whose result is not a
+  // compile-time constant: it comes from the size the VLA was created with.
+  const VariableArrayType *VAT =
+    CGF.getContext().getAsVariableArrayType(TypeToSize);
+  if (E->isSizeOf() && VAT) {
+    if (E->isArgumentType()) {
+      // sizeof(type) - make sure to emit the VLA size.
+      CGF.EmitVLASize(TypeToSize);
+    } else {
+      // C99 6.5.3.4p2: If the argument is an expression of type
+      // VLA, it is evaluated.
+      CGF.EmitAnyExpr(E->getArgumentExpr());
+    }
+    return CGF.GetVLASize(VAT);
+  }
+
+  // If this isn't sizeof(vla), the result must be constant; use the constant
+  // folding logic so we don't have to duplicate it here.
+  Expr::EvalResult Result;
+  E->Evaluate(Result, CGF.getContext());
+  return llvm::ConstantInt::get(VMContext, Result.Val.getInt());
+}
+
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+  Expr *Sub = E->getSubExpr();
+  // __real on a complex value yields the first half of the pair; the
+  // imaginary half is emitted only for its side effects.
+  if (Sub->getType()->isAnyComplexType())
+    return CGF.EmitComplexExpr(Sub, false, true, false, true).first;
+
+  // __real on a scalar is just the scalar itself.
+  return Visit(Sub);
+}
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+  Expr *Sub = E->getSubExpr();
+  // __imag on a complex value yields the second half of the pair; the real
+  // half is emitted only for its side effects.
+  if (Sub->getType()->isAnyComplexType())
+    return CGF.EmitComplexExpr(Sub, true, false, true, false).second;
+
+  // __imag on a scalar returns zero.  Emit the subexpr to ensure side
+  // effects are evaluated, but not the actual value.
+  if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid)
+    CGF.EmitLValue(Sub);
+  else
+    CGF.EmitScalarExpr(Sub, true);
+  return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) {
+  // __builtin_offsetof: take the address computed for the designated member
+  // and reinterpret it as an integer of the result type.
+  Value *MemberAddr = EmitLValue(E->getSubExpr()).getAddress();
+  const llvm::Type *ResultTy = ConvertType(E->getType());
+  return Builder.CreatePtrToInt(MemberAddr, ResultTy, "offsetof");
+}
+
+//===----------------------------------------------------------------------===//
+//                           Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+  TestAndClearIgnoreResultAssign();
+  // Evaluate both operands (LHS first) and bundle them, together with the
+  // result type and the expression itself, for the arithmetic emitters.
+  BinOpInfo Info;
+  Info.LHS = Visit(E->getLHS());
+  Info.RHS = Visit(E->getRHS());
+  Info.Ty = E->getType();
+  Info.E = E;
+  return Info;
+}
+
+/// EmitCompoundAssign - Emit a compound assignment (+=, <<=, ...): load the
+/// LHS, convert it to the computation type, apply Func, convert the result
+/// back to the LHS type and store it.  Returns the value of the expression,
+/// or 0 when the result is ignored.
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+                      Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+  bool Ignore = TestAndClearIgnoreResultAssign();
+  QualType LHSTy = E->getLHS()->getType();
+
+  BinOpInfo OpInfo;
+
+  if (E->getComputationResultType()->isAnyComplexType()) {
+    // This needs to go through the complex expression emitter, but it's a tad
+    // complicated to do that... I'm leaving it out for now.  (Note that we do
+    // actually need the imaginary part of the RHS for multiplication and
+    // division.)
+    CGF.ErrorUnsupported(E, "complex compound assignment");
+    return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+  }
+
+  // Emit the RHS first.  __block variables need to have the rhs evaluated
+  // first, plus this should improve codegen a little.
+  OpInfo.RHS = Visit(E->getRHS());
+  OpInfo.Ty = E->getComputationResultType();
+  OpInfo.E = E;
+  // Load/convert the LHS.
+  LValue LHSLV = EmitCheckedLValue(E->getLHS());
+  OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy);
+  OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+                                    E->getComputationLHSType());
+
+  // Expand the binary operator.
+  Value *Result = (this->*Func)(OpInfo);
+
+  // Convert the result back to the LHS type.
+  Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+  // Store the result value into the LHS lvalue. Bit-fields are handled
+  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+  // 'An assignment expression has the value of the left operand after the
+  // assignment...'.
+  if (LHSLV.isBitfield()) {
+    if (!LHSLV.isVolatileQualified()) {
+      // Non-volatile bit-field: the store hands back the truncated value
+      // through &Result, so no reload is needed.
+      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy,
+                                         &Result);
+      return Result;
+    } else
+      CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy);
+  } else
+    CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, LHSTy);
+  // If the result is ignored, skip the reload of the stored value.
+  if (Ignore)
+    return 0;
+  return EmitLoadOfLValue(LHSLV, E->getType());
+}
+
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+  // Pick the division flavor from the operand/result types: floating point,
+  // unsigned integer, or signed integer.
+  if (Ops.LHS->getType()->isFPOrFPVector())
+    return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+  if (Ops.Ty->isUnsignedIntegerType())
+    return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+  return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+  // Rem in C can't be a floating point type: C99 6.5.5p2.
+  return Ops.Ty->isUnsignedIntegerType()
+           ? Builder.CreateURem(Ops.LHS, Ops.RHS, "rem")
+           : Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+/// EmitOverflowCheckedBinOp - Emit a signed add/sub/mul (plain or compound
+/// form) using the llvm.s{add,sub,mul}.with.overflow intrinsics.  On
+/// overflow, control branches to a block that calls through the global
+/// __overflow_handler pointer and the handler's result is used instead.
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+  unsigned IID;
+  unsigned OpID = 0;
+
+  // Map the AST opcode to the matching intrinsic and a small operation code
+  // passed to the runtime handler (1 = add, 2 = sub, 3 = mul).
+  switch (Ops.E->getOpcode()) {
+  case BinaryOperator::Add:
+  case BinaryOperator::AddAssign:
+    OpID = 1;
+    IID = llvm::Intrinsic::sadd_with_overflow;
+    break;
+  case BinaryOperator::Sub:
+  case BinaryOperator::SubAssign:
+    OpID = 2;
+    IID = llvm::Intrinsic::ssub_with_overflow;
+    break;
+  case BinaryOperator::Mul:
+  case BinaryOperator::MulAssign:
+    OpID = 3;
+    IID = llvm::Intrinsic::smul_with_overflow;
+    break;
+  default:
+    assert(false && "Unsupported operation for overflow detection");
+    IID = 0;
+  }
+  // Pack the op code into the upper bits and set the low bit.
+  // NOTE(review): the low bit presumably flags the operands as signed (only
+  // signed ops reach here) -- confirm against the __overflow_handler ABI.
+  OpID <<= 1;
+  OpID |= 1;
+
+  const llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, &opTy, 1);
+
+  // The intrinsic returns a {result, i1 overflow} pair.
+  Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
+  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+  // Branch in case of overflow.
+  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+  llvm::BasicBlock *overflowBB =
+    CGF.createBasicBlock("overflow", CGF.CurFn);
+  llvm::BasicBlock *continueBB =
+    CGF.createBasicBlock("overflow.continue", CGF.CurFn);
+
+  Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+  // Handle overflow
+
+  Builder.SetInsertPoint(overflowBB);
+
+  // Handler is:
+  // long long *__overflow_handler)(long long a, long long b, char op,
+  // char width)
+  std::vector<const llvm::Type*> handerArgTypes;
+  handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+  handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+  handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
+  handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
+  llvm::FunctionType *handlerTy = llvm::FunctionType::get(
+      llvm::Type::getInt64Ty(VMContext), handerArgTypes, false);
+  // __overflow_handler is a global pointer to the handler function; load it
+  // and call indirectly through it.
+  llvm::Value *handlerFunction =
+    CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
+        llvm::PointerType::getUnqual(handlerTy));
+  handlerFunction = Builder.CreateLoad(handlerFunction);
+
+  // Sign-extend the operands to 64 bits and pass the op code and original
+  // bit width so the handler knows what was being computed.
+  llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
+      Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)),
+      Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)),
+      llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
+      llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
+        cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+  // Truncate the handler's 64-bit result back to the operation type.
+  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+
+  Builder.CreateBr(continueBB);
+
+  // Set up the continuation
+  Builder.SetInsertPoint(continueBB);
+  // Get the correct result: the intrinsic's value on the normal path, the
+  // handler's value on the overflow path.
+  llvm::PHINode *phi = Builder.CreatePHI(opTy);
+  phi->reserveOperandSpace(2);
+  phi->addIncoming(result, initialBB);
+  phi->addIncoming(handlerResult, overflowBB);
+
+  return phi;
+}
+
+/// EmitAdd - Emit LHS + RHS, handling ordinary arithmetic as well as the
+/// pointer + integer and integer + pointer forms.
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
+  if (!Ops.Ty->isAnyPointerType()) {
+    // Non-pointer addition.
+    if (CGF.getContext().getLangOptions().OverflowChecking &&
+        Ops.Ty->isSignedIntegerType())
+      return EmitOverflowCheckedBinOp(Ops);
+
+    if (Ops.LHS->getType()->isFPOrFPVector())
+      return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add");
+
+    // Signed integer overflow is undefined behavior.
+    if (Ops.Ty->isSignedIntegerType())
+      return Builder.CreateNSWAdd(Ops.LHS, Ops.RHS, "add");
+
+    return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add");
+  }
+
+  if (Ops.Ty->isPointerType() &&
+      Ops.Ty->getAs<PointerType>()->isVariableArrayType()) {
+    // The amount of the addition needs to account for the VLA size
+    CGF.ErrorUnsupported(Ops.E, "VLA pointer addition");
+  }
+  // Work out which operand is the pointer and which is the index; the
+  // pointer may appear on either side of the +.
+  Value *Ptr, *Idx;
+  Expr *IdxExp;
+  const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>();
+  const ObjCObjectPointerType *OPT =
+    Ops.E->getLHS()->getType()->getAs<ObjCObjectPointerType>();
+  if (PT || OPT) {
+    Ptr = Ops.LHS;
+    Idx = Ops.RHS;
+    IdxExp = Ops.E->getRHS();
+  } else {  // int + pointer
+    PT = Ops.E->getRHS()->getType()->getAs<PointerType>();
+    OPT = Ops.E->getRHS()->getType()->getAs<ObjCObjectPointerType>();
+    assert((PT || OPT) && "Invalid add expr");
+    Ptr = Ops.RHS;
+    Idx = Ops.LHS;
+    IdxExp = Ops.E->getLHS();
+  }
+
+  // If the index is narrower than the pointer, widen it first.
+  unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+  if (Width < CGF.LLVMPointerWidth) {
+    // Zero or sign extend the pointer value based on whether the index is
+    // signed or not.
+    const llvm::Type *IdxType =
+        llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+    if (IdxExp->getType()->isSignedIntegerType())
+      Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+    else
+      Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+  }
+  const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
+  // Handle interface types, which are not represented with a concrete type.
+  // Scale the index by the interface size by hand and GEP on i8*.
+  if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
+    llvm::Value *InterfaceSize =
+      llvm::ConstantInt::get(Idx->getType(),
+          CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
+    Idx = Builder.CreateMul(Idx, InterfaceSize);
+    const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+    return Builder.CreateBitCast(Res, Ptr->getType());
+  }
+
+  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
+  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
+  // future proof.
+  if (ElementType->isVoidType() || ElementType->isFunctionType()) {
+    const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+    Value *Casted = Builder.CreateBitCast(Ptr, i8Ty);
+    Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr");
+    return Builder.CreateBitCast(Res, Ptr->getType());
+  }
+
+  // Common case: a GEP on the pointer's own element type; inbounds since C
+  // pointer arithmetic must stay within (one past) the object.
+  return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr");
+}
+
+/// EmitSub - Emit LHS - RHS, handling ordinary arithmetic as well as the
+/// pointer - integer and pointer - pointer forms.
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
+  if (!isa<llvm::PointerType>(Ops.LHS->getType())) {
+    // Scalar (non-pointer) subtraction.
+    if (CGF.getContext().getLangOptions().OverflowChecking
+        && Ops.Ty->isSignedIntegerType())
+      return EmitOverflowCheckedBinOp(Ops);
+
+    if (Ops.LHS->getType()->isFPOrFPVector())
+      return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
+    return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
+  }
+
+  if (Ops.E->getLHS()->getType()->isPointerType() &&
+      Ops.E->getLHS()->getType()->getAs<PointerType>()->isVariableArrayType()) {
+    // The amount of the addition needs to account for the VLA size for
+    // ptr-int
+    // The amount of the division needs to account for the VLA size for
+    // ptr-ptr.
+    CGF.ErrorUnsupported(Ops.E, "VLA pointer subtraction");
+  }
+
+  const QualType LHSType = Ops.E->getLHS()->getType();
+  const QualType LHSElementType = LHSType->getPointeeType();
+  if (!isa<llvm::PointerType>(Ops.RHS->getType())) {
+    // pointer - int: negate the index and index off the pointer.
+    Value *Idx = Ops.RHS;
+    unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth();
+    if (Width < CGF.LLVMPointerWidth) {
+      // Zero or sign extend the pointer value based on whether the index is
+      // signed or not.
+      const llvm::Type *IdxType =
+          llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+      if (Ops.E->getRHS()->getType()->isSignedIntegerType())
+        Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
+      else
+        Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext");
+    }
+    Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
+
+    // Handle interface types, which are not represented with a concrete type.
+    // Scale the index by the interface size by hand and GEP on i8*.
+    if (const ObjCInterfaceType *OIT =
+        dyn_cast<ObjCInterfaceType>(LHSElementType)) {
+      llvm::Value *InterfaceSize =
+        llvm::ConstantInt::get(Idx->getType(),
+                               CGF.getContext().
+                                 getTypeSizeInChars(OIT).getQuantity());
+      Idx = Builder.CreateMul(Idx, InterfaceSize);
+      const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+      // Name the GEP "sub.ptr" for consistency with the other subtraction
+      // paths; it previously said "add.ptr", copied from EmitAdd.
+      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
+      return Builder.CreateBitCast(Res, Ops.LHS->getType());
+    }
+
+    // Explicitly handle GNU void* and function pointer arithmetic
+    // extensions. The GNU void* casts amount to no-ops since our void* type is
+    // i8*, but this is future proof.
+    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+      const llvm::Type *i8Ty = llvm::Type::getInt8PtrTy(VMContext);
+      Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty);
+      Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr");
+      return Builder.CreateBitCast(Res, Ops.LHS->getType());
+    }
+
+    return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr");
+  } else {
+    // pointer - pointer
+    Value *LHS = Ops.LHS;
+    Value *RHS = Ops.RHS;
+
+    CharUnits ElementSize;
+
+    // Handle GCC extension for pointer arithmetic on void* and function pointer
+    // types.
+    if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) {
+      ElementSize = CharUnits::One();
+    } else {
+      ElementSize = CGF.getContext().getTypeSizeInChars(LHSElementType);
+    }
+
+    // Compute the byte distance between the pointers, then divide by the
+    // element size to get the element count.
+    const llvm::Type *ResultType = ConvertType(Ops.Ty);
+    LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast");
+    RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+    Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+    // Optimize out the shift for element size of 1.
+    if (ElementSize.isOne())
+      return BytesBetween;
+
+    // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+    // pointer difference in C is only defined in the case where both operands
+    // are pointing to elements of an array.
+    Value *BytesPerElt =
+        llvm::ConstantInt::get(ResultType, ElementSize.getQuantity());
+    return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div");
+  }
+}
+
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+  // LLVM requires both shift operands to have the same type, while C only
+  // requires integer types: promote or truncate the RHS to match the LHS.
+  Value *Amt = Ops.RHS;
+  if (Ops.LHS->getType() != Amt->getType())
+    Amt = Builder.CreateIntCast(Amt, Ops.LHS->getType(), false, "sh_prom");
+
+  // Under -fcatch-undefined-behavior, trap when the shift amount is >= the
+  // bit width of the shifted operand.
+  if (CGF.CatchUndefined && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+    unsigned WidthBits =
+      cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+    llvm::Value *InRange =
+      Builder.CreateICmpULT(Amt,
+                            llvm::ConstantInt::get(Amt->getType(), WidthBits));
+    llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+    CGF.Builder.CreateCondBr(InRange, Cont, CGF.getTrapBB());
+    CGF.EmitBlock(Cont);
+  }
+
+  return Builder.CreateShl(Ops.LHS, Amt, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+  // LLVM requires both shift operands to have the same type, while C only
+  // requires integer types: promote or truncate the RHS to match the LHS.
+  Value *Amt = Ops.RHS;
+  if (Ops.LHS->getType() != Amt->getType())
+    Amt = Builder.CreateIntCast(Amt, Ops.LHS->getType(), false, "sh_prom");
+
+  // Under -fcatch-undefined-behavior, trap when the shift amount is >= the
+  // bit width of the shifted operand.
+  if (CGF.CatchUndefined && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+    unsigned WidthBits =
+      cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+    llvm::Value *InRange =
+      Builder.CreateICmpULT(Amt,
+                            llvm::ConstantInt::get(Amt->getType(), WidthBits));
+    llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+    CGF.Builder.CreateCondBr(InRange, Cont, CGF.getTrapBB());
+    CGF.EmitBlock(Cont);
+  }
+
+  // Unsigned operands get a logical shift; signed operands an arithmetic
+  // (sign-propagating) shift.
+  return Ops.Ty->isUnsignedIntegerType()
+           ? Builder.CreateLShr(Ops.LHS, Amt, "shr")
+           : Builder.CreateAShr(Ops.LHS, Amt, "shr");
+}
+
+/// EmitCompare - Emit a comparison.  The three predicate arguments select the
+/// opcode to use for unsigned-integer/pointer, signed-integer, and
+/// floating-point operands respectively.
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
+                                      unsigned SICmpOpc, unsigned FCmpOpc) {
+  TestAndClearIgnoreResultAssign();
+  Value *Result;
+  QualType LHSTy = E->getLHS()->getType();
+  if (LHSTy->isMemberFunctionPointerType()) {
+    // Member function pointers are two-field aggregates (function pointer at
+    // index 0, adjustment at index 1); materialize both operands into
+    // temporaries so the fields can be loaded and compared pairwise.
+    Value *LHSPtr = CGF.EmitAnyExprToTemp(E->getLHS()).getAggregateAddr();
+    Value *RHSPtr = CGF.EmitAnyExprToTemp(E->getRHS()).getAggregateAddr();
+    llvm::Value *LHSFunc = Builder.CreateStructGEP(LHSPtr, 0);
+    LHSFunc = Builder.CreateLoad(LHSFunc);
+    llvm::Value *RHSFunc = Builder.CreateStructGEP(RHSPtr, 0);
+    RHSFunc = Builder.CreateLoad(RHSFunc);
+    Value *ResultF = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+                                        LHSFunc, RHSFunc, "cmp.func");
+    Value *NullPtr = llvm::Constant::getNullValue(LHSFunc->getType());
+    Value *ResultNull = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+                                           LHSFunc, NullPtr, "cmp.null");
+    llvm::Value *LHSAdj = Builder.CreateStructGEP(LHSPtr, 1);
+    LHSAdj = Builder.CreateLoad(LHSAdj);
+    llvm::Value *RHSAdj = Builder.CreateStructGEP(RHSPtr, 1);
+    RHSAdj = Builder.CreateLoad(RHSAdj);
+    Value *ResultA = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+                                        LHSAdj, RHSAdj, "cmp.adj");
+    // The adjustment only matters for non-null member pointers:
+    //   ==:  func-eq && (func-is-null || adj-eq)
+    //   !=:  func-ne || (func-not-null && adj-ne)
+    if (E->getOpcode() == BinaryOperator::EQ) {
+      Result = Builder.CreateOr(ResultNull, ResultA, "or.na");
+      Result = Builder.CreateAnd(Result, ResultF, "and.f");
+    } else {
+      assert(E->getOpcode() == BinaryOperator::NE &&
+             "Member pointer comparison other than == or != ?");
+      Result = Builder.CreateAnd(ResultNull, ResultA, "and.na");
+      Result = Builder.CreateOr(Result, ResultF, "or.f");
+    }
+  } else if (!LHSTy->isAnyComplexType()) {
+    // Ordinary scalar or vector comparison.
+    Value *LHS = Visit(E->getLHS());
+    Value *RHS = Visit(E->getRHS());
+
+    if (LHS->getType()->isFPOrFPVector()) {
+      Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+                                  LHS, RHS, "cmp");
+    } else if (LHSTy->isSignedIntegerType()) {
+      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
+                                  LHS, RHS, "cmp");
+    } else {
+      // Unsigned integers and pointers.
+      Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+                                  LHS, RHS, "cmp");
+    }
+
+    // If this is a vector comparison, sign extend the result to the appropriate
+    // vector integer type and return it (don't convert to bool).
+    if (LHSTy->isVectorType())
+      return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+
+  } else {
+    // Complex Comparison: can only be an equality comparison.
+    CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
+    CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
+
+    QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+
+    // Compare the real and imaginary parts separately, then combine.
+    Value *ResultR, *ResultI;
+    if (CETy->isRealFloatingType()) {
+      ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+                                   LHS.first, RHS.first, "cmp.r");
+      ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+                                   LHS.second, RHS.second, "cmp.i");
+    } else {
+      // Complex comparisons can only be equality comparisons.  As such, signed
+      // and unsigned opcodes are the same.
+      ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+                                   LHS.first, RHS.first, "cmp.r");
+      ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+                                   LHS.second, RHS.second, "cmp.i");
+    }
+
+    // == requires both parts equal; != requires either part unequal.
+    if (E->getOpcode() == BinaryOperator::EQ) {
+      Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+    } else {
+      assert(E->getOpcode() == BinaryOperator::NE &&
+             "Complex comparison other than == or != ?");
+      Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+    }
+  }
+
+  // The i1 comparison result is converted from bool to the expression's
+  // result type.
+  return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+}
+
+/// VisitBinAssign - Emit a simple assignment; returns the assigned value, or
+/// 0 when the result of the assignment expression is ignored.
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+  bool Ignore = TestAndClearIgnoreResultAssign();
+
+  // __block variables need to have the rhs evaluated first, plus this should
+  // improve codegen just a little.
+  Value *RHS = Visit(E->getRHS());
+  LValue LHS = EmitCheckedLValue(E->getLHS());
+
+  // Store the value into the LHS.  Bit-fields are handled specially
+  // because the result is altered by the store, i.e., [C99 6.5.16p1]
+  // 'An assignment expression has the value of the left operand after
+  // the assignment...'.
+  if (LHS.isBitfield()) {
+    if (!LHS.isVolatileQualified()) {
+      // Non-volatile bit-field: the store hands back the truncated value
+      // through &RHS, so no reload is needed.
+      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType(),
+                                         &RHS);
+      return RHS;
+    } else
+      CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, E->getType());
+  } else
+    CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType());
+  // If the result is ignored, skip the reload of the stored value.
+  if (Ignore)
+    return 0;
+  return EmitLoadOfLValue(LHS, E->getType());
+}
+
+/// VisitBinLAnd - Emit the short-circuiting && operator.
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+  const llvm::Type *ResTy = ConvertType(E->getType());
+  
+  // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
+  // If we have 1 && X, just emit X without inserting the control flow.
+  // (ConstantFoldsToSimpleInteger returns 1 when the LHS folds to true,
+  // -1 when it folds to false, and 0 when it doesn't fold.)
+  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+    if (Cond == 1) { // If we have 1 && X, just emit X.
+      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+      // ZExt result to int or bool.
+      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
+    }
+
+    // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
+    // (The RHS can't be skipped if it contains a label we might jump to.)
+    if (!CGF.ContainsLabel(E->getRHS()))
+      return llvm::Constant::getNullValue(ResTy);
+  }
+
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+  llvm::BasicBlock *RHSBlock  = CGF.createBasicBlock("land.rhs");
+
+  // Branch on the LHS first.  If it is false, go to the failure (cont) block.
+  CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+
+  // Any edges into the ContBlock are now from an (indeterminate number of)
+  // edges from this first condition.  All of these values will be false.  Start
+  // setting up the PHI node in the Cont Block for this.
+  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+                                            "", ContBlock);
+  PN->reserveOperandSpace(2);  // Normal case, two inputs.
+  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+       PI != PE; ++PI)
+    PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
+
+  // The RHS is only evaluated when the LHS is true; mark the region as
+  // conditionally executed.
+  CGF.BeginConditionalBranch();
+  CGF.EmitBlock(RHSBlock);
+  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+  CGF.EndConditionalBranch();
+
+  // Reaquire the RHS block, as there may be subblocks inserted.
+  RHSBlock = Builder.GetInsertBlock();
+
+  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
+  // into the phi node for the edge with the value of RHSCond.
+  CGF.EmitBlock(ContBlock);
+  PN->addIncoming(RHSCond, RHSBlock);
+
+  // ZExt result to int.
+  return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
+}
+
+/// VisitBinLOr - Emit the short-circuiting || operator.
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+  const llvm::Type *ResTy = ConvertType(E->getType());
+  
+  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
+  // If we have 0 || X, just emit X without inserting the control flow.
+  // (ConstantFoldsToSimpleInteger returns 1 when the LHS folds to true,
+  // -1 when it folds to false, and 0 when it doesn't fold.)
+  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getLHS())) {
+    if (Cond == -1) { // If we have 0 || X, just emit X.
+      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+      // ZExt result to int or bool.
+      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
+    }
+
+    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
+    // (The RHS can't be skipped if it contains a label we might jump to.)
+    if (!CGF.ContainsLabel(E->getRHS()))
+      return llvm::ConstantInt::get(ResTy, 1);
+  }
+
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+  // Branch on the LHS first.  If it is true, go to the success (cont) block.
+  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+
+  // Any edges into the ContBlock are now from an (indeterminate number of)
+  // edges from this first condition.  All of these values will be true.  Start
+  // setting up the PHI node in the Cont Block for this.
+  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext),
+                                            "", ContBlock);
+  PN->reserveOperandSpace(2);  // Normal case, two inputs.
+  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+       PI != PE; ++PI)
+    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
+
+  // The RHS is only evaluated when the LHS is false; mark the region as
+  // conditionally executed.
+  CGF.BeginConditionalBranch();
+
+  // Emit the RHS condition as a bool value.
+  CGF.EmitBlock(RHSBlock);
+  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+  CGF.EndConditionalBranch();
+
+  // Reaquire the RHS block, as there may be subblocks inserted.
+  RHSBlock = Builder.GetInsertBlock();
+
+  // Emit an unconditional branch from this block to ContBlock.  Insert an entry
+  // into the phi node for the edge with the value of RHSCond.
+  CGF.EmitBlock(ContBlock);
+  PN->addIncoming(RHSCond, RHSBlock);
+
+  // ZExt result to int.
+  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
+}
+
+/// VisitBinComma - The comma operator: evaluate the LHS purely for its side
+/// effects, then produce the RHS value.
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+  CGF.EmitStmt(E->getLHS());
+  // Re-establish an insertion point in case emitting the LHS ended the
+  // current block.
+  CGF.EnsureInsertPoint();
+  return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+//                             Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally.  This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
+                                                   CodeGenFunction &CGF) {
+  // Look through parentheses.
+  if (const ParenExpr *Paren = dyn_cast<ParenExpr>(E))
+    return isCheapEnoughToEvaluateUnconditionally(Paren->getSubExpr(), CGF);
+
+  // TODO: Allow anything we can constant fold to an integer or fp constant.
+  if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) ||
+      isa<FloatingLiteral>(E))
+    return true;
+
+  // Non-volatile automatic variables too, to get "cond ? X : Y" where
+  // X and Y are local variables.
+  const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+  if (!DRE)
+    return false;
+  const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+  if (!VD || !VD->hasLocalStorage())
+    return false;
+  return !CGF.getContext().getCanonicalType(VD->getType())
+             .isVolatileQualified();
+}
+
+
+/// VisitConditionalOperator - Emit a scalar ?: expression.  Three strategies
+/// are tried in decreasing order of preference: (1) constant-fold the
+/// condition and emit only the live arm, (2) emit a 'select' instruction when
+/// both arms are cheap and safe to evaluate unconditionally, (3) emit full
+/// control flow and merge the arms with a PHI node.  Also handles the GNU
+/// "cond ?: rhs" extension, in which the LHS is missing and the condition
+/// value itself is reused as the true result.
+Value *ScalarExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+  TestAndClearIgnoreResultAssign();
+  // If the condition constant folds and can be elided, try to avoid emitting
+  // the condition and the dead arm.
+  if (int Cond = CGF.ConstantFoldsToSimpleInteger(E->getCond())){
+    // Cond is 1 if the condition folded to true, -1 if it folded to false.
+    Expr *Live = E->getLHS(), *Dead = E->getRHS();
+    if (Cond == -1)
+      std::swap(Live, Dead);
+
+    // If the dead side doesn't have labels we need, and if the Live side isn't
+    // the gnu missing ?: extension (which we could handle, but don't bother
+    // to), just emit the Live part.
+    if ((!Dead || !CGF.ContainsLabel(Dead)) &&  // No labels in dead part
+        Live)                                   // Live part isn't missing.
+      return Visit(Live);
+  }
+
+
+  // If this is a really simple expression (like x ? 4 : 5), emit this as a
+  // select instead of as control flow.  We can only do this if it is cheap and
+  // safe to evaluate the LHS and RHS unconditionally.
+  if (E->getLHS() && isCheapEnoughToEvaluateUnconditionally(E->getLHS(),
+                                                            CGF) &&
+      isCheapEnoughToEvaluateUnconditionally(E->getRHS(), CGF)) {
+    llvm::Value *CondV = CGF.EvaluateExprAsBool(E->getCond());
+    llvm::Value *LHS = Visit(E->getLHS());
+    llvm::Value *RHS = Visit(E->getRHS());
+    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+  }
+
+
+  // General case: evaluate each arm in its own block and merge with a PHI.
+  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+  Value *CondVal = 0;
+
+  // If we don't have the GNU missing condition extension, emit a branch on bool
+  // the normal way.
+  if (E->getLHS()) {
+    // Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for
+    // the branch on bool.
+    CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+  } else {
+    // Otherwise, for the ?: extension, evaluate the conditional and then
+    // convert it to bool the hard way.  We do this explicitly because we need
+    // the unconverted value for the missing middle value of the ?:.
+    CondVal = CGF.EmitScalarExpr(E->getCond());
+
+    // In some cases, EmitScalarConversion will delete the "CondVal" expression
+    // if there are no extra uses (an optimization).  Inhibit this by making an
+    // extra dead use, because we're going to add a use of CondVal later.  We
+    // don't use the builder for this, because we don't want it to get optimized
+    // away.  This leaves dead code, but the ?: extension isn't common.
+    new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder",
+                          Builder.GetInsertBlock());
+
+    Value *CondBoolVal =
+      CGF.EmitScalarConversion(CondVal, E->getCond()->getType(),
+                               CGF.getContext().BoolTy);
+    Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
+  }
+
+  CGF.BeginConditionalBranch();
+  CGF.EmitBlock(LHSBlock);
+
+  // Handle the GNU extension for missing LHS.
+  Value *LHS;
+  if (E->getLHS())
+    LHS = Visit(E->getLHS());
+  else    // Perform promotions, to handle cases like "short ?: int"
+    LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+
+  CGF.EndConditionalBranch();
+  // Re-read the insertion block: emitting the arm may have changed it.
+  LHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBranch(ContBlock);
+
+  CGF.BeginConditionalBranch();
+  CGF.EmitBlock(RHSBlock);
+
+  Value *RHS = Visit(E->getRHS());
+  CGF.EndConditionalBranch();
+  RHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBranch(ContBlock);
+
+  CGF.EmitBlock(ContBlock);
+
+  // If the LHS or RHS is a throw expression, it will be legitimately null.
+  if (!LHS)
+    return RHS;
+  if (!RHS)
+    return LHS;
+
+  // Create a PHI node for the real part.
+  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
+  PN->reserveOperandSpace(2);
+  PN->addIncoming(LHS, LHSBlock);
+  PN->addIncoming(RHS, RHSBlock);
+  return PN;
+}
+
+/// VisitChooseExpr - __builtin_choose_expr is resolved at compile time; just
+/// emit whichever sub-expression Sema chose.
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+  return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+/// VisitVAArgExpr - Emit a va_arg read.  Prefer the target-specific ABI
+/// lowering (EmitVAArg); if the target doesn't handle the type, fall back to
+/// the generic LLVM 'va_arg' instruction.
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+  // If EmitVAArg fails, we fall back to the LLVM instruction.
+  if (!ArgPtr)
+    return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+
+  // FIXME Volatility.
+  return Builder.CreateLoad(ArgPtr);
+}
+
+/// VisitBlockExpr - Emit a block literal (^{...}) as its runtime temporary.
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) {
+  return CGF.BuildBlockLiteralTmp(BE);
+}
+
+//===----------------------------------------------------------------------===//
+//                         Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of scalar
+/// type.  IgnoreResultAssign indicates that the caller does not need the
+/// result of a top-level assignment subexpression (an optimization hint
+/// consumed by ScalarExprEmitter via TestAndClearIgnoreResultAssign).
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+  assert(E && !hasAggregateLLVMType(E->getType()) &&
+         "Invalid scalar expression to emit");
+
+  return ScalarExprEmitter(*this, IgnoreResultAssign)
+    .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+                                             QualType DstTy) {
+  assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+         "Invalid scalar expression to emit");
+  // Delegate to the visitor, which owns all scalar conversion logic.
+  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+                                                      QualType SrcTy,
+                                                      QualType DstTy) {
+  assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
+         "Invalid complex -> scalar conversion");
+  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+                                                                DstTy);
+}
+
+/// EmitObjCIsaExpr - Emit an lvalue for the 'isa' member of an Objective-C
+/// object: object->isa or (*object).isa, generated as for *(Class*)object.
+LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
+  llvm::Value *V;
+  // object->isa or (*object).isa
+  // Generate code as for: *(Class*)object
+  // build Class* type
+  const llvm::Type *ClassPtrTy = ConvertType(E->getType());
+
+  Expr *BaseExpr = E->getBase();
+  if (BaseExpr->isLvalue(getContext()) != Expr::LV_Valid) {
+    // Rvalue base: materialize it in a temporary so we have an address.
+    V = CreateTempAlloca(ClassPtrTy, "resval");
+    llvm::Value *Src = EmitScalarExpr(BaseExpr);
+    Builder.CreateStore(Src, V);
+  }
+  else {
+      if (E->isArrow())
+        V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
+      else
+        V  = EmitLValue(BaseExpr).getAddress();
+  }
+  
+  // build Class* type
+  ClassPtrTy = ClassPtrTy->getPointerTo();
+  V = Builder.CreateBitCast(V, ClassPtrTy);
+  LValue LV = LValue::MakeAddr(V, MakeQualifiers(E->getType()));
+  return LV;
+}
+
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..595f8f6
--- /dev/null
+++ b/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,654 @@
+//===----- CGObjC.cpp - Emit LLVM Code for Objective-C --------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+{
+  llvm::Constant *C = 
+      CGM.getObjCRuntime().GenerateConstantString(E->getString());
+  // FIXME: This bitcast should just be made an invariant on the Runtime.
+  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// Emit a selector reference (@selector(...)) via the runtime.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+  // Untyped selector.
+  // Note that this implementation allows for non-constant strings to be passed
+  // as arguments to @selector().  Currently, the only thing preventing this
+  // behaviour is the type checking in the front end.
+  return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
+}
+
+/// Emit a @protocol(...) expression via the runtime.
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+  // FIXME: This should pass the Decl not the name.
+  return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
+}
+
+
+/// EmitObjCMessageExpr - Emit an Objective-C message send.  Classifies the
+/// receiver (instance, class, or super — in either an instance or class
+/// method) in runtime-independent code, then hands off to the runtime to
+/// generate the actual dispatch.
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
+  // Only the lookup mechanism and first two arguments of the method
+  // implementation vary between runtimes.  We can get the receiver and
+  // arguments in generic code.
+
+  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+  const Expr *ReceiverExpr = E->getReceiver();
+  bool isSuperMessage = false;
+  bool isClassMessage = false;
+  // Find the receiver.  A null receiver expression means a class message
+  // (the class is named syntactically rather than computed).
+  llvm::Value *Receiver;
+  if (!ReceiverExpr) {
+    const ObjCInterfaceDecl *OID = E->getClassInfo().first;
+
+    // Very special case, super send in class method. The receiver is
+    // self (the class object) and the send uses super semantics.
+    if (!OID) {
+      assert(E->getClassName()->isStr("super") &&
+             "Unexpected missing class interface in message send.");
+      isSuperMessage = true;
+      Receiver = LoadObjCSelf();
+    } else {
+      Receiver = Runtime.GetClass(Builder, OID);
+    }
+
+    isClassMessage = true;
+  } else if (isa<ObjCSuperExpr>(E->getReceiver())) {
+    // [super sel] in an instance method.
+    isSuperMessage = true;
+    Receiver = LoadObjCSelf();
+  } else {
+    // Ordinary instance message: evaluate the receiver expression.
+    Receiver = EmitScalarExpr(E->getReceiver());
+  }
+
+  CallArgList Args;
+  EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
+
+  if (isSuperMessage) {
+    // super is only valid in an Objective-C method
+    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+    return Runtime.GenerateMessageSendSuper(*this, E->getType(),
+                                            E->getSelector(),
+                                            OMD->getClassInterface(),
+                                            isCategoryImpl,
+                                            Receiver,
+                                            isClassMessage,
+                                            Args,
+                                            E->getMethodDecl());
+  }
+
+  return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
+                                     Receiver, isClassMessage, Args,
+                                     E->getMethodDecl());
+}
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.  The implicit 'self' and '_cmd' parameters are added
+/// ahead of the declared parameters, matching the runtime's calling
+/// convention for method IMPs.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+                                      const ObjCContainerDecl *CD) {
+  FunctionArgList Args;
+  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+  const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
+  CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+  // Implicit parameters come first: self, then _cmd.
+  Args.push_back(std::make_pair(OMD->getSelfDecl(),
+                                OMD->getSelfDecl()->getType()));
+  Args.push_back(std::make_pair(OMD->getCmdDecl(),
+                                OMD->getCmdDecl()->getType()));
+
+  for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+       E = OMD->param_end(); PI != E; ++PI)
+    Args.push_back(std::make_pair(*PI, (*PI)->getType()));
+
+  StartFunction(OMD, OMD->getResultType(), Fn, Args, OMD->getLocEnd());
+}
+
+/// Generate an Objective-C method.  An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+  // Check if we should generate debug info for this method.
+  if (CGM.getDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
+    DebugInfo = CGM.getDebugInfo();
+  StartObjCMethod(OMD, OMD->getClassInterface());
+  EmitStmt(OMD->getBody());
+  FinishFunction(OMD->getBodyRBrace());
+}
+
+// FIXME: I wasn't sure about the synthesis approach. If we end up generating an
+// AST for the whole body we can just fall back to having a GenerateFunction
+// which takes the body Stmt.
+
+/// GenerateObjCGetter - Generate an Objective-C property getter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+                                         const ObjCPropertyImplDecl *PID) {
+  ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+  ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+  assert(OMD && "Invalid call to generate getter (empty method)");
+  // FIXME: This is rather murky, we create this here since they will not have
+  // been created by Sema for us.
+  OMD->createImplicitParams(getContext(), IMP->getClassInterface());
+  StartObjCMethod(OMD, IMP->getClassInterface());
+
+  // Determine if we should use an objc_getProperty call for
+  // this. Non-atomic properties are directly evaluated.
+  // atomic 'copy' and 'retain' properties are also directly
+  // evaluated in gc-only mode.
+  if (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+      !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+      (PD->getSetterKind() == ObjCPropertyDecl::Copy ||
+       PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+    llvm::Value *GetPropertyFn =
+      CGM.getObjCRuntime().GetPropertyGetFunction();
+
+    if (!GetPropertyFn) {
+      // Runtime doesn't provide objc_getProperty; report and bail, but still
+      // finish the function so the IR stays well-formed.
+      CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
+      FinishFunction();
+      return;
+    }
+
+    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+    // FIXME: Can't this be simpler? This might even be worse than the
+    // corresponding gcc code.
+    CodeGenTypes &Types = CGM.getTypes();
+    ValueDecl *Cmd = OMD->getCmdDecl();
+    llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+    QualType IdTy = getContext().getObjCIdType();
+    llvm::Value *SelfAsId =
+      Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+    llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+    llvm::Value *True =
+      llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+    CallArgList Args;
+    Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+    Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+    Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+    Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
+    // FIXME: We shouldn't need to get the function info here, the
+    // runtime already should have computed it to build the function.
+    RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args,
+                                               CC_Default, false),
+                         GetPropertyFn, ReturnValueSlot(), Args);
+    // We need to fix the type here. Ivars with copy & retain are
+    // always objects so we don't need to worry about complex or
+    // aggregates.
+    RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+                                           Types.ConvertType(PD->getType())));
+    EmitReturnOfRValue(RV, PD->getType());
+  } else {
+    // Direct access: load (or copy) the ivar into the return slot.
+    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
+    if (hasAggregateLLVMType(Ivar->getType())) {
+      EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+    } else {
+      CodeGenTypes &Types = CGM.getTypes();
+      RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
+      RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+                       Types.ConvertType(PD->getType())));
+      EmitReturnOfRValue(RV, PD->getType());
+    }
+  }
+
+  FinishFunction();
+}
+
+/// GenerateObjCSetter - Generate an Objective-C property setter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+///
+/// Two strategies: call objc_setProperty for 'copy' (always) and, outside
+/// gc-only mode, for 'retain' semantics; otherwise synthesize a simple
+/// "self->ivar = arg" assignment via temporary AST nodes.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+                                         const ObjCPropertyImplDecl *PID) {
+  ObjCIvarDecl *Ivar = PID->getPropertyIvarDecl();
+  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+  ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+  assert(OMD && "Invalid call to generate setter (empty method)");
+  // FIXME: This is rather murky, we create this here since they will not have
+  // been created by Sema for us.
+  OMD->createImplicitParams(getContext(), IMP->getClassInterface());
+  StartObjCMethod(OMD, IMP->getClassInterface());
+
+  bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
+  bool IsAtomic =
+    !(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
+
+  // Determine if we should use an objc_setProperty call for
+  // this. Properties with 'copy' semantics always use it, as do
+  // non-atomic properties with 'release' semantics as long as we are
+  // not in gc-only mode.
+  if (IsCopy ||
+      (CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
+       PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
+    llvm::Value *SetPropertyFn =
+      CGM.getObjCRuntime().GetPropertySetFunction();
+
+    if (!SetPropertyFn) {
+      // Runtime doesn't provide objc_setProperty; report and bail, but still
+      // finish the function so the IR stays well-formed.
+      CGM.ErrorUnsupported(PID, "Obj-C setter requiring atomic copy");
+      FinishFunction();
+      return;
+    }
+
+    // Emit objc_setProperty((id) self, _cmd, offset, arg,
+    //                       <is-atomic>, <is-copy>).
+    // FIXME: Can't this be simpler? This might even be worse than the
+    // corresponding gcc code.
+    CodeGenTypes &Types = CGM.getTypes();
+    ValueDecl *Cmd = OMD->getCmdDecl();
+    llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
+    QualType IdTy = getContext().getObjCIdType();
+    llvm::Value *SelfAsId =
+      Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+    llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
+    llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
+    llvm::Value *ArgAsId =
+      Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
+                            Types.ConvertType(IdTy));
+    llvm::Value *True =
+      llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 1);
+    llvm::Value *False =
+      llvm::ConstantInt::get(Types.ConvertType(getContext().BoolTy), 0);
+    CallArgList Args;
+    Args.push_back(std::make_pair(RValue::get(SelfAsId), IdTy));
+    Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
+    Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
+    Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
+    Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
+                                  getContext().BoolTy));
+    Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
+                                  getContext().BoolTy));
+    // FIXME: We shouldn't need to get the function info here, the runtime
+    // already should have computed it to build the function.
+    EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
+                                   CC_Default, false), SetPropertyFn,
+             ReturnValueSlot(), Args);
+  } else {
+    // FIXME: Find a clean way to avoid AST node creation.
+    SourceLocation Loc = PD->getLocation();
+    ValueDecl *Self = OMD->getSelfDecl();
+    DeclRefExpr Base(Self, Self->getType(), Loc);
+    ParmVarDecl *ArgDecl = *OMD->param_begin();
+    DeclRefExpr Arg(ArgDecl, ArgDecl->getType(), Loc);
+    ObjCIvarRefExpr IvarRef(Ivar, Ivar->getType(), Loc, &Base, true, true);
+    
+    // The property type can differ from the ivar type in some situations with
+    // Objective-C pointer types, we can always bit cast the RHS in these cases.
+    if (getContext().getCanonicalType(Ivar->getType()) !=
+        getContext().getCanonicalType(ArgDecl->getType())) {
+      ImplicitCastExpr ArgCasted(Ivar->getType(), CastExpr::CK_BitCast, &Arg,
+                                 false);
+      BinaryOperator Assign(&IvarRef, &ArgCasted, BinaryOperator::Assign,
+                            Ivar->getType(), Loc);
+      EmitStmt(&Assign);
+    } else {
+      BinaryOperator Assign(&IvarRef, &Arg, BinaryOperator::Assign,
+                            Ivar->getType(), Loc);
+      EmitStmt(&Assign);
+    }
+  }
+
+  FinishFunction();
+}
+
+/// LoadObjCSelf - Load the value of the implicit 'self' parameter of the
+/// current Objective-C method.
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+  // See if we need to lazily forward self inside a block literal.
+  BlockForwardSelf();
+  return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+}
+
+/// TypeOfSelfObject - Return the pointee type of the current method's 'self'
+/// (i.e. the interface type of the receiver).
+QualType CodeGenFunction::TypeOfSelfObject() {
+  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+  ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+  const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
+    getContext().getCanonicalType(selfDecl->getType()));
+  return PTy->getPointeeType();
+}
+
+/// EmitObjCSuperPropertyGet - Emit "super.prop" as a zero-argument super
+/// message send of the getter selector S.
+RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
+                                                 const Selector &S) {
+  llvm::Value *Receiver = LoadObjCSelf();
+  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+  bool isClassMessage = OMD->isClassMethod();
+  bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+  return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+                                                       Exp->getType(),
+                                                       S,
+                                                       OMD->getClassInterface(),
+                                                       isCategoryImpl,
+                                                       Receiver,
+                                                       isClassMessage,
+                                                       CallArgList());
+
+}
+
+/// EmitObjCPropertyGet - Emit a property load, either from an explicit
+/// property reference or from an implicit setter/getter reference
+/// (dot-syntax on a plain method pair), as a getter message send.
+RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
+  Exp = Exp->IgnoreParens();
+  // FIXME: Split it into two separate routines.
+  if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+    Selector S = E->getProperty()->getGetterName();
+    if (isa<ObjCSuperExpr>(E->getBase()))
+      return EmitObjCSuperPropertyGet(E, S);
+    return CGM.getObjCRuntime().
+             GenerateMessageSend(*this, Exp->getType(), S,
+                                 EmitScalarExpr(E->getBase()),
+                                 false, CallArgList());
+  } else {
+    const ObjCImplicitSetterGetterRefExpr *KE =
+      cast<ObjCImplicitSetterGetterRefExpr>(Exp);
+    Selector S = KE->getGetterMethod()->getSelector();
+    llvm::Value *Receiver;
+    if (KE->getInterfaceDecl()) {
+      // Class-level property access: the receiver is the class object.
+      const ObjCInterfaceDecl *OID = KE->getInterfaceDecl();
+      Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+    } else if (isa<ObjCSuperExpr>(KE->getBase()))
+      return EmitObjCSuperPropertyGet(KE, S);
+    else
+      Receiver = EmitScalarExpr(KE->getBase());
+    return CGM.getObjCRuntime().
+             GenerateMessageSend(*this, Exp->getType(), S,
+                                 Receiver,
+                                 KE->getInterfaceDecl() != 0, CallArgList());
+  }
+}
+
+/// EmitObjCSuperPropertySet - Emit "super.prop = Src" as a one-argument
+/// super message send of the setter selector S, passing Src as the value.
+void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
+                                               const Selector &S,
+                                               RValue Src) {
+  CallArgList Args;
+  llvm::Value *Receiver = LoadObjCSelf();
+  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+  bool isClassMessage = OMD->isClassMethod();
+  bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+  Args.push_back(std::make_pair(Src, Exp->getType()));
+  CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+                                                Exp->getType(),
+                                                S,
+                                                OMD->getClassInterface(),
+                                                isCategoryImpl,
+                                                Receiver,
+                                                isClassMessage,
+                                                Args);
+}
+
+/// EmitObjCPropertySet - Emit a property store, either through an explicit
+/// property reference or an implicit setter/getter reference, as a setter
+/// message send carrying Src as the single argument.
+void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
+                                          RValue Src) {
+  // FIXME: Split it into two separate routines.
+  if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
+    Selector S = E->getProperty()->getSetterName();
+    if (isa<ObjCSuperExpr>(E->getBase())) {
+      EmitObjCSuperPropertySet(E, S, Src);
+      return;
+    }
+    CallArgList Args;
+    Args.push_back(std::make_pair(Src, E->getType()));
+    CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+                                             EmitScalarExpr(E->getBase()),
+                                             false, Args);
+  } else if (const ObjCImplicitSetterGetterRefExpr *E =
+               dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
+    Selector S = E->getSetterMethod()->getSelector();
+    CallArgList Args;
+    llvm::Value *Receiver;
+    if (E->getInterfaceDecl()) {
+      // Class-level property access: the receiver is the class object.
+      const ObjCInterfaceDecl *OID = E->getInterfaceDecl();
+      Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
+    } else if (isa<ObjCSuperExpr>(E->getBase())) {
+      EmitObjCSuperPropertySet(E, S, Src);
+      return;
+    } else
+      Receiver = EmitScalarExpr(E->getBase());
+    Args.push_back(std::make_pair(Src, E->getType()));
+    CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+                                             Receiver,
+                                             E->getInterfaceDecl() != 0, Args);
+  } else
+    assert (0 && "bad expression node in EmitObjCPropertySet");
+}
+
+/// EmitObjCForCollectionStmt - Emit "for (element in collection)" fast
+/// enumeration.  Lowering: allocate an NSFastEnumerationState and a batch
+/// buffer of NumItems ids, send countByEnumeratingWithState:objects:count:
+/// to fetch batches, and loop over each batch; a snapshot of the state's
+/// mutations pointer is checked each iteration and a mismatch calls the
+/// runtime's enumeration-mutation handler.
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
+  llvm::Constant *EnumerationMutationFn =
+    CGM.getObjCRuntime().EnumerationMutationFunction();
+  llvm::Value *DeclAddress;
+  QualType ElementTy;
+
+  if (!EnumerationMutationFn) {
+    CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+    return;
+  }
+
+  // The element is either a fresh declaration ("for (id x in c)") or an
+  // existing lvalue expression ("for (x in c)").
+  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+    EmitStmt(SD);
+    assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
+    const Decl* D = SD->getSingleDecl();
+    ElementTy = cast<ValueDecl>(D)->getType();
+    DeclAddress = LocalDeclMap[D];
+  } else {
+    ElementTy = cast<Expr>(S.getElement())->getType();
+    DeclAddress = 0;
+  }
+
+  // Fast enumeration state.
+  QualType StateTy = getContext().getObjCFastEnumerationStateType();
+  llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
+  EmitMemSetToZero(StatePtr, StateTy);
+
+  // Number of elements in the items array.
+  static const unsigned NumItems = 16;
+
+  // Get selector
+  llvm::SmallVector<IdentifierInfo*, 3> II;
+  II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState"));
+  II.push_back(&CGM.getContext().Idents.get("objects"));
+  II.push_back(&CGM.getContext().Idents.get("count"));
+  Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
+                                                                &II[0]);
+
+  QualType ItemsTy =
+    getContext().getConstantArrayType(getContext().getObjCIdType(),
+                                      llvm::APInt(32, NumItems),
+                                      ArrayType::Normal, 0);
+  llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+
+  llvm::Value *Collection = EmitScalarExpr(S.getCollection());
+
+  CallArgList Args;
+  Args.push_back(std::make_pair(RValue::get(StatePtr),
+                                getContext().getPointerType(StateTy)));
+
+  Args.push_back(std::make_pair(RValue::get(ItemsPtr),
+                                getContext().getPointerType(ItemsTy)));
+
+  const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+  llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+  Args.push_back(std::make_pair(RValue::get(Count),
+                                getContext().UnsignedLongTy));
+
+  // Initial fetch: returns the number of items in the first batch.
+  RValue CountRV =
+    CGM.getObjCRuntime().GenerateMessageSend(*this,
+                                             getContext().UnsignedLongTy,
+                                             FastEnumSel,
+                                             Collection, false, Args);
+
+  llvm::Value *LimitPtr = CreateMemTemp(getContext().UnsignedLongTy,
+                                        "limit.ptr");
+  Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+
+  llvm::BasicBlock *NoElements = createBasicBlock("noelements");
+  llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
+
+  llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
+  llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+  llvm::Value *IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+  Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
+
+  EmitBlock(SetStartMutations);
+
+  // Snapshot the collection's mutations counter; any later change means the
+  // collection was mutated during enumeration.
+  llvm::Value *StartMutationsPtr = CreateMemTemp(getContext().UnsignedLongTy);
+
+  llvm::Value *StateMutationsPtrPtr =
+    Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
+  llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+                                                      "mutationsptr");
+
+  llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
+                                                   "mutations");
+
+  Builder.CreateStore(StateMutations, StartMutationsPtr);
+
+  llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
+  EmitBlock(LoopStart);
+
+  // Per-batch counter over the items buffer.
+  llvm::Value *CounterPtr = CreateMemTemp(getContext().UnsignedLongTy,
+                                       "counter.ptr");
+  Builder.CreateStore(Zero, CounterPtr);
+
+  llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
+  EmitBlock(LoopBody);
+
+  // Check for mutation of the collection since the snapshot.
+  StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+  StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+
+  llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
+                                                   "mutations");
+  llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
+                                                     StartMutations,
+                                                     "tobool");
+
+
+  llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
+  llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
+
+  Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
+
+  // Mutation detected: call the runtime's enumeration-mutation handler.
+  EmitBlock(WasMutated);
+  llvm::Value *V =
+    Builder.CreateBitCast(Collection,
+                          ConvertType(getContext().getObjCIdType()),
+                          "tmp");
+  CallArgList Args2;
+  Args2.push_back(std::make_pair(RValue::get(V),
+                                getContext().getObjCIdType()));
+  // FIXME: We shouldn't need to get the function info here, the runtime already
+  // should have computed it to build the function.
+  EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
+                                          CC_Default, false),
+           EnumerationMutationFn, ReturnValueSlot(), Args2);
+
+  EmitBlock(WasNotMutated);
+
+  // Fetch the current item from the batch buffer.
+  llvm::Value *StateItemsPtr =
+    Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+
+  llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
+
+  llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
+                                                   "stateitems");
+
+  llvm::Value *CurrentItemPtr =
+    Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
+
+  llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
+
+  // Cast the item to the right type.
+  CurrentItem = Builder.CreateBitCast(CurrentItem,
+                                      ConvertType(ElementTy), "tmp");
+
+  if (!DeclAddress) {
+    LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+    // Store the current item into the element lvalue.
+    Builder.CreateStore(CurrentItem, LV.getAddress());
+  } else
+    Builder.CreateStore(CurrentItem, DeclAddress);
+
+  // Increment the counter.
+  Counter = Builder.CreateAdd(Counter,
+                              llvm::ConstantInt::get(UnsignedLongLTy, 1));
+  Builder.CreateStore(Counter, CounterPtr);
+
+  llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
+  llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
+
+  // 'break' exits to LoopEnd; 'continue' jumps to AfterBody.
+  BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+
+  EmitStmt(S.getBody());
+
+  BreakContinueStack.pop_back();
+
+  EmitBlock(AfterBody);
+
+  llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
+
+  // More items left in this batch?
+  Counter = Builder.CreateLoad(CounterPtr);
+  Limit = Builder.CreateLoad(LimitPtr);
+  llvm::Value *IsLess = Builder.CreateICmpULT(Counter, Limit, "isless");
+  Builder.CreateCondBr(IsLess, LoopBody, FetchMore);
+
+  // Fetch more elements.
+  EmitBlock(FetchMore);
+
+  CountRV =
+    CGM.getObjCRuntime().GenerateMessageSend(*this,
+                                             getContext().UnsignedLongTy,
+                                             FastEnumSel,
+                                             Collection, false, Args);
+  Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
+  Limit = Builder.CreateLoad(LimitPtr);
+
+  IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
+  Builder.CreateCondBr(IsZero, NoElements, LoopStart);
+
+  // No more elements.
+  EmitBlock(NoElements);
+
+  if (!DeclAddress) {
+    // If the element was not a declaration, set it to be null.
+
+    LValue LV = EmitLValue(cast<Expr>(S.getElement()));
+
+    // Set the value to null.
+    Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
+                        LV.getAddress());
+  }
+
+  EmitBlock(LoopEnd);
+}
+
+/// EmitObjCAtTryStmt - Emit @try/@catch/@finally; fully runtime-specific.
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
+  CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+/// EmitObjCAtThrowStmt - Emit @throw; fully runtime-specific.
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
+  CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+/// EmitObjCAtSynchronizedStmt - Emit @synchronized; fully runtime-specific.
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+                                              const ObjCAtSynchronizedStmt &S) {
+  CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+}
+
+// Out-of-line definition of the runtime interface's virtual destructor.
+CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..1d38ef9
--- /dev/null
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,2109 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime.  The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library.  These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+
+#include <map>
+
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::dyn_cast;
+
// The version of the runtime that this class targets.  Must match the version
// in the runtime.
static const int RuntimeVersion = 8;
static const int NonFragileRuntimeVersion = 9;
// Versions stamped into emitted protocol structures, distinguishing the
// fragile and non-fragile protocol layouts.
static const int ProtocolVersion = 2;
static const int NonFragileProtocolVersion = 3;
+
namespace {
/// Implementation of the CGObjCRuntime interface for the GNU Objective-C
/// runtime.  Generates the metadata structures consumed by the runtime
/// library and lowers message sends to objc_msg_lookup-style calls.
class CGObjCGNU : public CodeGen::CGObjCRuntime {
private:
  CodeGen::CodeGenModule &CGM;
  llvm::Module &TheModule;
  // Cached LLVM types for frequently-used C and Objective-C types.
  const llvm::PointerType *SelectorTy;
  const llvm::IntegerType *Int8Ty;
  const llvm::PointerType *PtrToInt8Ty;
  // Type of a method implementation function: id (*)(id, SEL, ...).
  const llvm::FunctionType *IMPTy;
  const llvm::PointerType *IdTy;
  const llvm::PointerType *PtrToIdTy;
  // AST-level 'id' type, used when building call argument lists.
  QualType ASTIdTy;
  const llvm::IntegerType *IntTy;
  const llvm::PointerType *PtrTy;
  const llvm::IntegerType *LongTy;
  const llvm::PointerType *PtrToIntTy;
  // Forward references to the class / metaclass structures of the current
  // implementation, used when sending messages to super.
  llvm::GlobalAlias *ClassPtrAlias;
  llvm::GlobalAlias *MetaClassPtrAlias;
  // Metadata accumulated while compiling this module.
  std::vector<llvm::Constant*> Classes;
  std::vector<llvm::Constant*> Categories;
  std::vector<llvm::Constant*> ConstantStrings;
  // Cache of emitted string-literal objects, keyed by their contents.
  llvm::StringMap<llvm::Constant*> ObjCStrings;
  llvm::Function *LoadFunction;
  llvm::StringMap<llvm::Constant*> ExistingProtocols;
  // A selector name paired with its type encoding.
  typedef std::pair<std::string, std::string> TypedSelector;
  // Aliases standing in for selectors; created with a null aliasee and
  // presumably resolved when the module is finalized.
  std::map<TypedSelector, llvm::GlobalAlias*> TypedSelectors;
  llvm::StringMap<llvm::GlobalAlias*> UntypedSelectors;
  // Selectors that we don't emit in GC mode
  Selector RetainSel, ReleaseSel, AutoreleaseSel;
  // Functions used for GC.
  llvm::Constant *IvarAssignFn, *StrongCastAssignFn, *MemMoveFn, *WeakReadFn,
    *WeakAssignFn, *GlobalAssignFn;
  // Some zeros used for GEPs in lots of places.
  llvm::Constant *Zeros[2];
  llvm::Constant *NULLPtr;
  llvm::LLVMContext &VMContext;
private:
  // Helpers that build the individual runtime metadata structures.
  llvm::Constant *GenerateIvarList(
      const llvm::SmallVectorImpl<llvm::Constant *>  &IvarNames,
      const llvm::SmallVectorImpl<llvm::Constant *>  &IvarTypes,
      const llvm::SmallVectorImpl<llvm::Constant *>  &IvarOffsets);
  llvm::Constant *GenerateMethodList(const std::string &ClassName,
      const std::string &CategoryName,
      const llvm::SmallVectorImpl<Selector>  &MethodSels,
      const llvm::SmallVectorImpl<llvm::Constant *>  &MethodTypes,
      bool isClassMethodList);
  llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
  llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
        llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
        llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
  llvm::Constant *GenerateProtocolList(
      const llvm::SmallVectorImpl<std::string> &Protocols);
  // To ensure that all protocols are seen by the runtime, we add a category on
  // a class defined in the runtime, declaring no methods, but adopting the
  // protocols.
  void GenerateProtocolHolderCategory(void);
  llvm::Constant *GenerateClassStructure(
      llvm::Constant *MetaClass,
      llvm::Constant *SuperClass,
      unsigned info,
      const char *Name,
      llvm::Constant *Version,
      llvm::Constant *InstanceSize,
      llvm::Constant *IVars,
      llvm::Constant *Methods,
      llvm::Constant *Protocols,
      llvm::Constant *IvarOffsets,
      llvm::Constant *Properties);
  llvm::Constant *GenerateProtocolMethodList(
      const llvm::SmallVectorImpl<llvm::Constant *>  &MethodNames,
      const llvm::SmallVectorImpl<llvm::Constant *>  &MethodTypes);
  // String and global-variable emission helpers.
  llvm::Constant *MakeConstantString(const std::string &Str, const std::string
      &Name="");
  llvm::Constant *ExportUniqueString(const std::string &Str, const std::string
          prefix);
  llvm::Constant *MakeGlobal(const llvm::StructType *Ty,
    std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
    llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
  llvm::Constant *MakeGlobal(const llvm::ArrayType *Ty,
    std::vector<llvm::Constant*> &V, llvm::StringRef Name="",
    llvm::GlobalValue::LinkageTypes linkage=llvm::GlobalValue::InternalLinkage);
  llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
      const ObjCIvarDecl *Ivar);
  void EmitClassRef(const std::string &className);
  // Bitcasts V to Ty if needed; returns V unchanged when the types match.
  llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, const llvm::Type *Ty){
    if (V->getType() == Ty) return V;
    return B.CreateBitCast(V, Ty);
  }
public:
  CGObjCGNU(CodeGen::CodeGenModule &cgm);
  virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
  virtual CodeGen::RValue
  GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
                      QualType ResultType,
                      Selector Sel,
                      llvm::Value *Receiver,
                      bool IsClassMessage,
                      const CallArgList &CallArgs,
                      const ObjCMethodDecl *Method);
  virtual CodeGen::RValue
  GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                           QualType ResultType,
                           Selector Sel,
                           const ObjCInterfaceDecl *Class,
                           bool isCategoryImpl,
                           llvm::Value *Receiver,
                           bool IsClassMessage,
                           const CallArgList &CallArgs,
                           const ObjCMethodDecl *Method);
  virtual llvm::Value *GetClass(CGBuilderTy &Builder,
                                const ObjCInterfaceDecl *OID);
  virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
  virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
      *Method);

  virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
                                         const ObjCContainerDecl *CD);
  virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
  virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
  virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
                                           const ObjCProtocolDecl *PD);
  virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
  virtual llvm::Function *ModuleInitFunction();
  virtual llvm::Function *GetPropertyGetFunction();
  virtual llvm::Function *GetPropertySetFunction();
  virtual llvm::Constant *EnumerationMutationFunction();

  // Statement emission (exceptions, synchronization).
  virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
                                         const Stmt &S);
  virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
                             const ObjCAtThrowStmt &S);
  // GC read/write barriers.
  virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
                                         llvm::Value *AddrWeakObj);
  virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
                                  llvm::Value *src, llvm::Value *dst);
  virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
                                    llvm::Value *src, llvm::Value *dest);
  virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
                                    llvm::Value *src, llvm::Value *dest,
                                    llvm::Value *ivarOffset);
  virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *src, llvm::Value *dest);
  virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr,
                                        QualType Ty);
  // Ivar access.
  virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
                                      QualType ObjectTy,
                                      llvm::Value *BaseValue,
                                      const ObjCIvarDecl *Ivar,
                                      unsigned CVRQualifiers);
  virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
                                      const ObjCInterfaceDecl *Interface,
                                      const ObjCIvarDecl *Ivar);
};
} // end anonymous namespace
+
+
+/// Emits a reference to a dummy variable which is emitted with each class.
+/// This ensures that a linker error will be generated when trying to link
+/// together modules where a referenced class is not defined.
+void CGObjCGNU::EmitClassRef(const std::string &className) {
+  std::string symbolRef = "__objc_class_ref_" + className;
+  // Don't emit two copies of the same symbol
+  if (TheModule.getGlobalVariable(symbolRef))
+    return;
+  std::string symbolName = "__objc_class_name_" + className;
+  llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName);
+  if (!ClassSymbol) {
+    ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+        llvm::GlobalValue::ExternalLinkage, 0, symbolName);
+  }
+  new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true,
+    llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
+}
+
/// Returns the symbol name under which a class structure is emitted.
static std::string SymbolNameForClass(const std::string &ClassName) {
  std::string Symbol = "_OBJC_CLASS_";
  Symbol += ClassName;
  return Symbol;
}
+
/// Returns the symbol name for a method implementation: a "_c_" (class
/// method) or "_i_" (instance method) prefix, then the class name, category
/// name and selector, with every ':' in the selector rewritten to '_'.
static std::string SymbolNameForMethod(const std::string &ClassName, const
  std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
{
  // Rewrite colons so the selector is a valid symbol component.
  std::string Sel = MethodName;
  for (std::string::size_type i = 0; i != Sel.size(); ++i)
    if (Sel[i] == ':')
      Sel[i] = '_';
  std::string Result = isClassMethod ? "_c_" : "_i_";
  Result += ClassName;
  Result += '_';
  Result += CategoryName;
  Result += '_';
  Result += Sel;
  return Result;
}
+
/// Constructs the GNU-runtime support object: caches the LLVM forms of the
/// basic C and Objective-C types for the current target, and, when compiling
/// in GC mode, declares the garbage-collection barrier functions.
CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
  : CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
    MetaClassPtrAlias(0), VMContext(cgm.getLLVMContext()) {
  // Cache the LLVM forms of 'int' and 'long' for the current target.
  IntTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().IntTy));
  LongTy = cast<llvm::IntegerType>(
      CGM.getTypes().ConvertType(CGM.getContext().LongTy));

  Int8Ty = llvm::Type::getInt8Ty(VMContext);
  // C string type.  Used in lots of places.
  PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);

  // Two zero constants used as GEP indices throughout metadata emission.
  Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
  Zeros[1] = Zeros[0];
  NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
  // Get the selector Type.
  QualType selTy = CGM.getContext().getObjCSelType();
  if (QualType() == selTy) {
    // No SEL type registered in the ASTContext; fall back to char*.
    SelectorTy = PtrToInt8Ty;
  } else {
    SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
  }

  PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
  PtrTy = PtrToInt8Ty;

  // Object type
  ASTIdTy = CGM.getContext().getObjCIdType();
  if (QualType() == ASTIdTy) {
    // No 'id' type registered; fall back to char*.
    IdTy = PtrToInt8Ty;
  } else {
    IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
  }
  PtrToIdTy = llvm::PointerType::getUnqual(IdTy);

  // IMP type: id (*)(id, SEL, ...)
  std::vector<const llvm::Type*> IMPArgs;
  IMPArgs.push_back(IdTy);
  IMPArgs.push_back(SelectorTy);
  IMPTy = llvm::FunctionType::get(IdTy, IMPArgs, true);

  if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
    // Get selectors needed in GC mode
    RetainSel = GetNullarySelector("retain", CGM.getContext());
    ReleaseSel = GetNullarySelector("release", CGM.getContext());
    AutoreleaseSel = GetNullarySelector("autorelease", CGM.getContext());

    // Get functions needed in GC mode

    // id objc_assign_ivar(id, id, ptrdiff_t);
    std::vector<const llvm::Type*> Args(1, IdTy);
    Args.push_back(PtrToIdTy);
    // FIXME: ptrdiff_t
    Args.push_back(LongTy);
    llvm::FunctionType *FTy = llvm::FunctionType::get(IdTy, Args, false);
    IvarAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
    // id objc_assign_strongCast (id, id*)
    // Drop the ptrdiff_t argument; the remaining (id, id*) signature is
    // shared by the next three barrier functions.
    Args.pop_back();
    FTy = llvm::FunctionType::get(IdTy, Args, false);
    StrongCastAssignFn =
        CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
    // id objc_assign_global(id, id*);
    FTy = llvm::FunctionType::get(IdTy, Args, false);
    GlobalAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
    // id objc_assign_weak(id, id*);
    FTy = llvm::FunctionType::get(IdTy, Args, false);
    WeakAssignFn = CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
    // id objc_read_weak(id*);
    Args.clear();
    Args.push_back(PtrToIdTy);
    FTy = llvm::FunctionType::get(IdTy, Args, false);
    WeakReadFn = CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
    // void *objc_memmove_collectable(void*, void *, size_t);
    Args.clear();
    Args.push_back(PtrToInt8Ty);
    Args.push_back(PtrToInt8Ty);
    // FIXME: size_t
    Args.push_back(LongTy);
    FTy = llvm::FunctionType::get(IdTy, Args, false);
    MemMoveFn = CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
  }
}
+
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+                                 const ObjCInterfaceDecl *OID) {
+  llvm::Value *ClassName = CGM.GetAddrOfConstantCString(OID->getNameAsString());
+  // With the incompatible ABI, this will need to be replaced with a direct
+  // reference to the class symbol.  For the compatible nonfragile ABI we are
+  // still performing this lookup at run time but emitting the symbol for the
+  // class externally so that we can make the switch later.
+  EmitClassRef(OID->getNameAsString());
+  ClassName = Builder.CreateStructGEP(ClassName, 0);
+
+  std::vector<const llvm::Type*> Params(1, PtrToInt8Ty);
+  llvm::Constant *ClassLookupFn =
+    CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy,
+                                                      Params,
+                                                      true),
+                              "objc_lookup_class");
+  return Builder.CreateCall(ClassLookupFn, ClassName);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel) {
+  llvm::GlobalAlias *&US = UntypedSelectors[Sel.getAsString()];
+  if (US == 0)
+    US = new llvm::GlobalAlias(llvm::PointerType::getUnqual(SelectorTy),
+                               llvm::GlobalValue::PrivateLinkage,
+                               ".objc_untyped_selector_alias"+Sel.getAsString(),
+                               NULL, &TheModule);
+
+  return Builder.CreateLoad(US);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+    *Method) {
+
+  std::string SelName = Method->getSelector().getAsString();
+  std::string SelTypes;
+  CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+  // Typed selectors
+  TypedSelector Selector = TypedSelector(SelName,
+          SelTypes);
+
+  // If it's already cached, return it.
+  if (TypedSelectors[Selector]) {
+    return Builder.CreateLoad(TypedSelectors[Selector]);
+  }
+
+  // If it isn't, cache it.
+  llvm::GlobalAlias *Sel = new llvm::GlobalAlias(
+          llvm::PointerType::getUnqual(SelectorTy),
+          llvm::GlobalValue::PrivateLinkage, ".objc_selector_alias" + SelName,
+          NULL, &TheModule);
+  TypedSelectors[Selector] = Sel;
+
+  return Builder.CreateLoad(Sel);
+}
+
+llvm::Constant *CGObjCGNU::MakeConstantString(const std::string &Str,
+                                              const std::string &Name) {
+  llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+  return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+llvm::Constant *CGObjCGNU::ExportUniqueString(const std::string &Str,
+        const std::string prefix) {
+  std::string name = prefix + Str;
+  llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
+  if (!ConstStr) {
+    llvm::Constant *value = llvm::ConstantArray::get(VMContext, Str, true);
+    ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+            llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+  }
+  return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros, 2);
+}
+
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::StructType *Ty,
+    std::vector<llvm::Constant*> &V, llvm::StringRef Name,
+    llvm::GlobalValue::LinkageTypes linkage) {
+  llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+  return new llvm::GlobalVariable(TheModule, Ty, false,
+      llvm::GlobalValue::InternalLinkage, C, Name);
+}
+
+llvm::Constant *CGObjCGNU::MakeGlobal(const llvm::ArrayType *Ty,
+    std::vector<llvm::Constant*> &V, llvm::StringRef Name,
+    llvm::GlobalValue::LinkageTypes linkage) {
+  llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+  return new llvm::GlobalVariable(TheModule, Ty, false,
+                                  llvm::GlobalValue::InternalLinkage, C, Name);
+}
+
+/// Generate an NSConstantString object.
+llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+
+  std::string Str(SL->getStrData(), SL->getByteLength());
+
+  // Look for an existing one
+  llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
+  if (old != ObjCStrings.end())
+    return old->getValue();
+
+  std::vector<llvm::Constant*> Ivars;
+  Ivars.push_back(NULLPtr);
+  Ivars.push_back(MakeConstantString(Str));
+  Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+  llvm::Constant *ObjCStr = MakeGlobal(
+    llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty, IntTy, NULL),
+    Ivars, ".objc_str");
+  ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
+  ObjCStrings[Str] = ObjCStr;
+  ConstantStrings.push_back(ObjCStr);
+  return ObjCStr;
+}
+
+///Generates a message send where the super is the receiver.  This is a message
+///send to self with special delivery semantics indicating which class's method
+///should be called.
CodeGen::RValue
CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                                    QualType ResultType,
                                    Selector Sel,
                                    const ObjCInterfaceDecl *Class,
                                    bool isCategoryImpl,
                                    llvm::Value *Receiver,
                                    bool IsClassMessage,
                                    const CallArgList &CallArgs,
                                    const ObjCMethodDecl *Method) {
  // In GC mode, retain/release are no-ops and autorelease returns the
  // receiver, so these sends are folded away without calling the runtime.
  if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
    if (Sel == RetainSel || Sel == AutoreleaseSel) {
      return RValue::get(Receiver);
    }
    if (Sel == ReleaseSel) {
      return RValue::get(0);
    }
  }
  llvm::Value *cmd = GetSelector(CGF.Builder, Sel);

  // Build the full argument list: (self, _cmd, args...).
  CallArgList ActualArgs;

  ActualArgs.push_back(
      std::make_pair(RValue::get(CGF.Builder.CreateBitCast(Receiver, IdTy)),
      ASTIdTy));
  ActualArgs.push_back(std::make_pair(RValue::get(cmd),
                                      CGF.getContext().getObjCSelType()));
  ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());

  // Compute the LLVM function type the IMP will be called through.
  CodeGenTypes &Types = CGM.getTypes();
  const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
                                                       CC_Default, false);
  const llvm::FunctionType *impType =
    Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);

  llvm::Value *ReceiverClass = 0;
  if (isCategoryImpl) {
    // A category has no class structure of its own, so the class (or
    // metaclass, for class messages) is looked up by name at run time.
    llvm::Constant *classLookupFunction = 0;
    std::vector<const llvm::Type*> Params;
    Params.push_back(PtrTy);
    if (IsClassMessage)  {
      classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
            IdTy, Params, true), "objc_get_meta_class");
    } else {
      classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
            IdTy, Params, true), "objc_get_class");
    }
    ReceiverClass = CGF.Builder.CreateCall(classLookupFunction,
        MakeConstantString(Class->getNameAsString()));
  } else {
    // Set up global aliases for the metaclass or class pointer if they do not
    // already exist.  These are forward-references which will be set to
    // pointers to the class and metaclass structure created for the runtime
    // load function.  To send a message to super, we look up the value of the
    // super_class pointer from either the class or metaclass structure.
    if (IsClassMessage)  {
      if (!MetaClassPtrAlias) {
        MetaClassPtrAlias = new llvm::GlobalAlias(IdTy,
            llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" +
            Class->getNameAsString(), NULL, &TheModule);
      }
      ReceiverClass = MetaClassPtrAlias;
    } else {
      if (!ClassPtrAlias) {
        ClassPtrAlias = new llvm::GlobalAlias(IdTy,
            llvm::GlobalValue::InternalLinkage, ".objc_class_ref" +
            Class->getNameAsString(), NULL, &TheModule);
      }
      ReceiverClass = ClassPtrAlias;
    }
  }
  // Cast the pointer to a simplified version of the class structure
  // (just the isa and super_class pointers).
  ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
      llvm::PointerType::getUnqual(
        llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
  // Get the superclass pointer
  ReceiverClass = CGF.Builder.CreateStructGEP(ReceiverClass, 1);
  // Load the superclass pointer
  ReceiverClass = CGF.Builder.CreateLoad(ReceiverClass);
  // Construct the structure used to look up the IMP
  llvm::StructType *ObjCSuperTy = llvm::StructType::get(VMContext,
      Receiver->getType(), IdTy, NULL);
  llvm::Value *ObjCSuper = CGF.Builder.CreateAlloca(ObjCSuperTy);

  // objc_super is { receiver, superclass }.
  CGF.Builder.CreateStore(Receiver, CGF.Builder.CreateStructGEP(ObjCSuper, 0));
  CGF.Builder.CreateStore(ReceiverClass,
      CGF.Builder.CreateStructGEP(ObjCSuper, 1));

  // Get the IMP
  // IMP objc_msg_lookup_super(struct objc_super*, SEL);
  std::vector<const llvm::Type*> Params;
  Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
  Params.push_back(SelectorTy);
  llvm::Constant *lookupFunction =
    CGM.CreateRuntimeFunction(llvm::FunctionType::get(
          llvm::PointerType::getUnqual(impType), Params, true),
        "objc_msg_lookup_super");

  llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
  llvm::Value *imp = CGF.Builder.CreateCall(lookupFunction, lookupArgs,
      lookupArgs+2);

  // Call the IMP with the argument list built above.
  return CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs);
}
+
+/// Generate code for a message send expression.
CodeGen::RValue
CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
                               QualType ResultType,
                               Selector Sel,
                               llvm::Value *Receiver,
                               bool IsClassMessage,
                               const CallArgList &CallArgs,
                               const ObjCMethodDecl *Method) {
  // In GC mode, retain/release are no-ops and autorelease returns the
  // receiver, so these sends are folded away without calling the runtime.
  if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC) {
    if (Sel == RetainSel || Sel == AutoreleaseSel) {
      return RValue::get(Receiver);
    }
    if (Sel == ReleaseSel) {
      return RValue::get(0);
    }
  }
  CGBuilderTy &Builder = CGF.Builder;
  // Refresh the cached LLVM 'id' type from the AST type.
  // NOTE(review): presumably ASTIdTy's converted type can change after the
  // constructor ran — confirm; otherwise this line is redundant.
  IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
  // Prefer the typed selector when the method declaration is available.
  llvm::Value *cmd;
  if (Method)
    cmd = GetSelector(Builder, Method);
  else
    cmd = GetSelector(Builder, Sel);
  CallArgList ActualArgs;

  // Build the full argument list: (self, _cmd, args...).
  Receiver = Builder.CreateBitCast(Receiver, IdTy);
  ActualArgs.push_back(
    std::make_pair(RValue::get(Receiver), ASTIdTy));
  ActualArgs.push_back(std::make_pair(RValue::get(cmd),
                                      CGF.getContext().getObjCSelType()));
  ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());

  // Compute the LLVM function type the IMP will be called through.
  CodeGenTypes &Types = CGM.getTypes();
  const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
                                                       CC_Default, false);
  const llvm::FunctionType *impType =
    Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);

  llvm::Value *imp;
  // For sender-aware dispatch, we pass the sender as the third argument to a
  // lookup function.  When sending messages from C code, the sender is nil.
  // objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
  if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {

    std::vector<const llvm::Type*> Params;
    // The receiver is passed by reference so the lookup function can
    // substitute a different object (see the reload below).
    llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
    Builder.CreateStore(Receiver, ReceiverPtr);
    Params.push_back(ReceiverPtr->getType());
    Params.push_back(SelectorTy);
    llvm::Value *self;

    // The sender is 'self' inside a method, nil elsewhere.
    if (isa<ObjCMethodDecl>(CGF.CurFuncDecl)) {
      self = CGF.LoadObjCSelf();
    } else {
      self = llvm::ConstantPointerNull::get(IdTy);
    }

    Params.push_back(self->getType());

    // The lookup function returns a slot, which can be safely cached.
    llvm::Type *SlotTy = llvm::StructType::get(VMContext, PtrTy, PtrTy, PtrTy,
            IntTy, llvm::PointerType::getUnqual(impType), NULL);
    llvm::Constant *lookupFunction =
      CGM.CreateRuntimeFunction(llvm::FunctionType::get(
          llvm::PointerType::getUnqual(SlotTy), Params, true),
        "objc_msg_lookup_sender");

    // The lookup function is guaranteed not to capture the receiver pointer.
    if (llvm::Function *LookupFn = dyn_cast<llvm::Function>(lookupFunction)) {
      LookupFn->setDoesNotCapture(1);
    }

    llvm::Value *slot =
        Builder.CreateCall3(lookupFunction, ReceiverPtr, cmd, self);
    // The IMP is the fifth field of the slot.
    imp = Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
    // The lookup function may have changed the receiver, so make sure we use
    // the new one.
    ActualArgs[0] =
        std::make_pair(RValue::get(Builder.CreateLoad(ReceiverPtr)), ASTIdTy);
  } else {
    // Classic two-argument lookup: IMP objc_msg_lookup(id, SEL);
    std::vector<const llvm::Type*> Params;
    Params.push_back(Receiver->getType());
    Params.push_back(SelectorTy);
    llvm::Constant *lookupFunction =
    CGM.CreateRuntimeFunction(llvm::FunctionType::get(
        llvm::PointerType::getUnqual(impType), Params, true),
      "objc_msg_lookup");

    imp = Builder.CreateCall2(lookupFunction, Receiver, cmd);
  }

  // Call the IMP with the argument list built above.
  return CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs);
}
+
+/// Generates a MethodList.  Used in construction of a objc_class and
+/// objc_category structures.
+llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
+                                              const std::string &CategoryName,
+    const llvm::SmallVectorImpl<Selector> &MethodSels,
+    const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+    bool isClassMethodList) {
+  if (MethodSels.empty())
+    return NULLPtr;
+  // Get the method structure type.
+  llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
+    PtrToInt8Ty, // Really a selector, but the runtime creates it us.
+    PtrToInt8Ty, // Method types
+    llvm::PointerType::getUnqual(IMPTy), //Method pointer
+    NULL);
+  std::vector<llvm::Constant*> Methods;
+  std::vector<llvm::Constant*> Elements;
+  for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+    Elements.clear();
+    if (llvm::Constant *Method =
+      TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+                                                MethodSels[i].getAsString(),
+                                                isClassMethodList))) {
+      llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+      Elements.push_back(C);
+      Elements.push_back(MethodTypes[i]);
+      Method = llvm::ConstantExpr::getBitCast(Method,
+          llvm::PointerType::getUnqual(IMPTy));
+      Elements.push_back(Method);
+      Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+    }
+  }
+
+  // Array of method structures
+  llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+                                                            Methods.size());
+  llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+                                                         Methods);
+
+  // Structure containing list pointer, array and array count
+  llvm::SmallVector<const llvm::Type*, 16> ObjCMethodListFields;
+  llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext);
+  llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
+  llvm::StructType *ObjCMethodListTy = llvm::StructType::get(VMContext,
+      NextPtrTy,
+      IntTy,
+      ObjCMethodArrayTy,
+      NULL);
+  // Refine next pointer type to concrete type
+  llvm::cast<llvm::OpaqueType>(
+      OpaqueNextTy.get())->refineAbstractTypeTo(ObjCMethodListTy);
+  ObjCMethodListTy = llvm::cast<llvm::StructType>(OpaqueNextTy.get());
+
+  Methods.clear();
+  Methods.push_back(llvm::ConstantPointerNull::get(
+        llvm::PointerType::getUnqual(ObjCMethodListTy)));
+  Methods.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+        MethodTypes.size()));
+  Methods.push_back(MethodArray);
+
+  // Create an instance of the structure
+  return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+}
+
+/// Generates an IvarList.  Used in construction of a objc_class.
+llvm::Constant *CGObjCGNU::GenerateIvarList(
+    const llvm::SmallVectorImpl<llvm::Constant *>  &IvarNames,
+    const llvm::SmallVectorImpl<llvm::Constant *>  &IvarTypes,
+    const llvm::SmallVectorImpl<llvm::Constant *>  &IvarOffsets) {
+  if (IvarNames.size() == 0)
+    return NULLPtr;
+  // Get the method structure type.
+  llvm::StructType *ObjCIvarTy = llvm::StructType::get(VMContext,
+    PtrToInt8Ty,
+    PtrToInt8Ty,
+    IntTy,
+    NULL);
+  std::vector<llvm::Constant*> Ivars;
+  std::vector<llvm::Constant*> Elements;
+  for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
+    Elements.clear();
+    Elements.push_back(IvarNames[i]);
+    Elements.push_back(IvarTypes[i]);
+    Elements.push_back(IvarOffsets[i]);
+    Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+  }
+
+  // Array of method structures
+  llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+      IvarNames.size());
+
+
+  Elements.clear();
+  Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+  Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+  // Structure containing array and array count
+  llvm::StructType *ObjCIvarListTy = llvm::StructType::get(VMContext, IntTy,
+    ObjCIvarArrayTy,
+    NULL);
+
+  // Create an instance of the structure
+  return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+}
+
+/// Generate a class structure (struct objc_class).  The same layout is used
+/// for both classes and metaclasses; the caller distinguishes them via the
+/// info flags and the lists it passes in.
+///
+/// NOTE(review): the Version parameter is currently unused — the version
+/// slot is always emitted as the Zero constant (callers pass 0 here);
+/// confirm before relying on it.
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+    llvm::Constant *MetaClass,
+    llvm::Constant *SuperClass,
+    unsigned info,
+    const char *Name,
+    llvm::Constant *Version,
+    llvm::Constant *InstanceSize,
+    llvm::Constant *IVars,
+    llvm::Constant *Methods,
+    llvm::Constant *Protocols,
+    llvm::Constant *IvarOffsets,
+    llvm::Constant *Properties) {
+  // Set up the class structure
+  // Note:  Several of these are char*s when they should be ids.  This is
+  // because the runtime performs this translation on load.
+  //
+  // Fields marked New ABI are part of the GNUstep runtime.  We emit them
+  // anyway; the classes will still work with the GNU runtime, they will just
+  // be ignored.
+  llvm::StructType *ClassTy = llvm::StructType::get(VMContext,
+      PtrToInt8Ty,        // class_pointer
+      PtrToInt8Ty,        // super_class
+      PtrToInt8Ty,        // name
+      LongTy,             // version
+      LongTy,             // info
+      LongTy,             // instance_size
+      IVars->getType(),   // ivars
+      Methods->getType(), // methods
+      // These are all filled in by the runtime, so we pretend
+      PtrTy,              // dtable
+      PtrTy,              // subclass_list
+      PtrTy,              // sibling_class
+      PtrTy,              // protocols
+      PtrTy,              // gc_object_type
+      // New ABI:
+      LongTy,                 // abi_version
+      IvarOffsets->getType(), // ivar_offsets
+      Properties->getType(),  // properties
+  llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+  // Fill in the structure, in the same order as the fields above.
+  std::vector<llvm::Constant*> Elements;
+  Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+  Elements.push_back(SuperClass);       // name string or NULL, not yet an id
+  Elements.push_back(MakeConstantString(Name, ".class_name"));
+  Elements.push_back(Zero);             // version — always 0 (see note above)
+  Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+  Elements.push_back(InstanceSize);
+  Elements.push_back(IVars);
+  Elements.push_back(Methods);
+  Elements.push_back(NULLPtr);          // dtable — filled in by the runtime
+  Elements.push_back(NULLPtr);          // subclass_list
+  Elements.push_back(NULLPtr);          // sibling_class
+  Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+  Elements.push_back(NULLPtr);          // gc_object_type
+  Elements.push_back(Zero);             // abi_version
+  Elements.push_back(IvarOffsets);
+  Elements.push_back(Properties);
+  // Create an instance of the structure
+  // This is now an externally visible symbol, so that we can speed up class
+  // messages in the next ABI.
+  return MakeGlobal(ClassTy, Elements, SymbolNameForClass(Name),
+         llvm::GlobalValue::ExternalLinkage);
+}
+
+/// Emits a method description list used in protocol metadata.  Each entry
+/// pairs a selector name with its type encoding.
+llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
+    const llvm::SmallVectorImpl<llvm::Constant *>  &MethodNames,
+    const llvm::SmallVectorImpl<llvm::Constant *>  &MethodTypes) {
+  // struct objc_method_description { char *name; char *types; }
+  llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(VMContext,
+    PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+    PtrToInt8Ty,
+    NULL);
+  // Build one description structure per method.
+  std::vector<llvm::Constant*> Descriptions;
+  for (unsigned i = 0, e = MethodTypes.size() ; i != e ; ++i) {
+    std::vector<llvm::Constant*> Fields;
+    Fields.push_back(MethodNames[i]);
+    Fields.push_back(MethodTypes[i]);
+    Descriptions.push_back(
+        llvm::ConstantStruct::get(ObjCMethodDescTy, Fields));
+  }
+  // Wrap the descriptions in a { count, descriptions[] } list and emit it.
+  llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+      MethodNames.size());
+  llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
+                                                   Descriptions);
+  llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(VMContext,
+      IntTy, ObjCMethodArrayTy, NULL);
+  std::vector<llvm::Constant*> ListFields;
+  ListFields.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+  ListFields.push_back(Array);
+  return MakeGlobal(ObjCMethodDescListTy, ListFields, ".objc_method_list");
+}
+
+/// Creates the protocol list structure used in classes, categories, and
+/// protocols to record the protocols they adopt.
+llvm::Constant *CGObjCGNU::GenerateProtocolList(
+    const llvm::SmallVectorImpl<std::string> &Protocols) {
+  llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+      Protocols.size());
+  llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+      PtrTy, //Should be a recursive pointer, but it's always NULL here.
+      LongTy,//FIXME: Should be size_t
+      ProtocolArrayTy,
+      NULL);
+  // Resolve each named protocol to its emitted structure, creating an empty
+  // protocol for any we have not seen a definition of.
+  std::vector<llvm::Constant*> ProtocolRefs;
+  for (unsigned i = 0, e = Protocols.size() ; i != e ; ++i) {
+    llvm::StringMap<llvm::Constant*>::iterator Existing =
+      ExistingProtocols.find(Protocols[i]);
+    llvm::Constant *Protocol = (Existing == ExistingProtocols.end()) ?
+      GenerateEmptyProtocol(Protocols[i]) : Existing->getValue();
+    ProtocolRefs.push_back(
+        llvm::ConstantExpr::getBitCast(Protocol, PtrToInt8Ty));
+  }
+  llvm::Constant *ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+      ProtocolRefs);
+  // { next (always NULL), count, protocols[] }
+  std::vector<llvm::Constant*> Fields;
+  Fields.push_back(NULLPtr);
+  Fields.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
+  Fields.push_back(ProtocolArray);
+  return MakeGlobal(ProtocolListTy, Fields, ".objc_protocol_list");
+}
+
+/// Returns a reference to the named protocol, as a Protocol* value.
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+                                            const ObjCProtocolDecl *PD) {
+  llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+  // A @protocol() expression may reference a protocol before anything that
+  // adopts it has been emitted.  Previously the default-inserted null map
+  // entry was passed straight to CreateBitCast; emit the protocol lazily
+  // instead (GenerateProtocol registers it in ExistingProtocols).
+  if (!protocol) {
+    GenerateProtocol(PD);
+    protocol = ExistingProtocols[PD->getNameAsString()];
+  }
+  const llvm::Type *T =
+    CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+  return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+/// Emits a protocol structure with no methods and no parent protocols,
+/// used when a protocol is referenced but no definition is available.
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+  const std::string &ProtocolName) {
+  llvm::SmallVector<std::string, 0> NoProtocols;
+  llvm::SmallVector<llvm::Constant*, 0> NoMethods;
+
+  llvm::Constant *ProtocolList = GenerateProtocolList(NoProtocols);
+  llvm::Constant *MethodList =
+    GenerateProtocolMethodList(NoMethods, NoMethods);
+  // Protocols are objects containing lists of the methods implemented and
+  // protocols adopted.
+  llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+      PtrToInt8Ty,
+      ProtocolList->getType(),
+      MethodList->getType(),
+      MethodList->getType(),
+      MethodList->getType(),
+      MethodList->getType(),
+      NULL);
+  // The isa pointer must be set to a magic number so the runtime knows it's
+  // the correct layout.
+  int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+      NonFragileProtocolVersion : ProtocolVersion;
+  std::vector<llvm::Constant*> Fields;
+  Fields.push_back(llvm::ConstantExpr::getIntToPtr(
+        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version),
+        IdTy));
+  Fields.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+  Fields.push_back(ProtocolList);
+  // All four method lists (instance, class, optional instance, optional
+  // class) share the same empty list.
+  Fields.push_back(MethodList);
+  Fields.push_back(MethodList);
+  Fields.push_back(MethodList);
+  Fields.push_back(MethodList);
+  return MakeGlobal(ProtocolTy, Fields, ".objc_protocol");
+}
+
+/// Emits the full metadata for an Objective-C protocol: the names of the
+/// protocols it adopts, the four method description lists (required and
+/// @optional, instance and class), and the required and optional property
+/// lists.  The emitted structure is recorded in ExistingProtocols so later
+/// references (classes, categories, @protocol expressions) can reuse it.
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+  ASTContext &Context = CGM.getContext();
+  std::string ProtocolName = PD->getNameAsString();
+  llvm::SmallVector<std::string, 16> Protocols;
+  for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
+       E = PD->protocol_end(); PI != E; ++PI)
+    Protocols.push_back((*PI)->getNameAsString());
+  // Collect information about instance methods, splitting @optional methods
+  // into their own lists.
+  llvm::SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+  llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+  llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+  llvm::SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
+  for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
+       E = PD->instmeth_end(); iter != E; iter++) {
+    std::string TypeStr;
+    Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
+    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+      // Fixed: @optional methods belong in the optional lists.  This
+      // condition's branches were previously swapped, so required and
+      // optional methods traded places in the emitted metadata.
+      OptionalInstanceMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+    } else {
+      InstanceMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+    }
+  }
+  // Collect information about class methods, again splitting out @optional
+  // methods (branches previously swapped here too).
+  llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
+  llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+  llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+  llvm::SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+  for (ObjCProtocolDecl::classmeth_iterator
+         iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
+       iter != endIter ; iter++) {
+    std::string TypeStr;
+    Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+    if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptionalClassMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+    } else {
+      ClassMethodNames.push_back(
+          MakeConstantString((*iter)->getSelector().getAsString()));
+      ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+    }
+  }
+
+  llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+  llvm::Constant *InstanceMethodList =
+    GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+  llvm::Constant *ClassMethodList =
+    GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+  llvm::Constant *OptionalInstanceMethodList =
+    GenerateProtocolMethodList(OptionalInstanceMethodNames,
+            OptionalInstanceMethodTypes);
+  llvm::Constant *OptionalClassMethodList =
+    GenerateProtocolMethodList(OptionalClassMethodNames,
+            OptionalClassMethodTypes);
+
+  // Property metadata: name, attributes, isSynthesized, setter name, setter
+  // types, getter name, getter types.
+  // The isSynthesized value is always set to 0 in a protocol.  It exists to
+  // simplify the runtime library by allowing it to use the same data
+  // structures for protocol metadata everywhere.
+  llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+          PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+          PtrToInt8Ty, NULL);
+  std::vector<llvm::Constant*> Properties;
+  std::vector<llvm::Constant*> OptionalProperties;
+
+  // Add all of the property methods need adding to the method list and to the
+  // property metadata list.
+  for (ObjCContainerDecl::prop_iterator
+         iter = PD->prop_begin(), endIter = PD->prop_end();
+       iter != endIter ; iter++) {
+    std::vector<llvm::Constant*> Fields;
+    ObjCPropertyDecl *property = (*iter);
+
+    Fields.push_back(MakeConstantString(property->getNameAsString()));
+    Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+                property->getPropertyAttributes()));
+    // isSynthesized is always 0 for protocol properties (see above).
+    Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+    if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+      std::string TypeStr;
+      Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+      llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+      // NOTE(review): the method lists were already emitted above, so these
+      // pushes into InstanceMethodTypes no longer affect anything —
+      // presumably vestigial; confirm before removing.
+      InstanceMethodTypes.push_back(TypeEncoding);
+      Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+      Fields.push_back(TypeEncoding);
+    } else {
+      Fields.push_back(NULLPtr);
+      Fields.push_back(NULLPtr);
+    }
+    if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+      std::string TypeStr;
+      Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+      llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+      InstanceMethodTypes.push_back(TypeEncoding);
+      Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+      Fields.push_back(TypeEncoding);
+    } else {
+      Fields.push_back(NULLPtr);
+      Fields.push_back(NULLPtr);
+    }
+    if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
+      OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+    } else {
+      Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+    }
+  }
+  // Emit the required property list: { count, next (NULL), properties[] }.
+  llvm::Constant *PropertyArray = llvm::ConstantArray::get(
+      llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
+  llvm::Constant* PropertyListInitFields[] =
+    {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+  llvm::Constant *PropertyListInit =
+      llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+  llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
+      PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
+      PropertyListInit, ".objc_property_list");
+
+  // Emit the optional property list in the same format.
+  llvm::Constant *OptionalPropertyArray =
+      llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
+          OptionalProperties.size()) , OptionalProperties);
+  llvm::Constant* OptionalPropertyListInitFields[] = {
+      llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
+      OptionalPropertyArray };
+
+  llvm::Constant *OptionalPropertyListInit =
+      llvm::ConstantStruct::get(VMContext, OptionalPropertyListInitFields, 3, false);
+  llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
+          OptionalPropertyListInit->getType(), false,
+          llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
+          ".objc_property_list");
+
+  // Protocols are objects containing lists of the methods implemented and
+  // protocols adopted.
+  llvm::StructType *ProtocolTy = llvm::StructType::get(VMContext, IdTy,
+      PtrToInt8Ty,
+      ProtocolList->getType(),
+      InstanceMethodList->getType(),
+      ClassMethodList->getType(),
+      OptionalInstanceMethodList->getType(),
+      OptionalClassMethodList->getType(),
+      PropertyList->getType(),
+      OptionalPropertyList->getType(),
+      NULL);
+  std::vector<llvm::Constant*> Elements;
+  // The isa pointer must be set to a magic number so the runtime knows it's
+  // the correct layout.
+  int Version = CGM.getContext().getLangOptions().ObjCNonFragileABI ?
+      NonFragileProtocolVersion : ProtocolVersion;
+  Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Version), IdTy));
+  Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+  Elements.push_back(ProtocolList);
+  Elements.push_back(InstanceMethodList);
+  Elements.push_back(ClassMethodList);
+  Elements.push_back(OptionalInstanceMethodList);
+  Elements.push_back(OptionalClassMethodList);
+  Elements.push_back(PropertyList);
+  Elements.push_back(OptionalPropertyList);
+  // Register the emitted protocol so later references reuse it.
+  ExistingProtocols[ProtocolName] =
+    llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+          ".objc_protocol"), IdTy);
+}
+/// Emits a dummy category ("AnotherHack" on the fake class
+/// "__ObjC_Protocol_Holder_Ugly_Hack") whose protocol list references every
+/// protocol recorded in ExistingProtocols, so all emitted protocols are
+/// reachable from the module's category list.
+void CGObjCGNU::GenerateProtocolHolderCategory(void) {
+  // Collect information about instance methods
+  llvm::SmallVector<Selector, 1> MethodSels;
+  llvm::SmallVector<llvm::Constant*, 1> MethodTypes;
+
+  std::vector<llvm::Constant*> Elements;
+  const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
+  const std::string CategoryName = "AnotherHack";
+  Elements.push_back(MakeConstantString(CategoryName));
+  Elements.push_back(MakeConstantString(ClassName));
+  // Instance method list (empty — MethodSels/MethodTypes are never filled)
+  Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+          ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+  // Class method list (also empty)
+  Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+          ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+  // Protocol list referencing every protocol emitted so far
+  llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
+      ExistingProtocols.size());
+  llvm::StructType *ProtocolListTy = llvm::StructType::get(VMContext,
+      PtrTy, //Should be a recurisve pointer, but it's always NULL here.
+      LongTy,//FIXME: Should be size_t
+      ProtocolArrayTy,
+      NULL);
+  std::vector<llvm::Constant*> ProtocolElements;
+  for (llvm::StringMapIterator<llvm::Constant*> iter =
+       ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+       iter != endIter ; iter++) {
+    llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
+            PtrTy);
+    ProtocolElements.push_back(Ptr);
+  }
+  llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+      ProtocolElements);
+  // Reuse the vector for the list structure: { next, count, protocols[] }
+  ProtocolElements.clear();
+  ProtocolElements.push_back(NULLPtr);
+  ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
+              ExistingProtocols.size()));
+  ProtocolElements.push_back(ProtocolArray);
+  Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
+                  ProtocolElements, ".objc_protocol_list"), PtrTy));
+  // Queue the category for inclusion in the module structure.
+  Categories.push_back(llvm::ConstantExpr::getBitCast(
+        MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+            PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+/// Emits the metadata for an Objective-C category: its name, the name of the
+/// class it extends, its instance and class method lists, and the protocols
+/// referenced by the extended class.  The result is queued in Categories.
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+  std::string ClassName = OCD->getClassInterface()->getNameAsString();
+  std::string CategoryName = OCD->getNameAsString();
+
+  // Selectors and type encodings for the category's instance methods.
+  llvm::SmallVector<Selector, 16> InstanceMethodSels;
+  llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+  for (ObjCCategoryImplDecl::instmeth_iterator I = OCD->instmeth_begin(),
+       E = OCD->instmeth_end(); I != E; ++I) {
+    InstanceMethodSels.push_back((*I)->getSelector());
+    std::string TypeStr;
+    CGM.getContext().getObjCEncodingForMethodDecl(*I, TypeStr);
+    InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+  }
+
+  // Selectors and type encodings for the category's class methods.
+  llvm::SmallVector<Selector, 16> ClassMethodSels;
+  llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+  for (ObjCCategoryImplDecl::classmeth_iterator I = OCD->classmeth_begin(),
+       E = OCD->classmeth_end(); I != E; ++I) {
+    ClassMethodSels.push_back((*I)->getSelector());
+    std::string TypeStr;
+    CGM.getContext().getObjCEncodingForMethodDecl(*I, TypeStr);
+    ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+  }
+
+  // Names of the protocols referenced by the extended class.
+  llvm::SmallVector<std::string, 16> Protocols;
+  const ObjCInterfaceDecl *ClassDecl = OCD->getClassInterface();
+  const ObjCList<ObjCProtocolDecl> &Protos =
+      ClassDecl->getReferencedProtocols();
+  for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+       E = Protos.end(); I != E; ++I)
+    Protocols.push_back((*I)->getNameAsString());
+
+  // Assemble the category structure:
+  // { category name, class name, instance methods, class methods, protocols }
+  std::vector<llvm::Constant*> Fields;
+  Fields.push_back(MakeConstantString(CategoryName));
+  Fields.push_back(MakeConstantString(ClassName));
+  Fields.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+          ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+          false), PtrTy));
+  Fields.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+          ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+        PtrTy));
+  Fields.push_back(llvm::ConstantExpr::getBitCast(
+        GenerateProtocolList(Protocols), PtrTy));
+  Categories.push_back(llvm::ConstantExpr::getBitCast(
+        MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+            PtrTy, PtrTy, PtrTy, NULL), Fields), PtrTy));
+}
+
+/// Builds the property metadata list for a class @implementation.
+///
+/// For each property implementation this records: name, attribute byte,
+/// whether it is @synthesize'd, and the getter/setter selector names and
+/// type encodings.  Getter and setter selectors are also appended to the
+/// InstanceMethodSels / InstanceMethodTypes out-parameters so the caller
+/// includes them in the class's instance method list.
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
+        llvm::SmallVectorImpl<Selector> &InstanceMethodSels,
+        llvm::SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+  ASTContext &Context = CGM.getContext();
+  //
+  // Property metadata: name, attributes, isSynthesized, setter name, setter
+  // types, getter name, getter types.
+  llvm::StructType *PropertyMetadataTy = llvm::StructType::get(VMContext,
+          PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+          PtrToInt8Ty, NULL);
+  std::vector<llvm::Constant*> Properties;
+
+
+  // Add all of the property methods need adding to the method list and to the
+  // property metadata list.
+  for (ObjCImplDecl::propimpl_iterator
+         iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
+       iter != endIter ; iter++) {
+    std::vector<llvm::Constant*> Fields;
+    ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+
+    Fields.push_back(MakeConstantString(property->getNameAsString()));
+    Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+                property->getPropertyAttributes()));
+    // isSynthesized: 1 if this property is @synthesize'd, 0 if @dynamic.
+    Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+                (*iter)->getPropertyImplementation() ==
+                ObjCPropertyImplDecl::Synthesize));
+    if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+      // Also emit the getter in the instance method list.
+      InstanceMethodSels.push_back(getter->getSelector());
+      std::string TypeStr;
+      Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+      llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+      InstanceMethodTypes.push_back(TypeEncoding);
+      Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+      Fields.push_back(TypeEncoding);
+    } else {
+      Fields.push_back(NULLPtr);
+      Fields.push_back(NULLPtr);
+    }
+    if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+      // Also emit the setter in the instance method list.
+      InstanceMethodSels.push_back(setter->getSelector());
+      std::string TypeStr;
+      Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+      llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+      InstanceMethodTypes.push_back(TypeEncoding);
+      Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+      Fields.push_back(TypeEncoding);
+    } else {
+      Fields.push_back(NULLPtr);
+      Fields.push_back(NULLPtr);
+    }
+    Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+  }
+  // Wrap the metadata in a { count, next (NULL), properties[] } structure
+  // and emit it as a private global.
+  llvm::ArrayType *PropertyArrayTy =
+      llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
+  llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
+          Properties);
+  llvm::Constant* PropertyListInitFields[] =
+    {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+  llvm::Constant *PropertyListInit =
+      llvm::ConstantStruct::get(VMContext, PropertyListInitFields, 3, false);
+  return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
+          llvm::GlobalValue::InternalLinkage, PropertyListInit,
+          ".objc_property_list");
+}
+
+/// Emits all of the metadata for a single class @implementation: the class
+/// and metaclass structures, the ivar list and per-ivar offset variables,
+/// the instance and class method lists, property metadata, and the protocol
+/// list.  The class structure is queued in Classes for the symbol table.
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+  ASTContext &Context = CGM.getContext();
+
+  // Get the superclass name.
+  const ObjCInterfaceDecl * SuperClassDecl =
+    OID->getClassInterface()->getSuperClass();
+  std::string SuperClassName;
+  if (SuperClassDecl) {
+    SuperClassName = SuperClassDecl->getNameAsString();
+    EmitClassRef(SuperClassName);
+  }
+
+  // Get the class name
+  ObjCInterfaceDecl *ClassDecl =
+    const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+  std::string ClassName = ClassDecl->getNameAsString();
+  // Emit the symbol that is used to generate linker errors if this class is
+  // referenced in other modules but not declared.
+  std::string classSymbolName = "__objc_class_name_" + ClassName;
+  if (llvm::GlobalVariable *symbol =
+      TheModule.getGlobalVariable(classSymbolName)) {
+    // A forward reference already created the symbol; just define it.
+    symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
+  } else {
+    new llvm::GlobalVariable(TheModule, LongTy, false,
+    llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
+    classSymbolName);
+  }
+
+  // Get the size of instances.  Layout size is in bits; /8 converts to
+  // bytes (assumes byte-aligned layout — TODO confirm).
+  int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
+
+  // Collect information about instance variables.
+  llvm::SmallVector<llvm::Constant*, 16> IvarNames;
+  llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
+  llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+  // Globals holding each ivar's offset (see loop below).
+  std::vector<llvm::Constant*> IvarOffsetValues;
+
+  int superInstanceSize = !SuperClassDecl ? 0 :
+    Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
+  // For non-fragile ivars, set the instance size to 0 - {the size of just this
+  // class}.  The runtime will then set this to the correct value on load.
+  if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+    instanceSize = 0 - (instanceSize - superInstanceSize);
+  }
+  for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
+      endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+      // Store the name
+      IvarNames.push_back(MakeConstantString((*iter)->getNameAsString()));
+      // Get the type encoding for this ivar
+      std::string TypeStr;
+      Context.getObjCEncodingForType((*iter)->getType(), TypeStr);
+      IvarTypes.push_back(MakeConstantString(TypeStr));
+      // Get the offset; under the non-fragile ABI it is relative to the
+      // start of this class's own ivars rather than the whole instance.
+      uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, ClassDecl, *iter);
+      uint64_t Offset = BaseOffset;
+      if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+        Offset = BaseOffset - superInstanceSize;
+      }
+      IvarOffsets.push_back(
+          llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset));
+      // Emit an externally-visible variable holding the absolute offset.
+      IvarOffsetValues.push_back(new llvm::GlobalVariable(TheModule, IntTy,
+          false, llvm::GlobalValue::ExternalLinkage,
+          llvm::ConstantInt::get(IntTy, BaseOffset),
+          "__objc_ivar_offset_value_" + ClassName +"." +
+          (*iter)->getNameAsString()));
+  }
+  // Collect the per-ivar offset variables into a private array global.
+  llvm::Constant *IvarOffsetArrayInit =
+      llvm::ConstantArray::get(llvm::ArrayType::get(PtrToIntTy,
+                  IvarOffsetValues.size()), IvarOffsetValues);
+  llvm::GlobalVariable *IvarOffsetArray = new llvm::GlobalVariable(TheModule,
+          IvarOffsetArrayInit->getType(), false,
+          llvm::GlobalValue::InternalLinkage, IvarOffsetArrayInit,
+          ".ivar.offsets");
+
+  // Collect information about instance methods
+  llvm::SmallVector<Selector, 16> InstanceMethodSels;
+  llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+  for (ObjCImplementationDecl::instmeth_iterator
+         iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
+       iter != endIter ; iter++) {
+    InstanceMethodSels.push_back((*iter)->getSelector());
+    std::string TypeStr;
+    Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+    InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+  }
+
+  // Property metadata; this also appends property getters/setters to the
+  // instance method lists.
+  llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
+          InstanceMethodTypes);
+
+
+  // Collect information about class methods
+  llvm::SmallVector<Selector, 16> ClassMethodSels;
+  llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+  for (ObjCImplementationDecl::classmeth_iterator
+         iter = OID->classmeth_begin(), endIter = OID->classmeth_end();
+       iter != endIter ; iter++) {
+    ClassMethodSels.push_back((*iter)->getSelector());
+    std::string TypeStr;
+    Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+    ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+  }
+  // Collect the names of referenced protocols
+  llvm::SmallVector<std::string, 16> Protocols;
+  const ObjCList<ObjCProtocolDecl> &Protos =ClassDecl->getReferencedProtocols();
+  for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+       E = Protos.end(); I != E; ++I)
+    Protocols.push_back((*I)->getNameAsString());
+
+
+
+  // Get the superclass pointer: a name string resolved by the runtime on
+  // load, or NULL for a root class.
+  llvm::Constant *SuperClass;
+  if (!SuperClassName.empty()) {
+    SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+  } else {
+    SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+  }
+  // Empty vector used to construct empty method lists
+  llvm::SmallVector<llvm::Constant*, 1>  empty;
+  // Generate the method and instance variable lists
+  llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+      InstanceMethodSels, InstanceMethodTypes, false);
+  llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+      ClassMethodSels, ClassMethodTypes, true);
+  llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+      IvarOffsets);
+  // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+  // we emit a symbol containing the offset for each ivar in the class.  This
+  // allows code compiled for the non-Fragile ABI to inherit from code compiled
+  // for the legacy ABI, without causing problems.  The converse is also
+  // possible, but causes all ivar accesses to be fragile.
+  int i = 0;
+  // Offset pointer for getting at the correct field in the ivar list when
+  // setting up the alias.  These are: The base address for the global, the
+  // ivar array (second field), the ivar in this list (set for each ivar), and
+  // the offset (third field in ivar structure)
+  const llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
+  // The literal 0 below is a null-constant placeholder for the per-ivar
+  // index; it is overwritten inside the loop before the array is used.
+  llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
+      llvm::ConstantInt::get(IndexTy, 1), 0,
+      llvm::ConstantInt::get(IndexTy, 2) };
+
+  for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
+      endIter = ClassDecl->ivar_end() ; iter != endIter ; iter++) {
+      const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+          +(*iter)->getNameAsString();
+      offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, i++);
+      // Get the correct ivar field
+      llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
+              IvarList, offsetPointerIndexes, 4);
+      // Get the existing alias, if one exists.
+      llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
+      if (offset) {
+          offset->setInitializer(offsetValue);
+          // If this is the real definition, change its linkage type so that
+          // different modules will use this one, rather than their private
+          // copy.
+          offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+      } else {
+          // Add a new alias if there isn't one already.
+          offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+                  false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+      }
+  }
+  //Generate metaclass for class methods.  Info flag 0x12 marks the
+  //metaclass and 0x11 (below) the class — presumably CLS_META/CLS_CLASS
+  //style flags; confirm against the runtime headers.
+  llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+      NULLPtr, 0x12L, ClassName.c_str(), 0, Zeros[0], GenerateIvarList(
+        empty, empty, empty), ClassMethodList, NULLPtr, NULLPtr, NULLPtr);
+
+  // Generate the class structure
+  llvm::Constant *ClassStruct =
+    GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L,
+                           ClassName.c_str(), 0,
+      llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+      MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
+      Properties);
+
+  // Resolve the class aliases, if they exist.
+  if (ClassPtrAlias) {
+    ClassPtrAlias->setAliasee(
+        llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+    ClassPtrAlias = 0;
+  }
+  if (MetaClassPtrAlias) {
+    MetaClassPtrAlias->setAliasee(
+        llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+    MetaClassPtrAlias = 0;
+  }
+
+  // Add class structure to list to be added to the symtab later
+  ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+  Classes.push_back(ClassStruct);
+}
+
+
+// Emits the module load function (".objc_load_function") that registers this
+// translation unit's classes, categories, selectors, and static instances
+// with the GNU runtime by calling __objc_exec_class on a module structure.
+// Returns NULL when the TU contains no Objective-C constructs.
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+  // Only emit an ObjC load function if no Objective-C stuff has been called
+  if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+      ExistingProtocols.empty() && TypedSelectors.empty() &&
+      UntypedSelectors.empty())
+    return NULL;
+
+  // Add all referenced protocols to a category.
+  GenerateProtocolHolderCategory();
+
+  // Selectors are {name, types} pairs.  If SelectorTy is an opaque pointer
+  // (no struct body seen yet), synthesise the struct type here and remember
+  // that pointers to it must be cast back to the opaque type.
+  const llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+          SelectorTy->getElementType());
+  const llvm::Type *SelStructPtrTy = SelectorTy;
+  bool isSelOpaque = false;
+  if (SelStructTy == 0) {
+    SelStructTy = llvm::StructType::get(VMContext, PtrToInt8Ty,
+                                        PtrToInt8Ty, NULL);
+    SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+    isSelOpaque = true;
+  }
+
+  // Name the ObjC types to make the IR a bit easier to read
+  TheModule.addTypeName(".objc_selector", SelStructPtrTy);
+  TheModule.addTypeName(".objc_id", IdTy);
+  TheModule.addTypeName(".objc_imp", IMPTy);
+
+  std::vector<llvm::Constant*> Elements;
+  llvm::Constant *Statics = NULLPtr;
+  // Generate statics list:
+  if (ConstantStrings.size()) {
+    llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+        ConstantStrings.size() + 1);
+    ConstantStrings.push_back(NULLPtr);
+
+    llvm::StringRef StringClass = CGM.getLangOptions().ObjCConstantStringClass;
+    if (StringClass.empty()) StringClass = "NXConstantString";
+    Elements.push_back(MakeConstantString(StringClass,
+                ".objc_static_class_name"));
+    Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+       ConstantStrings));
+    llvm::StructType *StaticsListTy =
+      llvm::StructType::get(VMContext, PtrToInt8Ty, StaticsArrayTy, NULL);
+    llvm::Type *StaticsListPtrTy =
+      llvm::PointerType::getUnqual(StaticsListTy);
+    Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+    llvm::ArrayType *StaticsListArrayTy =
+      llvm::ArrayType::get(StaticsListPtrTy, 2);
+    Elements.clear();
+    Elements.push_back(Statics);
+    Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+    Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+    Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+  }
+  // Array of classes, categories, and constant objects
+  llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+      Classes.size() + Categories.size()  + 2);
+  llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
+                                                     LongTy, SelStructPtrTy,
+                                                     llvm::Type::getInt16Ty(VMContext),
+                                                     llvm::Type::getInt16Ty(VMContext),
+                                                     ClassListTy, NULL);
+
+  Elements.clear();
+  // Pointer to an array of selectors used in this module.
+  std::vector<llvm::Constant*> Selectors;
+  for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+     iter = TypedSelectors.begin(), iterEnd = TypedSelectors.end();
+     iter != iterEnd ; ++iter) {
+    Elements.push_back(ExportUniqueString(iter->first.first, ".objc_sel_name"));
+    Elements.push_back(MakeConstantString(iter->first.second,
+                                          ".objc_sel_types"));
+    Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+    Elements.clear();
+  }
+  for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+      iter = UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+      iter != iterEnd; ++iter) {
+    Elements.push_back(
+        ExportUniqueString(iter->getKeyData(), ".objc_sel_name"));
+    Elements.push_back(NULLPtr);
+    Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+    Elements.clear();
+  }
+  // NULL-terminate the selector list.
+  Elements.push_back(NULLPtr);
+  Elements.push_back(NULLPtr);
+  Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+  Elements.clear();
+  // Number of static selectors
+  Elements.push_back(llvm::ConstantInt::get(LongTy, Selectors.size() ));
+  llvm::Constant *SelectorList = MakeGlobal(
+          llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
+          ".objc_selector_list");
+  Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+    SelStructPtrTy));
+
+  // Now that all of the static selectors exist, create pointers to them.
+  // The iteration order here must match the order used when building the
+  // selector list above, so 'index' lines up with each selector's slot.
+  int index = 0;
+  for (std::map<TypedSelector, llvm::GlobalAlias*>::iterator
+     iter=TypedSelectors.begin(), iterEnd =TypedSelectors.end();
+     iter != iterEnd; ++iter) {
+    llvm::Constant *Idxs[] = {Zeros[0],
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+    llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
+        true, llvm::GlobalValue::InternalLinkage,
+        llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+        ".objc_sel_ptr");
+    // If selectors are defined as an opaque type, cast the pointer to this
+    // type.
+    if (isSelOpaque) {
+      SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+        llvm::PointerType::getUnqual(SelectorTy));
+    }
+    (*iter).second->setAliasee(SelPtr);
+  }
+  for (llvm::StringMap<llvm::GlobalAlias*>::iterator
+      iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
+      iter != iterEnd; ++iter) {
+    llvm::Constant *Idxs[] = {Zeros[0],
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
+    llvm::Constant *SelPtr = new llvm::GlobalVariable
+      (TheModule, SelStructPtrTy,
+       true, llvm::GlobalValue::InternalLinkage,
+       llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+       ".objc_sel_ptr");
+    // If selectors are defined as an opaque type, cast the pointer to this
+    // type.
+    if (isSelOpaque) {
+      SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
+        llvm::PointerType::getUnqual(SelectorTy));
+    }
+    (*iter).second->setAliasee(SelPtr);
+  }
+  // Number of classes defined.
+  Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+        Classes.size()));
+  // Number of categories defined
+  Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+        Categories.size()));
+  // Create an array of classes, then categories, then static object instances
+  Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+  //  NULL-terminated list of static object instances (mainly constant strings)
+  Classes.push_back(Statics);
+  Classes.push_back(NULLPtr);
+  llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+  Elements.push_back(ClassList);
+  // Construct the symbol table
+  llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
+
+  // The symbol table is contained in a module which has some version-checking
+  // constants
+  llvm::StructType * ModuleTy = llvm::StructType::get(VMContext, LongTy, LongTy,
+      PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy), NULL);
+  Elements.clear();
+  // Runtime version used for compatibility checking.
+  if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
+    Elements.push_back(llvm::ConstantInt::get(LongTy,
+        NonFragileRuntimeVersion));
+  } else {
+    Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+  }
+  // sizeof(ModuleTy)
+  llvm::TargetData td(&TheModule);
+  Elements.push_back(llvm::ConstantInt::get(LongTy,
+                     td.getTypeSizeInBits(ModuleTy)/8));
+  //FIXME: Should be the path to the file where this module was declared
+  Elements.push_back(NULLPtr);
+  Elements.push_back(SymTab);
+  llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+
+  // Create the load function calling the runtime entry point with the module
+  // structure
+  llvm::Function * LoadFunction = llvm::Function::Create(
+      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
+      llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+      &TheModule);
+  llvm::BasicBlock *EntryBB =
+      llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+  CGBuilderTy Builder(VMContext);
+  Builder.SetInsertPoint(EntryBB);
+
+  std::vector<const llvm::Type*> Params(1,
+      llvm::PointerType::getUnqual(ModuleTy));
+  llvm::Value *Register = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+        llvm::Type::getVoidTy(VMContext), Params, true), "__objc_exec_class");
+  Builder.CreateCall(Register, Module);
+  Builder.CreateRetVoid();
+
+  return LoadFunction;
+}
+
+// GenerateMethod - Emit the internal-linkage LLVM function that implements
+// the given Objective-C method, using the GNU runtime's mangling scheme.
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+                                          const ObjCContainerDecl *CD) {
+  // Methods defined in a category implementation mangle the category name
+  // into the symbol; plain class methods use an empty category name.
+  const ObjCCategoryImplDecl *CategoryImpl =
+    dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+  std::string CategoryName =
+    CategoryImpl ? CategoryImpl->getNameAsString() : "";
+  std::string ClassName = OMD->getClassInterface()->getNameAsString();
+  std::string SelectorName = OMD->getSelector().getAsString();
+  bool isClassMethod = !OMD->isInstanceMethod();
+
+  // Lower the Objective-C signature to an LLVM function type.
+  CodeGenTypes &Types = CGM.getTypes();
+  const llvm::FunctionType *FnTy =
+    Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+
+  // Internal linkage: the method is only reachable via the method lists.
+  std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+      SelectorName, isClassMethod);
+  return llvm::Function::Create(FnTy,
+                                llvm::GlobalValue::InternalLinkage,
+                                FunctionName,
+                                &TheModule);
+}
+
+// GetPropertyGetFunction - Return a declaration of the runtime helper:
+//   id objc_getProperty(id, SEL, ptrdiff_t, bool)
+llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
+  const llvm::Type *BoolTy =
+    CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+  std::vector<const llvm::Type*> ArgTys;
+  ArgTys.push_back(IdTy);
+  ArgTys.push_back(SelectorTy);
+  // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+  ArgTys.push_back(LongTy);
+  ArgTys.push_back(BoolTy);
+  const llvm::FunctionType *GetterTy =
+    llvm::FunctionType::get(IdTy, ArgTys, false);
+  return cast<llvm::Function>(
+      CGM.CreateRuntimeFunction(GetterTy, "objc_getProperty"));
+}
+
+// GetPropertySetFunction - Return a declaration of the runtime helper:
+//   void objc_setProperty(id, SEL, ptrdiff_t, id, bool, bool)
+llvm::Function *CGObjCGNU::GetPropertySetFunction() {
+  const llvm::Type *BoolTy =
+    CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+  std::vector<const llvm::Type*> ArgTys;
+  ArgTys.push_back(IdTy);
+  ArgTys.push_back(SelectorTy);
+  // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+  ArgTys.push_back(LongTy);
+  ArgTys.push_back(IdTy);
+  ArgTys.push_back(BoolTy);
+  ArgTys.push_back(BoolTy);
+  const llvm::FunctionType *SetterTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), ArgTys, false);
+  return cast<llvm::Function>(
+      CGM.CreateRuntimeFunction(SetterTy, "objc_setProperty"));
+}
+
+// EnumerationMutationFunction - Return a declaration of the runtime hook
+// called when a collection is mutated during fast enumeration:
+//   void objc_enumerationMutation(id)
+llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+  CodeGen::CodeGenTypes &Types = CGM.getTypes();
+  ASTContext &Ctx = CGM.getContext();
+  llvm::SmallVector<QualType,16> ArgTypes;
+  ArgTypes.push_back(ASTIdTy);
+  const llvm::FunctionType *FnTy =
+    Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, ArgTypes,
+                                                CC_Default, false), false);
+  return CGM.CreateRuntimeFunction(FnTy, "objc_enumerationMutation");
+}
+
+// EmitTryOrSynchronizedStmt - Emit code for @try or @synchronized.  Both use
+// the same skeleton: a body run under a landing pad, optional @catch handlers
+// (@try only), and a @finally region (explicit for @try; an implicit
+// objc_sync_exit call for @synchronized).
+void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+                                          const Stmt &S) {
+  // Pointer to the personality function
+  llvm::Constant *Personality =
+    CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
+          true),
+        "__gnu_objc_personality_v0");
+  Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
+  std::vector<const llvm::Type*> Params;
+  Params.push_back(PtrTy);
+  llvm::Value *RethrowFn =
+    CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+          Params, false), "_Unwind_Resume");
+
+  bool isTry = isa<ObjCAtTryStmt>(S);
+  llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+  llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+  llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+  llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow");
+  llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+  llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+  llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+  // @synchronized() — acquire the lock before entering the protected region.
+  if (!isTry) {
+    std::vector<const llvm::Type*> Args(1, IdTy);
+    llvm::FunctionType *FTy =
+      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+    llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+    llvm::Value *SyncArg =
+      CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+    SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+    CGF.Builder.CreateCall(SyncEnter, SyncArg);
+  }
+
+
+  // Push an EH context entry, used for handling rethrows and jumps
+  // through finally.
+  CGF.PushCleanupBlock(FinallyBlock);
+
+  // Emit the statements in the @try {} block
+  CGF.setInvokeDest(TryHandler);
+
+  CGF.EmitBlock(TryBlock);
+  CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+                     : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+  // Jump to @finally if there is no exception
+  CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+  // Emit the handlers
+  CGF.EmitBlock(TryHandler);
+
+  // Get the correct versions of the exception handling intrinsics
+  llvm::Value *llvm_eh_exception =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+  llvm::Value *llvm_eh_selector =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+  llvm::Value *llvm_eh_typeid_for =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+  // Exception object
+  llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+  llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+  llvm::SmallVector<llvm::Value*, 8> ESelArgs;
+  llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+
+  ESelArgs.push_back(Exc);
+  ESelArgs.push_back(Personality);
+
+  bool HasCatchAll = false;
+  // Only @try blocks are allowed @catch blocks, but both can have @finally
+  if (isTry) {
+    if (const ObjCAtCatchStmt* CatchStmt =
+      cast<ObjCAtTryStmt>(S).getCatchStmts())  {
+      CGF.setInvokeDest(CatchInCatch);
+
+      for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+        const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+        Handlers.push_back(std::make_pair(CatchDecl,
+                                          CatchStmt->getCatchBody()));
+
+        // @catch() and @catch(id) both catch any ObjC exception
+        if (!CatchDecl || CatchDecl->getType()->isObjCIdType()
+            || CatchDecl->getType()->isObjCQualifiedIdType()) {
+          // Use i8* null here to signal this is a catch all, not a cleanup.
+          ESelArgs.push_back(NULLPtr);
+          HasCatchAll = true;
+          // No further catches after this one will ever by reached
+          break;
+        }
+
+        // All other types should be Objective-C interface pointer types.
+        const ObjCObjectPointerType *OPT =
+          CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+        assert(OPT && "Invalid @catch type.");
+        const ObjCInterfaceType *IT =
+          OPT->getPointeeType()->getAs<ObjCInterfaceType>();
+        assert(IT && "Invalid @catch type.");
+        llvm::Value *EHType =
+          MakeConstantString(IT->getDecl()->getNameAsString());
+        ESelArgs.push_back(EHType);
+      }
+    }
+  }
+
+  // We use a cleanup unless there was already a catch all.
+  if (!HasCatchAll) {
+    ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
+    Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+  }
+
+  // Find which handler was matched.
+  llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector,
+      ESelArgs.begin(), ESelArgs.end(), "selector");
+
+  for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+    const ParmVarDecl *CatchParam = Handlers[i].first;
+    const Stmt *CatchBody = Handlers[i].second;
+
+    llvm::BasicBlock *Next = 0;
+
+    // The last handler always matches.
+    if (i + 1 != e) {
+      assert(CatchParam && "Only last handler can be a catch all.");
+
+      // Test whether this block matches the type for the selector and branch
+      // to Match if it does, or to the next BB if it doesn't.
+      llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+      Next = CGF.createBasicBlock("catch.next");
+      llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for,
+          CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy));
+      CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match,
+          Next);
+
+      CGF.EmitBlock(Match);
+    }
+
+    if (CatchBody) {
+      // A @catch(...) handler has a body but no parameter declaration; the
+      // previous code unconditionally dereferenced CatchParam here and would
+      // crash on such a handler.  Treat the exception object as an id then.
+      llvm::Value *ExcObject;
+      if (CatchParam)
+        ExcObject = CGF.Builder.CreateBitCast(Exc,
+            CGF.ConvertType(CatchParam->getType()));
+      else
+        ExcObject = CGF.Builder.CreateBitCast(Exc, IdTy);
+
+      // Bind the catch parameter if it exists.
+      if (CatchParam) {
+        // CatchParam is a ParmVarDecl because of the grammar
+        // construction used to handle this, but for codegen purposes
+        // we treat this as a local decl.
+        CGF.EmitLocalBlockVarDecl(*CatchParam);
+        CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+      }
+
+      CGF.ObjCEHValueStack.push_back(ExcObject);
+      CGF.EmitStmt(CatchBody);
+      CGF.ObjCEHValueStack.pop_back();
+
+      CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+      if (Next)
+        CGF.EmitBlock(Next);
+    } else {
+      assert(!Next && "catch all should be last handler.");
+
+      CGF.Builder.CreateStore(Exc, RethrowPtr);
+      CGF.EmitBranchThroughCleanup(FinallyRethrow);
+    }
+  }
+  // The @finally block is a secondary landing pad for any exceptions thrown in
+  // @catch() blocks
+  CGF.EmitBlock(CatchInCatch);
+  Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+  ESelArgs.clear();
+  ESelArgs.push_back(Exc);
+  ESelArgs.push_back(Personality);
+  // If there is a @catch or @finally clause outside of this one then we
+  // need to make sure that we catch and rethrow it.
+  if (PrevLandingPad) {
+    ESelArgs.push_back(NULLPtr);
+  } else {
+    ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
+  }
+  CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
+      "selector");
+  CGF.Builder.CreateCall(llvm_eh_typeid_for,
+      CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy));
+  CGF.Builder.CreateStore(Exc, RethrowPtr);
+  CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+  CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+  CGF.setInvokeDest(PrevLandingPad);
+
+  CGF.EmitBlock(FinallyBlock);
+
+
+  if (isTry) {
+    if (const ObjCAtFinallyStmt* FinallyStmt =
+        cast<ObjCAtTryStmt>(S).getFinallyStmt())
+      CGF.EmitStmt(FinallyStmt->getFinallyBody());
+  } else {
+    // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+    // @synchronized.
+    std::vector<const llvm::Type*> Args(1, IdTy);
+    llvm::FunctionType *FTy =
+      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+    llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+    llvm::Value *SyncArg =
+      CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+    SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+    CGF.Builder.CreateCall(SyncExit, SyncArg);
+  }
+
+  if (Info.SwitchBlock)
+    CGF.EmitBlock(Info.SwitchBlock);
+  if (Info.EndBlock)
+    CGF.EmitBlock(Info.EndBlock);
+
+  // Branch around the rethrow code.
+  CGF.EmitBranch(FinallyEnd);
+
+  CGF.EmitBlock(FinallyRethrow);
+
+  llvm::Value *ExceptionObject = CGF.Builder.CreateLoad(RethrowPtr);
+  llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+  if (!UnwindBB) {
+    CGF.Builder.CreateCall(RethrowFn, ExceptionObject);
+    // Exception always thrown, next instruction is never reached.
+    CGF.Builder.CreateUnreachable();
+  } else {
+    // If there is a @catch block outside this scope, we invoke instead of
+    // calling because we may return to this function.  This is very slow, but
+    // some people still do it.  It would be nice to add an optimised path for
+    // this.
+    CGF.Builder.CreateInvoke(RethrowFn, UnwindBB, UnwindBB, &ExceptionObject,
+        &ExceptionObject+1);
+  }
+
+  CGF.EmitBlock(FinallyEnd);
+}
+
+// EmitThrowStmt - Emit code for @throw.  A bare '@throw;' rethrows the
+// exception of the innermost enclosing @catch block.
+void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+                              const ObjCAtThrowStmt &S) {
+  // Declaration of: void objc_exception_throw(id)
+  std::vector<const llvm::Type*> ArgTys(1, IdTy);
+  llvm::FunctionType *ThrowTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), ArgTys, false);
+  llvm::Value *ThrowFn =
+    CGM.CreateRuntimeFunction(ThrowTy, "objc_exception_throw");
+
+  // Determine the object being thrown: either the @throw operand, or — for
+  // a bare rethrow — the exception bound by the enclosing @catch.
+  llvm::Value *ExceptionAsObject;
+  if (const Expr *ThrowExpr = S.getThrowExpr()) {
+    ExceptionAsObject = CGF.EmitScalarExpr(ThrowExpr);
+  } else {
+    assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+           "Unexpected rethrow outside @catch block.");
+    ExceptionAsObject = CGF.ObjCEHValueStack.back();
+  }
+  ExceptionAsObject =
+      CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
+
+  // Note: This may have to be an invoke, if we want to support constructs like:
+  // @try {
+  //  @throw(obj);
+  // }
+  // @catch(id) ...
+  //
+  // This is effectively turning @throw into an incredibly-expensive goto, but
+  // it may happen as a result of inlining followed by missed optimizations, or
+  // as a result of stupidity.
+  llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+  if (UnwindBB) {
+    CGF.Builder.CreateInvoke(ThrowFn, UnwindBB, UnwindBB, &ExceptionAsObject,
+        &ExceptionAsObject+1);
+  } else {
+    CGF.Builder.CreateCall(ThrowFn, ExceptionAsObject);
+    CGF.Builder.CreateUnreachable();
+  }
+  // Clear the insertion point to indicate we are in unreachable code.
+  CGF.Builder.ClearInsertionPoint();
+}
+
+// EmitObjCWeakRead - Load a __weak object via the runtime's read barrier.
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+                                          llvm::Value *AddrWeakObj) {
+  // Bind by reference; the old code copied the entire IRBuilder by value.
+  CGBuilderTy &B = CGF.Builder;
+  AddrWeakObj = EnforceType(B, AddrWeakObj, IdTy);
+  return B.CreateCall(WeakReadFn, AddrWeakObj);
+}
+
+// EmitObjCWeakAssign - Store to a __weak object via the runtime's
+// write barrier.
+void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+                                   llvm::Value *src, llvm::Value *dst) {
+  // Bind by reference; the old code copied the entire IRBuilder by value.
+  CGBuilderTy &B = CGF.Builder;
+  src = EnforceType(B, src, IdTy);
+  dst = EnforceType(B, dst, PtrToIdTy);
+  B.CreateCall2(WeakAssignFn, src, dst);
+}
+
+// EmitObjCGlobalAssign - Store to a global object via the runtime's
+// write barrier.
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+                                     llvm::Value *src, llvm::Value *dst) {
+  // Bind by reference; the old code copied the entire IRBuilder by value.
+  CGBuilderTy &B = CGF.Builder;
+  src = EnforceType(B, src, IdTy);
+  dst = EnforceType(B, dst, PtrToIdTy);
+  B.CreateCall2(GlobalAssignFn, src, dst);
+}
+
+// EmitObjCIvarAssign - Store to an instance variable via the runtime's
+// write barrier, passing the ivar's byte offset.
+void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+                                   llvm::Value *src, llvm::Value *dst,
+                                   llvm::Value *ivarOffset) {
+  // Bind by reference; the old code copied the entire IRBuilder by value.
+  CGBuilderTy &B = CGF.Builder;
+  src = EnforceType(B, src, IdTy);
+  dst = EnforceType(B, dst, PtrToIdTy);
+  B.CreateCall3(IvarAssignFn, src, dst, ivarOffset);
+}
+
+// EmitObjCStrongCastAssign - Store through a __strong cast via the runtime's
+// write barrier.
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+                                         llvm::Value *src, llvm::Value *dst) {
+  // Bind by reference; the old code copied the entire IRBuilder by value.
+  CGBuilderTy &B = CGF.Builder;
+  src = EnforceType(B, src, IdTy);
+  dst = EnforceType(B, dst, PtrToIdTy);
+  B.CreateCall2(StrongCastAssignFn, src, dst);
+}
+
+// EmitGCMemmoveCollectable - Move a block of GC-visible memory via the
+// runtime's memmove barrier, with the byte count derived from Ty's size.
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+                                         llvm::Value *DestPtr,
+                                         llvm::Value *SrcPtr,
+                                         QualType Ty) {
+  // Bind by reference; the old code copied the entire IRBuilder by value.
+  CGBuilderTy &B = CGF.Builder;
+  DestPtr = EnforceType(B, DestPtr, IdTy);
+  SrcPtr = EnforceType(B, SrcPtr, PtrToIdTy);
+
+  // getTypeInfo reports the size in bits; the runtime call wants bytes.
+  std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
+  unsigned long size = TypeInfo.first/8;
+  // FIXME: size_t
+  llvm::Value *N = llvm::ConstantInt::get(LongTy, size);
+
+  B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, N);
+}
+
+// ObjCIvarOffsetVariable - Get (creating if necessary) the global that holds
+// a pointer to the ivar's offset, used by the non-fragile ABI so the real
+// offset can be fixed up at load time.
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+                              const ObjCInterfaceDecl *ID,
+                              const ObjCIvarDecl *Ivar) {
+  const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+    + '.' + Ivar->getNameAsString();
+  // Emit the variable and initialize it with what we think the correct value
+  // is.  This allows code compiled with non-fragile ivars to work correctly
+  // when linked against code which isn't (most of the time).
+  llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+  if (!IvarOffsetPointer) {
+    uint64_t Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+    // Note: the third argument of this ConstantInt::get overload is 'bool
+    // isSigned'.  The previous code passed the string literal "ivar" there,
+    // which silently converted to 'true'; offsets are non-negative, so no
+    // sign extension is wanted.
+    llvm::ConstantInt *OffsetGuess =
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Offset);
+    // Don't emit the guess in non-PIC code because the linker will not be able
+    // to replace it with the real version for a library.  In non-PIC code you
+    // must compile with the fragile ABI if you want to use ivars from a
+    // GCC-compiled class.
+    if (CGM.getLangOptions().PICLevel) {
+      llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
+            llvm::Type::getInt32Ty(VMContext), false,
+            llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
+      IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+            IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
+            IvarOffsetGV, Name);
+    } else {
+      IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+              llvm::Type::getInt32PtrTy(VMContext), false,
+              llvm::GlobalValue::ExternalLinkage, 0, Name);
+    }
+  }
+  return IvarOffsetPointer;
+}
+
+// EmitObjCValueForIvar - Build an lvalue for an ivar access on an object of
+// static type ObjectTy, deferring to the shared offset-based helper.
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+                                       QualType ObjectTy,
+                                       llvm::Value *BaseValue,
+                                       const ObjCIvarDecl *Ivar,
+                                       unsigned CVRQualifiers) {
+  const ObjCInterfaceType *IfaceTy = ObjectTy->getAs<ObjCInterfaceType>();
+  const ObjCInterfaceDecl *Iface = IfaceTy->getDecl();
+  // The offset may be a constant (fragile ABI) or a load (non-fragile ABI).
+  llvm::Value *Offset = EmitIvarOffset(CGF, Iface, Ivar);
+  return EmitValueForIvarAtOffset(CGF, Iface, BaseValue, Ivar, CVRQualifiers,
+                                  Offset);
+}
+
+// FindIvarInterface - Walk up the class hierarchy starting at OID and return
+// the first interface whose directly-declared ivars contain OIVD, or null if
+// no class in the chain declares it.
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+                                                  const ObjCInterfaceDecl *OID,
+                                                  const ObjCIvarDecl *OIVD) {
+  for (const ObjCInterfaceDecl *Cursor = OID; Cursor;
+       Cursor = Cursor->getSuperClass()) {
+    llvm::SmallVector<ObjCIvarDecl*, 16> OwnIvars;
+    Context.ShallowCollectObjCIvars(Cursor, OwnIvars);
+    for (unsigned i = 0, e = OwnIvars.size(); i != e; ++i)
+      if (OwnIvars[i] == OIVD)
+        return Cursor;
+  }
+  return 0;
+}
+
+// EmitIvarOffset - Produce the byte offset of Ivar within Interface: a load
+// through the per-ivar offset variable under the non-fragile ABI, or a
+// compile-time constant under the fragile ABI.
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+                         const ObjCInterfaceDecl *Interface,
+                         const ObjCIvarDecl *Ivar) {
+  if (CGM.getLangOptions().ObjCNonFragileABI) {
+    Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+    return CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+                ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar"));
+  }
+  uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+  // Note: the previous code passed the string literal "ivar" as the third
+  // argument of ConstantInt::get, which is 'bool isSigned' — the literal
+  // silently converted to 'true'.  Offsets are non-negative; drop it.
+  return llvm::ConstantInt::get(LongTy, Offset);
+}
+
+// CreateGNUObjCRuntime - Factory used by CodeGenModule to instantiate the
+// GNU-runtime Objective-C code generator.  Caller takes ownership.
+CodeGen::CGObjCRuntime *
+CodeGen::CreateGNUObjCRuntime(CodeGen::CodeGenModule &CGM) {
+  return new CGObjCGNU(CGM);
+}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..2f931bd
--- /dev/null
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,5841 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+// Common CGObjCRuntime functions, these don't belong here, but they
+// don't belong in CGObjCRuntime either so we will live with it for
+// now.
+
+/// FindIvarInterface - Find the interface containing the ivar.
+///
+/// FIXME: We shouldn't need to do this, the containing context should
+/// be fixed.
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+                                                  const ObjCInterfaceDecl *OID,
+                                                  const ObjCIvarDecl *OIVD,
+                                                  unsigned &Index) {
+  // FIXME: The index here is closely tied to how
+  // ASTContext::getObjCLayout is implemented. This should be fixed to
+  // get the information from the layout directly.
+  Index = 0;
+  llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+  Context.ShallowCollectObjCIvars(OID, Ivars);
+  for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
+    if (OIVD == Ivars[k])
+      return OID;
+    ++Index;
+  }
+
+  // Otherwise check in the super class.
+  if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+    return FindIvarInterface(Context, Super, OIVD, Index);
+
+  return 0;
+}
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+                                     const ObjCInterfaceDecl *OID,
+                                     const ObjCImplementationDecl *ID,
+                                     const ObjCIvarDecl *Ivar) {
+  unsigned Index;
+  const ObjCInterfaceDecl *Container =
+    FindIvarInterface(CGM.getContext(), OID, Ivar, Index);
+  assert(Container && "Unable to find ivar container");
+
+  // If we know have an implementation (and the ivar is in it) then
+  // look up in the implementation layout.
+  const ASTRecordLayout *RL;
+  if (ID && ID->getClassInterface() == Container)
+    RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+  else
+    RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+  return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+                                              const ObjCInterfaceDecl *OID,
+                                              const ObjCIvarDecl *Ivar) {
+  return LookupFieldBitOffset(CGM, OID, 0, Ivar) / 8;
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+                                              const ObjCImplementationDecl *OID,
+                                              const ObjCIvarDecl *Ivar) {
+  return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) / 8;
+}
+
/// EmitValueForIvarAtOffset - Build an LValue for \p Ivar within the object
/// \p BaseValue, given the ivar's byte offset \p Offset (an llvm::Value so
/// it may be either a constant or a runtime load). \p CVRQualifiers are
/// merged into the ivar type's own qualifiers. Bit-field ivars get a
/// bit-field LValue; everything else gets a simple address LValue.
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  QualType IvarTy = Ivar->getType();
  const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
  V = CGF.Builder.CreateGEP(V, Offset, "add.ptr");
  V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));

  Qualifiers Quals = CGF.MakeQualifiers(IvarTy);
  Quals.addCVRQualifiers(CVRQualifiers);

  if (Ivar->isBitField()) {
    // We need to compute the bit offset for the bit-field, the offset
    // is to the byte. Note, there is a subtle invariant here: we can
    // only call this routine on non-synthesized ivars but we may be
    // called for synthesized ivars. However, a synthesized ivar can
    // never be a bit-field so this is safe.
    uint64_t BitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar) % 8;

    uint64_t BitFieldSize =
      Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();
    return LValue::MakeBitfield(V, BitOffset, BitFieldSize,
                                IvarTy->isSignedIntegerType(),
                                Quals.getCVRQualifiers());
  }

  // Non-bit-field case: a plain qualified address.
  LValue LV = LValue::MakeAddr(V, Quals);
  return LV;
}
+
+///
+
+namespace {
+
// ConstantVector - shorthand for the lists of constants used to build the
// initializers of the Objective-C metadata structures below.
typedef std::vector<llvm::Constant*> ConstantVector;
+
+// FIXME: We should find a nicer way to make the labels for metadata, string
+// concatenation is lame.
+
/// ObjCCommonTypesHelper - LLVM/clang types and lazily-created runtime
/// function declarations shared by the fragile (ObjCTypesHelper) and
/// non-fragile (ObjCNonFragileABITypesHelper) Apple runtime mappings.
/// The type members themselves are initialized by the constructor, which
/// is defined elsewhere in this file.
class ObjCCommonTypesHelper {
protected:
  llvm::LLVMContext &VMContext;

private:
  llvm::Constant *getMessageSendFn() const {
    // id objc_msgSend (id, SEL, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(SelectorPtrTy);
    return
      CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                        Params, true),
                                "objc_msgSend");
  }

  llvm::Constant *getMessageSendStretFn() const {
    // void objc_msgSend_stret (id, SEL, ...) -- declared with void return.
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(SelectorPtrTy);
    return
      CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                                                        Params, true),
                                "objc_msgSend_stret");

  }

  llvm::Constant *getMessageSendFpretFn() const {
    // FIXME: This should be long double on x86_64?
    // [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(SelectorPtrTy);
    return
      CGM.CreateRuntimeFunction(llvm::FunctionType::get(
                                             llvm::Type::getDoubleTy(VMContext),
                                                        Params,
                                                        true),
                                "objc_msgSend_fpret");

  }

  llvm::Constant *getMessageSendSuperFn() const {
    // id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
    const char *SuperName = "objc_msgSendSuper";
    std::vector<const llvm::Type*> Params;
    Params.push_back(SuperPtrTy);
    Params.push_back(SelectorPtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, true),
                                     SuperName);
  }

  llvm::Constant *getMessageSendSuperFn2() const {
    // id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
    const char *SuperName = "objc_msgSendSuper2";
    std::vector<const llvm::Type*> Params;
    Params.push_back(SuperPtrTy);
    Params.push_back(SelectorPtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, true),
                                     SuperName);
  }

  llvm::Constant *getMessageSendSuperStretFn() const {
    // void objc_msgSendSuper_stret(void * stretAddr, struct objc_super *super,
    //                              SEL op, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(Int8PtrTy);
    Params.push_back(SuperPtrTy);
    Params.push_back(SelectorPtrTy);
    return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                              Params, true),
      "objc_msgSendSuper_stret");
  }

  llvm::Constant *getMessageSendSuperStretFn2() const {
    // void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
    //                               SEL op, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(Int8PtrTy);
    Params.push_back(SuperPtrTy);
    Params.push_back(SelectorPtrTy);
    return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                              Params, true),
      "objc_msgSendSuper2_stret");
  }

  llvm::Constant *getMessageSendSuperFpretFn() const {
    // There is no objc_msgSendSuper_fpret? How can that work?
    return getMessageSendSuperFn();
  }

  llvm::Constant *getMessageSendSuperFpretFn2() const {
    // There is no objc_msgSendSuper_fpret? How can that work?
    return getMessageSendSuperFn2();
  }

protected:
  CodeGen::CodeGenModule &CGM;

public:
  // Basic scalar LLVM types, sized to match the target's C types.
  const llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
  const llvm::Type *Int8PtrTy;

  /// ObjectPtrTy - LLVM type for object handles (typeof(id))
  const llvm::Type *ObjectPtrTy;

  /// PtrObjectPtrTy - LLVM type for id *
  const llvm::Type *PtrObjectPtrTy;

  /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
  const llvm::Type *SelectorPtrTy;
  /// ProtocolPtrTy - LLVM type for external protocol handles
  /// (typeof(Protocol))
  const llvm::Type *ExternalProtocolPtrTy;

  // SuperCTy - clang type for struct objc_super.
  QualType SuperCTy;
  // SuperPtrCTy - clang type for struct objc_super *.
  QualType SuperPtrCTy;

  /// SuperTy - LLVM type for struct objc_super.
  const llvm::StructType *SuperTy;
  /// SuperPtrTy - LLVM type for struct objc_super *.
  const llvm::Type *SuperPtrTy;

  /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
  /// in GCC parlance).
  const llvm::StructType *PropertyTy;

  /// PropertyListTy - LLVM type for struct objc_property_list
  /// (_prop_list_t in GCC parlance).
  const llvm::StructType *PropertyListTy;
  /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
  const llvm::Type *PropertyListPtrTy;

  // MethodTy - LLVM type for struct objc_method.
  const llvm::StructType *MethodTy;

  /// CacheTy - LLVM type for struct objc_cache.
  const llvm::Type *CacheTy;
  /// CachePtrTy - LLVM type for struct objc_cache *.
  const llvm::Type *CachePtrTy;

  /// getGetPropertyFn - Declaration of the runtime's atomic property
  /// getter helper. Note the offset is passed as Ctx.LongTy.
  llvm::Constant *getGetPropertyFn() {
    CodeGen::CodeGenTypes &Types = CGM.getTypes();
    ASTContext &Ctx = CGM.getContext();
    // id objc_getProperty (id, SEL, ptrdiff_t, bool)
    llvm::SmallVector<QualType,16> Params;
    QualType IdType = Ctx.getObjCIdType();
    QualType SelType = Ctx.getObjCSelType();
    Params.push_back(IdType);
    Params.push_back(SelType);
    Params.push_back(Ctx.LongTy);
    Params.push_back(Ctx.BoolTy);
    const llvm::FunctionType *FTy =
      Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
                                                  CC_Default, false), false);
    return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
  }

  /// getSetPropertyFn - Declaration of the runtime's property setter helper.
  llvm::Constant *getSetPropertyFn() {
    CodeGen::CodeGenTypes &Types = CGM.getTypes();
    ASTContext &Ctx = CGM.getContext();
    // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
    llvm::SmallVector<QualType,16> Params;
    QualType IdType = Ctx.getObjCIdType();
    QualType SelType = Ctx.getObjCSelType();
    Params.push_back(IdType);
    Params.push_back(SelType);
    Params.push_back(Ctx.LongTy);
    Params.push_back(IdType);
    Params.push_back(Ctx.BoolTy);
    Params.push_back(Ctx.BoolTy);
    const llvm::FunctionType *FTy =
      Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
                                                  CC_Default, false), false);
    return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
  }

  /// getEnumerationMutationFn - Runtime hook called when a collection is
  /// mutated during fast enumeration.
  llvm::Constant *getEnumerationMutationFn() {
    CodeGen::CodeGenTypes &Types = CGM.getTypes();
    ASTContext &Ctx = CGM.getContext();
    // void objc_enumerationMutation (id)
    llvm::SmallVector<QualType,16> Params;
    Params.push_back(Ctx.getObjCIdType());
    const llvm::FunctionType *FTy =
      Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
                                                  CC_Default, false), false);
    return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
  }

  /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
  llvm::Constant *getGcReadWeakFn() {
    // id objc_read_weak (id *)
    std::vector<const llvm::Type*> Args;
    Args.push_back(ObjectPtrTy->getPointerTo());
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(ObjectPtrTy, Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
  }

  /// GcAssignWeakFn -- LLVM objc_assign_weak function.
  llvm::Constant *getGcAssignWeakFn() {
    // id objc_assign_weak (id, id *)
    std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
    Args.push_back(ObjectPtrTy->getPointerTo());
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(ObjectPtrTy, Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
  }

  /// GcAssignGlobalFn -- LLVM objc_assign_global function.
  llvm::Constant *getGcAssignGlobalFn() {
    // id objc_assign_global(id, id *)
    std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
    Args.push_back(ObjectPtrTy->getPointerTo());
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(ObjectPtrTy, Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
  }

  /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
  llvm::Constant *getGcAssignIvarFn() {
    // id objc_assign_ivar(id, id *, ptrdiff_t)
    std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
    Args.push_back(ObjectPtrTy->getPointerTo());
    Args.push_back(LongTy);
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(ObjectPtrTy, Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
  }

  /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
  llvm::Constant *GcMemmoveCollectableFn() {
    // void *objc_memmove_collectable(void *dst, const void *src, size_t size)
    std::vector<const llvm::Type*> Args(1, Int8PtrTy);
    Args.push_back(Int8PtrTy);
    Args.push_back(LongTy);
    llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
  }

  /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
  llvm::Constant *getGcAssignStrongCastFn() {
    // id objc_assign_strongCast(id, id *)
    std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
    Args.push_back(ObjectPtrTy->getPointerTo());
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(ObjectPtrTy, Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
  }

  /// ExceptionThrowFn - LLVM objc_exception_throw function.
  llvm::Constant *getExceptionThrowFn() {
    // void objc_exception_throw(id)
    std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
  }

  /// SyncEnterFn - LLVM object_sync_enter function.
  llvm::Constant *getSyncEnterFn() {
    // void objc_sync_enter (id)
    std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
  }

  /// SyncExitFn - LLVM object_sync_exit function.
  llvm::Constant *getSyncExitFn() {
    // void objc_sync_exit (id)
    std::vector<const llvm::Type*> Args(1, ObjectPtrTy);
    llvm::FunctionType *FTy =
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
    return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
  }

  // Dispatch-function selectors. Note that only the "Super" sends have
  // distinct *2 entry points; the non-super half of each *Fn2 accessor
  // falls back to the plain non-super function.
  llvm::Constant *getSendFn(bool IsSuper) const {
    return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
  }

  llvm::Constant *getSendFn2(bool IsSuper) const {
    return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
  }

  llvm::Constant *getSendStretFn(bool IsSuper) const {
    return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
  }

  llvm::Constant *getSendStretFn2(bool IsSuper) const {
    return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
  }

  llvm::Constant *getSendFpretFn(bool IsSuper) const {
    return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
  }

  llvm::Constant *getSendFpretFn2(bool IsSuper) const {
    return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
  }

  ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
  ~ObjCCommonTypesHelper(){}
};
+
/// ObjCTypesHelper - Helper class that encapsulates lazy
/// construction of various types used during ObjC generation for the
/// fragile (32-bit) ABI.
class ObjCTypesHelper : public ObjCCommonTypesHelper {
public:
  /// SymtabTy - LLVM type for struct objc_symtab.
  const llvm::StructType *SymtabTy;
  /// SymtabPtrTy - LLVM type for struct objc_symtab *.
  const llvm::Type *SymtabPtrTy;
  /// ModuleTy - LLVM type for struct objc_module.
  const llvm::StructType *ModuleTy;

  /// ProtocolTy - LLVM type for struct objc_protocol.
  const llvm::StructType *ProtocolTy;
  /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
  const llvm::Type *ProtocolPtrTy;
  /// ProtocolExtensionTy - LLVM type for struct
  /// objc_protocol_extension.
  const llvm::StructType *ProtocolExtensionTy;
  /// ProtocolExtensionPtrTy - LLVM type for struct
  /// objc_protocol_extension *.
  const llvm::Type *ProtocolExtensionPtrTy;
  /// MethodDescriptionTy - LLVM type for struct
  /// objc_method_description.
  const llvm::StructType *MethodDescriptionTy;
  /// MethodDescriptionListTy - LLVM type for struct
  /// objc_method_description_list.
  const llvm::StructType *MethodDescriptionListTy;
  /// MethodDescriptionListPtrTy - LLVM type for struct
  /// objc_method_description_list *.
  const llvm::Type *MethodDescriptionListPtrTy;
  /// ProtocolListTy - LLVM type for struct objc_property_list.
  const llvm::Type *ProtocolListTy;
  /// ProtocolListPtrTy - LLVM type for struct objc_property_list*.
  const llvm::Type *ProtocolListPtrTy;
  /// CategoryTy - LLVM type for struct objc_category.
  const llvm::StructType *CategoryTy;
  /// ClassTy - LLVM type for struct objc_class.
  const llvm::StructType *ClassTy;
  /// ClassPtrTy - LLVM type for struct objc_class *.
  const llvm::Type *ClassPtrTy;
  /// ClassExtensionTy - LLVM type for struct objc_class_ext.
  const llvm::StructType *ClassExtensionTy;
  /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
  const llvm::Type *ClassExtensionPtrTy;
  // IvarTy - LLVM type for struct objc_ivar.
  const llvm::StructType *IvarTy;
  /// IvarListTy - LLVM type for struct objc_ivar_list.
  const llvm::Type *IvarListTy;
  /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
  const llvm::Type *IvarListPtrTy;
  /// MethodListTy - LLVM type for struct objc_method_list.
  const llvm::Type *MethodListTy;
  /// MethodListPtrTy - LLVM type for struct objc_method_list *.
  const llvm::Type *MethodListPtrTy;

  /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
  const llvm::Type *ExceptionDataTy;

  /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
  /// Takes a pointer to the per-frame exception data.
  llvm::Constant *getExceptionTryEnterFn() {
    std::vector<const llvm::Type*> Params;
    Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
    return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                              Params, false),
      "objc_exception_try_enter");
  }

  /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
  llvm::Constant *getExceptionTryExitFn() {
    std::vector<const llvm::Type*> Params;
    Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
    return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                              Params, false),
      "objc_exception_try_exit");
  }

  /// ExceptionExtractFn - LLVM objc_exception_extract function.
  /// Returns the thrown object (id) from the exception data.
  llvm::Constant *getExceptionExtractFn() {
    std::vector<const llvm::Type*> Params;
    Params.push_back(llvm::PointerType::getUnqual(ExceptionDataTy));
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, false),
                                     "objc_exception_extract");

  }

  /// ExceptionMatchFn - LLVM objc_exception_match function.
  /// int objc_exception_match(Class, id) -- non-zero when the object
  /// matches the given class.
  llvm::Constant *getExceptionMatchFn() {
    std::vector<const llvm::Type*> Params;
    Params.push_back(ClassPtrTy);
    Params.push_back(ObjectPtrTy);
    return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
                              Params, false),
      "objc_exception_match");

  }

  /// SetJmpFn - LLVM _setjmp function. Declared here as
  /// int _setjmp(int *buf) -- the jump buffer is passed as an i32*.
  llvm::Constant *getSetJmpFn() {
    std::vector<const llvm::Type*> Params;
    Params.push_back(llvm::Type::getInt32PtrTy(VMContext));
    return
      CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
                                                        Params, false),
                                "_setjmp");

  }

public:
  ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
  ~ObjCTypesHelper() {}
};
+
/// ObjCNonFragileABITypesHelper - will have all types needed by objective-c's
/// modern (non-fragile) ABI, including the fixup-based message dispatch
/// entry points and the EH support functions.
class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
public:

  // MethodListnfABITy - LLVM for struct _method_list_t
  const llvm::StructType *MethodListnfABITy;

  // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
  const llvm::Type *MethodListnfABIPtrTy;

  // ProtocolnfABITy = LLVM for struct _protocol_t
  const llvm::StructType *ProtocolnfABITy;

  // ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
  const llvm::Type *ProtocolnfABIPtrTy;

  // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
  const llvm::StructType *ProtocolListnfABITy;

  // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
  const llvm::Type *ProtocolListnfABIPtrTy;

  // ClassnfABITy - LLVM for struct _class_t
  const llvm::StructType *ClassnfABITy;

  // ClassnfABIPtrTy - LLVM for struct _class_t*
  const llvm::Type *ClassnfABIPtrTy;

  // IvarnfABITy - LLVM for struct _ivar_t
  const llvm::StructType *IvarnfABITy;

  // IvarListnfABITy - LLVM for struct _ivar_list_t
  const llvm::StructType *IvarListnfABITy;

  // IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
  const llvm::Type *IvarListnfABIPtrTy;

  // ClassRonfABITy - LLVM for struct _class_ro_t
  const llvm::StructType *ClassRonfABITy;

  // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
  const llvm::Type *ImpnfABITy;

  // CategorynfABITy - LLVM for struct _category_t
  const llvm::StructType *CategorynfABITy;

  // New types for nonfragile abi messaging.

  // MessageRefTy - LLVM for:
  // struct _message_ref_t {
  //   IMP messenger;
  //   SEL name;
  // };
  const llvm::StructType *MessageRefTy;
  // MessageRefCTy - clang type for struct _message_ref_t
  QualType MessageRefCTy;

  // MessageRefPtrTy - LLVM for struct _message_ref_t*
  const llvm::Type *MessageRefPtrTy;
  // MessageRefCPtrTy - clang type for struct _message_ref_t*
  QualType MessageRefCPtrTy;

  // MessengerTy - Type of the messenger (shown as IMP above)
  const llvm::FunctionType *MessengerTy;

  // SuperMessageRefTy - LLVM for:
  // struct _super_message_ref_t {
  //   SUPER_IMP messenger;
  //   SEL name;
  // };
  const llvm::StructType *SuperMessageRefTy;

  // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
  const llvm::Type *SuperMessageRefPtrTy;

  /// getMessageSendFixupFn - Vtable-dispatch entry point; the callee
  /// rewrites the message ref on first use.
  llvm::Constant *getMessageSendFixupFn() {
    // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(MessageRefPtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, true),
                                     "objc_msgSend_fixup");
  }

  llvm::Constant *getMessageSendFpretFixupFn() {
    // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(MessageRefPtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, true),
                                     "objc_msgSend_fpret_fixup");
  }

  llvm::Constant *getMessageSendStretFixupFn() {
    // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(MessageRefPtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, true),
                                     "objc_msgSend_stret_fixup");
  }

  llvm::Constant *getMessageSendIdFixupFn() {
    // id objc_msgSendId_fixup(id, struct message_ref_t*, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(MessageRefPtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, true),
                                     "objc_msgSendId_fixup");
  }

  llvm::Constant *getMessageSendIdStretFixupFn() {
    // id objc_msgSendId_stret_fixup(id, struct message_ref_t*, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(ObjectPtrTy);
    Params.push_back(MessageRefPtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                             Params, true),
                                     "objc_msgSendId_stret_fixup");
  }
  llvm::Constant *getMessageSendSuper2FixupFn() {
    // id objc_msgSendSuper2_fixup (struct objc_super *,
    //                              struct _super_message_ref_t*, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(SuperPtrTy);
    Params.push_back(SuperMessageRefPtrTy);
    return  CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                              Params, true),
                                      "objc_msgSendSuper2_fixup");
  }

  llvm::Constant *getMessageSendSuper2StretFixupFn() {
    // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
    //                                   struct _super_message_ref_t*, ...)
    std::vector<const llvm::Type*> Params;
    Params.push_back(SuperPtrTy);
    Params.push_back(SuperMessageRefPtrTy);
    return  CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
                                                              Params, true),
                                      "objc_msgSendSuper2_stret_fixup");
  }



  /// EHPersonalityPtr - LLVM value for an i8* to the Objective-C
  /// exception personality function.
  llvm::Value *getEHPersonalityPtr() {
    llvm::Constant *Personality =
      CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
                                                        true),
                                "__objc_personality_v0");
    return llvm::ConstantExpr::getBitCast(Personality, Int8PtrTy);
  }

  /// getUnwindResumeOrRethrowFn - Itanium unwinder hook used to continue
  /// propagation out of a cleanup.
  llvm::Constant *getUnwindResumeOrRethrowFn() {
    std::vector<const llvm::Type*> Params;
    Params.push_back(Int8PtrTy);
    return CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                              Params, false),
      "_Unwind_Resume_or_Rethrow");
  }

  llvm::Constant *getObjCEndCatchFn() {
    // void objc_end_catch(void)
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
                                                             false),
                                     "objc_end_catch");

  }

  llvm::Constant *getObjCBeginCatchFn() {
    // void *objc_begin_catch(void *exception)
    std::vector<const llvm::Type*> Params;
    Params.push_back(Int8PtrTy);
    return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
                                                             Params, false),
                                     "objc_begin_catch");
  }

  // EHTypeTy/EHTypePtrTy - type used for Objective-C exception type
  // metadata (presumably struct _objc_typeinfo; initialized by the
  // constructor, which is defined elsewhere -- confirm there).
  const llvm::StructType *EHTypeTy;
  const llvm::Type *EHTypePtrTy;

  ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
  ~ObjCNonFragileABITypesHelper(){}
};
+
/// CGObjCCommonMac - Common base class for the fragile (CGObjCMac) and
/// non-fragile (CGObjCNonFragileABIMac) Apple Objective-C runtime
/// implementations. Holds the uniquing caches and metadata-emission
/// helpers shared by both ABIs.
class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
public:
  // FIXME - accessibility
  /// GC_IVAR - A (byte position, size) pair describing an ivar, used
  /// while computing the GC ivar layout bitmap.
  class GC_IVAR {
  public:
    unsigned ivar_bytepos;
    unsigned ivar_size;
    GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
      : ivar_bytepos(bytepos), ivar_size(size) {}

    // Allow sorting based on byte pos.
    bool operator<(const GC_IVAR &b) const {
      return ivar_bytepos < b.ivar_bytepos;
    }
  };

  /// SKIP_SCAN - A (skip, scan) word-count pair used when encoding the
  /// GC ivar layout bitmap.
  class SKIP_SCAN {
  public:
    unsigned skip;
    unsigned scan;
    SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
      : skip(_skip), scan(_scan) {}
  };

protected:
  CodeGen::CodeGenModule &CGM;
  llvm::LLVMContext &VMContext;
  // FIXME! May not be needing this after all.
  unsigned ObjCABI;

  // gc ivar layout bitmap calculation helper caches.
  llvm::SmallVector<GC_IVAR, 16> SkipIvars;
  llvm::SmallVector<GC_IVAR, 16> IvarsInfo;

  /// LazySymbols - Symbols to generate a lazy reference for. See
  /// DefinedSymbols and FinishModule().
  llvm::SetVector<IdentifierInfo*> LazySymbols;

  /// DefinedSymbols - External symbols which are defined by this
  /// module. The symbols in this list and LazySymbols are used to add
  /// special linker symbols which ensure that Objective-C modules are
  /// linked properly.
  llvm::SetVector<IdentifierInfo*> DefinedSymbols;

  /// ClassNames - uniqued class names.
  llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;

  /// MethodVarNames - uniqued method variable names.
  llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;

  /// MethodVarTypes - uniqued method type signatures. We have to use
  /// a StringMap here because have no other unique reference.
  llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;

  /// MethodDefinitions - map of methods which have been defined in
  /// this translation unit.
  llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;

  /// PropertyNames - uniqued property names.
  llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;

  /// ClassReferences - uniqued class references.
  llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;

  /// SelectorReferences - uniqued selector references.
  llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;

  /// Protocols - Protocols for which an objc_protocol structure has
  /// been emitted. Forward declarations are handled by creating an
  /// empty structure whose initializer is filled in when/if defined.
  llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;

  /// DefinedProtocols - Protocols which have actually been
  /// defined. We should not need this, see FIXME in GenerateProtocol.
  llvm::DenseSet<IdentifierInfo*> DefinedProtocols;

  /// DefinedClasses - List of defined classes.
  std::vector<llvm::GlobalValue*> DefinedClasses;

  /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
  std::vector<llvm::GlobalValue*> DefinedNonLazyClasses;

  /// DefinedCategories - List of defined categories.
  std::vector<llvm::GlobalValue*> DefinedCategories;

  /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
  std::vector<llvm::GlobalValue*> DefinedNonLazyCategories;

  /// GetNameForMethod - Return a name for the given method.
  /// \param[out] NameOut - The return value.
  void GetNameForMethod(const ObjCMethodDecl *OMD,
                        const ObjCContainerDecl *CD,
                        llvm::SmallVectorImpl<char> &NameOut);

  /// GetMethodVarName - Return a unique constant for the given
  /// selector's name. The return value has type char *.
  llvm::Constant *GetMethodVarName(Selector Sel);
  llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
  llvm::Constant *GetMethodVarName(const std::string &Name);

  /// GetMethodVarType - Return a unique constant for the type of the
  /// given method or field. The return value has type char *.

  // FIXME: This is a horrible name.
  llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D);
  llvm::Constant *GetMethodVarType(const FieldDecl *D);

  /// GetPropertyName - Return a unique constant for the given
  /// name. The return value has type char *.
  llvm::Constant *GetPropertyName(IdentifierInfo *Ident);

  // FIXME: This can be dropped once string functions are unified.
  llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
                                        const Decl *Container);

  /// GetClassName - Return a unique constant for the given class's
  /// name. The return value has type char *.
  llvm::Constant *GetClassName(IdentifierInfo *Ident);

  /// BuildIvarLayout - Builds ivar layout bitmap for the class
  /// implementation for the __strong or __weak case.
  ///
  llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
                                  bool ForStrongLayout);

  void BuildAggrIvarRecordLayout(const RecordType *RT,
                                 unsigned int BytePos, bool ForStrongLayout,
                                 bool &HasUnion);
  void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
                           const llvm::StructLayout *Layout,
                           const RecordDecl *RD,
                           const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
                           unsigned int BytePos, bool ForStrongLayout,
                           bool &HasUnion);

  /// GetIvarLayoutName - Returns a unique constant for the given
  /// ivar layout bitmap.
  llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
                                    const ObjCCommonTypesHelper &ObjCTypes);

  /// EmitPropertyList - Emit the given property list. The return
  /// value has type PropertyListPtrTy.
  llvm::Constant *EmitPropertyList(llvm::Twine Name,
                                   const Decl *Container,
                                   const ObjCContainerDecl *OCD,
                                   const ObjCCommonTypesHelper &ObjCTypes);

  /// PushProtocolProperties - Push protocol's property on the input stack.
  void PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
                              std::vector<llvm::Constant*> &Properties,
                                   const Decl *Container,
                                   const ObjCProtocolDecl *PROTO,
                                   const ObjCCommonTypesHelper &ObjCTypes);

  /// GetProtocolRef - Return a reference to the internal protocol
  /// description, creating an empty one if it has not been
  /// defined. The return value has type ProtocolPtrTy.
  llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);

  /// CreateMetadataVar - Create a global variable with internal
  /// linkage for use by the Objective-C runtime.
  ///
  /// This is a convenience wrapper which not only creates the
  /// variable, but also sets the section and alignment and adds the
  /// global to the "llvm.used" list.
  ///
  /// \param Name - The variable name.
  /// \param Init - The variable initializer; this is also used to
  /// define the type of the variable.
  /// \param Section - The section the variable should go into, or 0.
  /// \param Align - The alignment for the variable, or 0.
  /// \param AddToUsed - Whether the variable should be added to
  /// "llvm.used".
  llvm::GlobalVariable *CreateMetadataVar(llvm::Twine Name,
                                          llvm::Constant *Init,
                                          const char *Section,
                                          unsigned Align,
                                          bool AddToUsed);

  /// EmitLegacyMessageSend - Emit a message send using the fragile
  /// (objc_msgSend-family) dispatch mechanism; shared by both ABIs.
  CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
                                        QualType ResultType,
                                        llvm::Value *Sel,
                                        llvm::Value *Arg0,
                                        QualType Arg0Ty,
                                        bool IsSuper,
                                        const CallArgList &CallArgs,
                                        const ObjCMethodDecl *OMD,
                                        const ObjCCommonTypesHelper &ObjCTypes);

public:
  CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
    CGM(cgm), VMContext(cgm.getLLVMContext()) { }

  virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);

  virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
                                         const ObjCContainerDecl *CD=0);

  virtual void GenerateProtocol(const ObjCProtocolDecl *PD);

  /// GetOrEmitProtocol - Get the protocol object for the given
  /// declaration, emitting it if necessary. The return value has type
  /// ProtocolPtrTy.
  virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;

  /// GetOrEmitProtocolRef - Get a forward reference to the protocol
  /// object for the given declaration, emitting it if needed. These
  /// forward references will be filled in with empty bodies if no
  /// definition is seen. The return value has type ProtocolPtrTy.
  virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
};
+
/// CGObjCMac - Implementation of the fragile (32-bit) Apple
/// Objective-C runtime ABI.
class CGObjCMac : public CGObjCCommonMac {
private:
  ObjCTypesHelper ObjCTypes;
  /// EmitImageInfo - Emit the image info marker used to encode some module
  /// level information.
  void EmitImageInfo();

  /// EmitModuleInfo - Another marker encoding module level
  /// information.
  void EmitModuleInfo();

  /// EmitModuleSymbols - Emit module symbols, the list of defined
  /// classes and categories. The result has type SymtabPtrTy.
  llvm::Constant *EmitModuleSymbols();

  /// FinishModule - Write out global data structures at the end of
  /// processing a translation unit.
  void FinishModule();

  /// EmitClassExtension - Generate the class extension structure used
  /// to store the weak ivar layout and properties. The return value
  /// has type ClassExtensionPtrTy.
  llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);

  /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
  /// for the given class.
  llvm::Value *EmitClassRef(CGBuilderTy &Builder,
                            const ObjCInterfaceDecl *ID);

  /// EmitSuperClassRef - Emits reference to class's main metadata class.
  llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);

  /// EmitMessageSend - Common routine used by GenerateMessageSend and
  /// GenerateMessageSendSuper for the fragile ABI.
  CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
                                  QualType ResultType,
                                  Selector Sel,
                                  llvm::Value *Arg0,
                                  QualType Arg0Ty,
                                  bool IsSuper,
                                  const CallArgList &CallArgs);

  /// EmitIvarList - Emit the ivar list for the given
  /// implementation. If ForClass is true the list of class ivars
  /// (i.e. metaclass ivars) is emitted, otherwise the list of
  /// interface ivars will be emitted. The return value has type
  /// IvarListPtrTy.
  llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
                               bool ForClass);

  /// EmitMetaClassRef - Emit a forward reference to the class structure
  /// for the metaclass of the given interface. The return value has
  /// type ClassPtrTy.
  llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);

  /// EmitMetaClass - Emit a class structure for the metaclass of the
  /// given implementation. The return value has type ClassPtrTy.
  llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
                                llvm::Constant *Protocols,
                                const ConstantVector &Methods);

  llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);

  llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);

  /// EmitMethodList - Emit the method list for the given
  /// implementation. The return value has type MethodListPtrTy.
  llvm::Constant *EmitMethodList(llvm::Twine Name,
                                 const char *Section,
                                 const ConstantVector &Methods);

  /// EmitMethodDescList - Emit a method description list for a list of
  /// method declarations.
  ///  - Name: The name for the emitted variable.
  ///  - Section: The section the list should be placed in.
  ///  - Methods: The method description constants to output.
  ///
  /// The return value has type MethodDescriptionListPtrTy.
  llvm::Constant *EmitMethodDescList(llvm::Twine Name,
                                     const char *Section,
                                     const ConstantVector &Methods);

  /// GetOrEmitProtocol - Get the protocol object for the given
  /// declaration, emitting it if necessary. The return value has type
  /// ProtocolPtrTy.
  virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);

  /// GetOrEmitProtocolRef - Get a forward reference to the protocol
  /// object for the given declaration, emitting it if needed. These
  /// forward references will be filled in with empty bodies if no
  /// definition is seen. The return value has type ProtocolPtrTy.
  virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);

  /// EmitProtocolExtension - Generate the protocol extension
  /// structure used to store optional instance and class methods, and
  /// protocol properties. The return value has type
  /// ProtocolExtensionPtrTy.
  llvm::Constant *
  EmitProtocolExtension(const ObjCProtocolDecl *PD,
                        const ConstantVector &OptInstanceMethods,
                        const ConstantVector &OptClassMethods);

  /// EmitProtocolList - Generate the list of referenced
  /// protocols. The return value has type ProtocolListPtrTy.
  llvm::Constant *EmitProtocolList(llvm::Twine Name,
                                   ObjCProtocolDecl::protocol_iterator begin,
                                   ObjCProtocolDecl::protocol_iterator end);

  /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
  /// for the given selector.
  llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);

public:
  CGObjCMac(CodeGen::CodeGenModule &cgm);

  virtual llvm::Function *ModuleInitFunction();

  virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
                                              QualType ResultType,
                                              Selector Sel,
                                              llvm::Value *Receiver,
                                              bool IsClassMessage,
                                              const CallArgList &CallArgs,
                                              const ObjCMethodDecl *Method);

  virtual CodeGen::RValue
  GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                           QualType ResultType,
                           Selector Sel,
                           const ObjCInterfaceDecl *Class,
                           bool isCategoryImpl,
                           llvm::Value *Receiver,
                           bool IsClassMessage,
                           const CallArgList &CallArgs,
                           const ObjCMethodDecl *Method);

  virtual llvm::Value *GetClass(CGBuilderTy &Builder,
                                const ObjCInterfaceDecl *ID);

  virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);

  /// The NeXT/Apple runtimes do not support typed selectors; just emit an
  /// untyped one.
  virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
                                   const ObjCMethodDecl *Method);

  virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);

  virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);

  virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
                                           const ObjCProtocolDecl *PD);

  virtual llvm::Constant *GetPropertyGetFunction();
  virtual llvm::Constant *GetPropertySetFunction();
  virtual llvm::Constant *EnumerationMutationFunction();

  virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
                                         const Stmt &S);
  virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
                             const ObjCAtThrowStmt &S);
  virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
                                         llvm::Value *AddrWeakObj);
  virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
                                  llvm::Value *src, llvm::Value *dst);
  virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
                                    llvm::Value *src, llvm::Value *dest);
  virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
                                  llvm::Value *src, llvm::Value *dest,
                                  llvm::Value *ivarOffset);
  virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *src, llvm::Value *dest);
  virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *dest, llvm::Value *src,
                                        QualType Ty);

  virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
                                      QualType ObjectTy,
                                      llvm::Value *BaseValue,
                                      const ObjCIvarDecl *Ivar,
                                      unsigned CVRQualifiers);
  virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
                                      const ObjCInterfaceDecl *Interface,
                                      const ObjCIvarDecl *Ivar);
};
+
/// CGObjCNonFragileABIMac - Implementation of the non-fragile (modern,
/// 64-bit style) Apple Objective-C runtime ABI.
class CGObjCNonFragileABIMac : public CGObjCCommonMac {
private:
  ObjCNonFragileABITypesHelper ObjCTypes;
  // Cached globals referenced from emitted class metadata; presumably the
  // runtime's empty cache/vtable placeholders — confirm in the .cpp emitters.
  llvm::GlobalVariable* ObjCEmptyCacheVar;
  llvm::GlobalVariable* ObjCEmptyVtableVar;

  /// SuperClassReferences - uniqued super class references.
  llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;

  /// MetaClassReferences - uniqued meta class references.
  llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;

  /// EHTypeReferences - uniqued class ehtype references.
  llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;

  /// NonLegacyDispatchMethods - List of methods for which we do *not* generate
  /// legacy messaging dispatch.
  llvm::DenseSet<Selector> NonLegacyDispatchMethods;

  /// DefinedMetaClasses - List of defined meta-classes.
  std::vector<llvm::GlobalValue*> DefinedMetaClasses;

  /// LegacyDispatchedSelector - Returns true if SEL is not in the list of
  /// NonLegacyDispatchMethods; false otherwise.
  bool LegacyDispatchedSelector(Selector Sel);

  /// FinishNonFragileABIModule - Write out global data structures at the end of
  /// processing a translation unit.
  void FinishNonFragileABIModule();

  /// AddModuleClassList - Add the given list of class pointers to the
  /// module with the provided symbol and section names.
  void AddModuleClassList(const std::vector<llvm::GlobalValue*> &Container,
                          const char *SymbolName,
                          const char *SectionName);

  /// BuildClassRoTInitializer - Build the class_ro_t metadata initializer
  /// for the given implementation.
  llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
                                              unsigned InstanceStart,
                                              unsigned InstanceSize,
                                              const ObjCImplementationDecl *ID);
  /// BuildClassMetaData - Build the class_t metadata structure for the
  /// named class from its isa, superclass, and class_ro_t globals.
  llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
                                            llvm::Constant *IsAGV,
                                            llvm::Constant *SuperClassGV,
                                            llvm::Constant *ClassRoGV,
                                            bool HiddenVisibility);

  llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);

  llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);

  /// EmitMethodList - Emit the method list for the given
  /// implementation. The return value has type MethodListnfABITy.
  llvm::Constant *EmitMethodList(llvm::Twine Name,
                                 const char *Section,
                                 const ConstantVector &Methods);
  /// EmitIvarList - Emit the ivar list for the given
  /// implementation. If ForClass is true the list of class ivars
  /// (i.e. metaclass ivars) is emitted, otherwise the list of
  /// interface ivars will be emitted. The return value has type
  /// IvarListnfABIPtrTy.
  llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);

  /// EmitIvarOffsetVar - Emit the global holding the given ivar's offset.
  llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
                                    const ObjCIvarDecl *Ivar,
                                    unsigned long int offset);

  /// GetOrEmitProtocol - Get the protocol object for the given
  /// declaration, emitting it if necessary. The return value has type
  /// ProtocolPtrTy.
  virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);

  /// GetOrEmitProtocolRef - Get a forward reference to the protocol
  /// object for the given declaration, emitting it if needed. These
  /// forward references will be filled in with empty bodies if no
  /// definition is seen. The return value has type ProtocolPtrTy.
  virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);

  /// EmitProtocolList - Generate the list of referenced
  /// protocols. The return value has type ProtocolListPtrTy.
  llvm::Constant *EmitProtocolList(llvm::Twine Name,
                                   ObjCProtocolDecl::protocol_iterator begin,
                                   ObjCProtocolDecl::protocol_iterator end);

  /// EmitMessageSend - Common routine used by GenerateMessageSend and
  /// GenerateMessageSendSuper for the non-fragile ABI.
  CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
                                  QualType ResultType,
                                  Selector Sel,
                                  llvm::Value *Receiver,
                                  QualType Arg0Ty,
                                  bool IsSuper,
                                  const CallArgList &CallArgs);

  /// GetClassGlobal - Return the global variable for the Objective-C
  /// class of the given name.
  llvm::GlobalVariable *GetClassGlobal(const std::string &Name);

  /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
  /// for the given class reference.
  llvm::Value *EmitClassRef(CGBuilderTy &Builder,
                            const ObjCInterfaceDecl *ID);

  /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
  /// for the given super class reference.
  llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
                                 const ObjCInterfaceDecl *ID);

  /// EmitMetaClassRef - Return a Value * of the address of _class_t
  /// meta-data
  llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
                                const ObjCInterfaceDecl *ID);

  /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
  /// the given ivar.
  ///
  llvm::GlobalVariable * ObjCIvarOffsetVariable(
    const ObjCInterfaceDecl *ID,
    const ObjCIvarDecl *Ivar);

  /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
  /// for the given selector.
  llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel);

  /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
  /// interface. The return value has type EHTypePtrTy.
  llvm::Value *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
                                  bool ForDefinition);

  /// getMetaclassSymbolPrefix - Symbol-name prefix for metaclass metadata.
  const char *getMetaclassSymbolPrefix() const {
    return "OBJC_METACLASS_$_";
  }

  /// getClassSymbolPrefix - Symbol-name prefix for class metadata.
  const char *getClassSymbolPrefix() const {
    return "OBJC_CLASS_$_";
  }

  void GetClassSizeInfo(const ObjCImplementationDecl *OID,
                        uint32_t &InstanceStart,
                        uint32_t &InstanceSize);

  // Shamelessly stolen from Analysis/CFRefCount.cpp
  Selector GetNullarySelector(const char* name) const {
    IdentifierInfo* II = &CGM.getContext().Idents.get(name);
    return CGM.getContext().Selectors.getSelector(0, &II);
  }

  Selector GetUnarySelector(const char* name) const {
    IdentifierInfo* II = &CGM.getContext().Idents.get(name);
    return CGM.getContext().Selectors.getSelector(1, &II);
  }

  /// ImplementationIsNonLazy - Check whether the given category or
  /// class implementation is "non-lazy".
  bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;

public:
  CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
  // FIXME. All stubs for now!
  virtual llvm::Function *ModuleInitFunction();

  virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
                                              QualType ResultType,
                                              Selector Sel,
                                              llvm::Value *Receiver,
                                              bool IsClassMessage,
                                              const CallArgList &CallArgs,
                                              const ObjCMethodDecl *Method);

  virtual CodeGen::RValue
  GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                           QualType ResultType,
                           Selector Sel,
                           const ObjCInterfaceDecl *Class,
                           bool isCategoryImpl,
                           llvm::Value *Receiver,
                           bool IsClassMessage,
                           const CallArgList &CallArgs,
                           const ObjCMethodDecl *Method);

  virtual llvm::Value *GetClass(CGBuilderTy &Builder,
                                const ObjCInterfaceDecl *ID);

  virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel)
    { return EmitSelector(Builder, Sel); }

  /// The NeXT/Apple runtimes do not support typed selectors; just emit an
  /// untyped one.
  virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
                                   const ObjCMethodDecl *Method)
    { return EmitSelector(Builder, Method->getSelector()); }

  virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);

  virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
  virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
                                           const ObjCProtocolDecl *PD);

  virtual llvm::Constant *GetPropertyGetFunction() {
    return ObjCTypes.getGetPropertyFn();
  }
  virtual llvm::Constant *GetPropertySetFunction() {
    return ObjCTypes.getSetPropertyFn();
  }
  virtual llvm::Constant *EnumerationMutationFunction() {
    return ObjCTypes.getEnumerationMutationFn();
  }

  virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
                                         const Stmt &S);
  virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
                             const ObjCAtThrowStmt &S);
  virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
                                         llvm::Value *AddrWeakObj);
  virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
                                  llvm::Value *src, llvm::Value *dst);
  virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
                                    llvm::Value *src, llvm::Value *dest);
  virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
                                  llvm::Value *src, llvm::Value *dest,
                                  llvm::Value *ivarOffset);
  virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *src, llvm::Value *dest);
  virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *dest, llvm::Value *src,
                                        QualType Ty);
  virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
                                      QualType ObjectTy,
                                      llvm::Value *BaseValue,
                                      const ObjCIvarDecl *Ivar,
                                      unsigned CVRQualifiers);
  virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
                                      const ObjCInterfaceDecl *Interface,
                                      const ObjCIvarDecl *Ivar);
};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Help routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext,
+                                      llvm::Constant *C,
+                                      unsigned idx0,
+                                      unsigned idx1) {
+  llvm::Value *Idxs[] = {
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0),
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1)
+  };
+  return llvm::ConstantExpr::getGetElementPtr(C, Idxs, 2);
+}
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+                                      const ObjCInterfaceDecl *OID) {
+  if (OID->hasAttr<ObjCExceptionAttr>())
+    return true;
+  if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+    return hasObjCExceptionAttribute(Context, Super);
+  return false;
+}
+
+/* *** CGObjCMac Public Interface *** */
+
/// CGObjCMac - Construct the fragile-ABI runtime implementation and emit
/// the module-level image info marker up front.
CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
                                                    ObjCTypes(cgm) {
  ObjCABI = 1; // ABI version 1: the fragile runtime.
  EmitImageInfo();
}
+
/// GetClass - Return a reference to the class for the given interface
/// decl. Delegates to EmitClassRef, which uniques the reference.
llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
                                 const ObjCInterfaceDecl *ID) {
  return EmitClassRef(Builder, ID);
}
+
/// GetSelector - Return the pointer to the unique'd string for this selector.
llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel) {
  return EmitSelector(Builder, Sel);
}
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+                                    *Method) {
+  return EmitSelector(Builder, Method->getSelector());
+}
+
/// Generate a constant CFString object.
/*
  struct __builtin_CFString {
  const int *isa; // point to __CFConstantStringClassReference
  int flags;
  const char *str;
  long length;
  };
*/

// Delegates to CodeGenModule, which uniques and emits the CFString global.
llvm::Constant *CGObjCCommonMac::GenerateConstantString(
  const StringLiteral *SL) {
  return CGM.GetAddrOfConstantCFString(SL);
}
+
/// Generates a message send where the super is the receiver.  This is
/// a message send to self with special delivery semantics indicating
/// which class's method should be called.
CodeGen::RValue
CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                                    QualType ResultType,
                                    Selector Sel,
                                    const ObjCInterfaceDecl *Class,
                                    bool isCategoryImpl,
                                    llvm::Value *Receiver,
                                    bool IsClassMessage,
                                    const CodeGen::CallArgList &CallArgs,
                                    const ObjCMethodDecl *Method) {
  // Create and init a super structure; this is a (receiver, class)
  // pair we will pass to objc_msgSendSuper.
  llvm::Value *ObjCSuper =
    CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");
  llvm::Value *ReceiverAsObject =
    CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
  // Field 0 of objc_super: the receiver object.
  CGF.Builder.CreateStore(ReceiverAsObject,
                          CGF.Builder.CreateStructGEP(ObjCSuper, 0));

  // If this is a class message the metaclass is passed as the target.
  llvm::Value *Target;
  if (IsClassMessage) {
    if (isCategoryImpl) {
      // Message sent to 'super' in a class method defined in a category
      // implementation requires an odd treatment.
      // If we are in a class method, we must retrieve the
      // _metaclass_ for the current class, pointed at by
      // the class's "isa" pointer.  The following assumes that
      // "isa" is the first ivar in a class (which it must be).
      Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
      Target = CGF.Builder.CreateStructGEP(Target, 0);
      Target = CGF.Builder.CreateLoad(Target);
    } else {
      // Load the superclass slot out of the metaclass structure.
      llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
      llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
      llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
      Target = Super;
    }
  }
  else if (isCategoryImpl)
    Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
  else {
    // Instance message: load the superclass slot out of the class structure.
    llvm::Value *ClassPtr = EmitSuperClassRef(Class);
    ClassPtr = CGF.Builder.CreateStructGEP(ClassPtr, 1);
    Target = CGF.Builder.CreateLoad(ClassPtr);
  }
  // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
  // ObjCTypes types.
  const llvm::Type *ClassTy =
    CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
  Target = CGF.Builder.CreateBitCast(Target, ClassTy);
  // Field 1 of objc_super: the class to start the method search from.
  CGF.Builder.CreateStore(Target,
                          CGF.Builder.CreateStructGEP(ObjCSuper, 1));
  return EmitLegacyMessageSend(CGF, ResultType,
                               EmitSelector(CGF.Builder, Sel),
                               ObjCSuper, ObjCTypes.SuperPtrCTy,
                               true, CallArgs, Method, ObjCTypes);
}
+
+/// Generate code for an ordinary (non-super) message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+                                               QualType ResultType,
+                                               Selector Sel,
+                                               llvm::Value *Receiver,
+                                               bool IsClassMessage,
+                                               const CallArgList &CallArgs,
+                                               const ObjCMethodDecl *Method) {
+  // Load the selector and hand everything to the common legacy-ABI path;
+  // the receiver is typed 'id' and IsSuper is false.
+  llvm::Value *SelValue = EmitSelector(CGF.Builder, Sel);
+  return EmitLegacyMessageSend(CGF, ResultType, SelValue, Receiver,
+                               CGF.getContext().getObjCIdType(),
+                               false, CallArgs, Method, ObjCTypes);
+}
+
+/// EmitLegacyMessageSend - Emit a call to the appropriate objc_msgSend*
+/// runtime entry point, chosen by the return convention of ResultType.
+/// Arg0 is the receiver (or an objc_super* when IsSuper is set).
+CodeGen::RValue
+CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+                                       QualType ResultType,
+                                       llvm::Value *Sel,
+                                       llvm::Value *Arg0,
+                                       QualType Arg0Ty,
+                                       bool IsSuper,
+                                       const CallArgList &CallArgs,
+                                       const ObjCMethodDecl *Method,
+                                       const ObjCCommonTypesHelper &ObjCTypes) {
+  // Build the real argument list: receiver, selector, then user arguments.
+  CallArgList ActualArgs;
+  if (!IsSuper)
+    Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");
+  ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
+  ActualArgs.push_back(std::make_pair(RValue::get(Sel),
+                                      CGF.getContext().getObjCSelType()));
+  ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
+
+  CodeGenTypes &Types = CGM.getTypes();
+  const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
+                                                       CC_Default, false);
+  bool IsVariadic = Method && Method->isVariadic();
+  const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo, IsVariadic);
+
+  // Pick the runtime entry point from the return convention.
+  llvm::Constant *Fn;
+  if (CGM.ReturnTypeUsesSret(FnInfo)) {
+    // Struct returns go through the _stret variants.
+    Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+                        : ObjCTypes.getSendStretFn(IsSuper);
+  } else if (ResultType->isFloatingType()) {
+    if (ObjCABI != 2) {
+      // FIXME. This currently matches gcc's API for x86-32. May need to
+      // change for others if we have their API.
+      Fn = ObjCTypes.getSendFpretFn(IsSuper);
+    } else if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+      // Under the non-fragile ABI only long double uses the _fpret variant.
+      Fn = (BT->getKind() == BuiltinType::LongDouble)
+               ? ObjCTypes.getSendFpretFn2(IsSuper)
+               : ObjCTypes.getSendFn2(IsSuper);
+    } else {
+      Fn = ObjCTypes.getSendFn2(IsSuper);
+    }
+  } else {
+    Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+                        : ObjCTypes.getSendFn(IsSuper);
+  }
+  assert(Fn && "EmitLegacyMessageSend - unknown API");
+
+  // The entry points are untyped varargs-style declarations; cast to the
+  // precise function type for this call.
+  Fn = llvm::ConstantExpr::getBitCast(Fn, llvm::PointerType::getUnqual(FTy));
+  return CGF.EmitCall(FnInfo, Fn, ReturnValueSlot(), ActualArgs);
+}
+
+/// GenerateProtocolRef - Emit a reference to the given protocol, cast to
+/// the external Protocol* type.
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+                                            const ObjCProtocolDecl *PD) {
+  // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+  LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+  llvm::Constant *Ref = GetProtocolRef(PD);
+  return llvm::ConstantExpr::getBitCast(Ref, ObjCTypes.ExternalProtocolPtrTy);
+}
+
+/// GenerateProtocol - Record that PD has a definition; if it was already
+/// forward-referenced, emit its defining object now.
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+  // FIXME: We shouldn't need this; the protocol decl should contain enough
+  // information to tell us whether this was a declaration or a definition.
+  DefinedProtocols.insert(PD->getIdentifier());
+
+  // Protocol objects are emitted lazily, so only emit now when a forward
+  // reference to this protocol already exists.
+  if (Protocols.count(PD->getIdentifier()))
+    GetOrEmitProtocol(PD);
+}
+
+/// GetProtocolRef - Return a constant for PD: its full definition when one
+/// is known, otherwise a forward reference to be filled in later.
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+  if (!DefinedProtocols.count(PD->getIdentifier()))
+    return GetOrEmitProtocolRef(PD);
+  return GetOrEmitProtocol(PD);
+}
+
+/*
+// APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+struct _objc_protocol {
+struct _objc_protocol_extension *isa;
+char *protocol_name;
+struct _objc_protocol_list *protocol_list;
+struct _objc__method_prototype_list *instance_methods;
+struct _objc__method_prototype_list *class_methods
+};
+
+See EmitProtocolExtension().
+*/
+/// GetOrEmitProtocol - Emit the defining struct _objc_protocol object for
+/// PD (see the layout comment above), reusing and fixing up any existing
+/// forward reference.
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+  llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+  // Early exit if a defining object has already been generated.
+  if (Entry && Entry->hasInitializer())
+    return Entry;
+
+  // FIXME: I don't understand why gcc generates this, or where it is
+  // resolved. Investigate. It's also wasteful to look this up over and over.
+  LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+  // Construct method lists: required/optional x instance/class.
+  std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+  std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+  for (ObjCProtocolDecl::instmeth_iterator
+         i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+    ObjCMethodDecl *MD = *i;
+    llvm::Constant *C = GetMethodDescriptionConstant(MD);
+    if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptInstanceMethods.push_back(C);
+    } else {
+      InstanceMethods.push_back(C);
+    }
+  }
+
+  for (ObjCProtocolDecl::classmeth_iterator
+         i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+    ObjCMethodDecl *MD = *i;
+    llvm::Constant *C = GetMethodDescriptionConstant(MD);
+    if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+      OptClassMethods.push_back(C);
+    } else {
+      ClassMethods.push_back(C);
+    }
+  }
+
+  // Fill in struct _objc_protocol. The optional methods are carried by the
+  // protocol extension hanging off the isa slot.
+  std::vector<llvm::Constant*> Values(5);
+  Values[0] = EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods);
+  Values[1] = GetClassName(PD->getIdentifier());
+  Values[2] =
+    EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
+                     PD->protocol_begin(),
+                     PD->protocol_end());
+  Values[3] =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+                       "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+                       InstanceMethods);
+  Values[4] =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+                       "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                       ClassMethods);
+  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+                                                   Values);
+
+  if (Entry) {
+    // Already created, fix the linkage and update the initializer.
+    Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+    Entry->setInitializer(Init);
+  } else {
+    Entry =
+      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+                               llvm::GlobalValue::InternalLinkage,
+                               Init,
+                               "\01L_OBJC_PROTOCOL_" + PD->getName());
+    Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    // FIXME: Is this alignment necessary? Why only for protocol?
+    Entry->setAlignment(4);
+  }
+  CGM.AddUsedGlobal(Entry);
+
+  return Entry;
+}
+
+/// GetOrEmitProtocolRef - Return a forward reference for protocol PD,
+/// creating the uninitialized global on first use.
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+  llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+  if (!Entry) {
+    // We use the initializer as a marker of whether this is a forward
+    // reference or not. At module finalization we add the empty
+    // contents for protocols which were referenced but never defined.
+    Entry =
+      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+                               llvm::GlobalValue::ExternalLinkage,
+                               0,
+                               "\01L_OBJC_PROTOCOL_" + PD->getName());
+    Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+    // FIXME: Is this alignment necessary? Why only for protocol?
+    Entry->setAlignment(4);
+  }
+
+  return Entry;
+}
+
+/*
+  struct _objc_protocol_extension {
+  uint32_t size;
+  struct objc_method_description_list *optional_instance_methods;
+  struct objc_method_description_list *optional_class_methods;
+  struct objc_property_list *instance_properties;
+  };
+*/
+/// EmitProtocolExtension - Emit the struct _objc_protocol_extension for PD
+/// (see the layout comment above), or a null pointer when it would be empty.
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+                                 const ConstantVector &OptInstanceMethods,
+                                 const ConstantVector &OptClassMethods) {
+  llvm::Constant *OptInstMethodList =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+                       + PD->getName(),
+                       "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+                       OptInstanceMethods);
+  llvm::Constant *OptClassMethodList =
+    EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+                       "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                       OptClassMethods);
+  llvm::Constant *PropertyList =
+    EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(),
+                     0, PD, ObjCTypes);
+
+  // Return null if no extension bits are used.
+  if (OptInstMethodList->isNullValue() && OptClassMethodList->isNullValue() &&
+      PropertyList->isNullValue())
+    return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+  uint64_t Size =
+    CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+  std::vector<llvm::Constant*> Values(4);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+  Values[1] = OptInstMethodList;
+  Values[2] = OptClassMethodList;
+  Values[3] = PropertyList;
+
+  llvm::Constant *Init =
+    llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+  // No special section, but goes in llvm.used
+  return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getName(),
+                           Init,
+                           0, 0, true);
+}
+
+/*
+  struct objc_protocol_list {
+  struct objc_protocol_list *next;
+  long count;
+  Protocol *list[];
+  };
+*/
+/// EmitProtocolList - Emit a struct objc_protocol_list (see the layout
+/// comment above) holding references to the protocols in [begin, end).
+llvm::Constant *
+CGObjCMac::EmitProtocolList(llvm::Twine Name,
+                            ObjCProtocolDecl::protocol_iterator begin,
+                            ObjCProtocolDecl::protocol_iterator end) {
+  std::vector<llvm::Constant*> ProtocolRefs;
+  for (ObjCProtocolDecl::protocol_iterator I = begin; I != end; ++I)
+    ProtocolRefs.push_back(GetProtocolRef(*I));
+
+  // Just return null for empty protocol lists.
+  if (ProtocolRefs.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+  // The array is null terminated; the count below excludes the terminator.
+  ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+  llvm::ArrayType *AT =
+    llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy, ProtocolRefs.size());
+
+  std::vector<llvm::Constant*> Values(3);
+  // The 'next' field is only used by the runtime.
+  Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
+                                     ProtocolRefs.size() - 1);
+  Values[2] = llvm::ConstantArray::get(AT, ProtocolRefs);
+
+  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                      4, false);
+  return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
+/// PushProtocolProperties - Append property entries declared by PROTO (and,
+/// recursively, by its inherited protocols) to Properties, using PropertySet
+/// to drop names that were already recorded.
+void CGObjCCommonMac::PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+                                   std::vector<llvm::Constant*> &Properties,
+                                   const Decl *Container,
+                                   const ObjCProtocolDecl *PROTO,
+                                   const ObjCCommonTypesHelper &ObjCTypes) {
+  // Inherited protocols contribute first.
+  for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
+         E = PROTO->protocol_end(); P != E; ++P)
+    PushProtocolProperties(PropertySet, Properties, Container, *P, ObjCTypes);
+
+  // Then this protocol's own properties, skipping duplicates by name.
+  std::vector<llvm::Constant*> Entry(2);
+  for (ObjCContainerDecl::prop_iterator I = PROTO->prop_begin(),
+       E = PROTO->prop_end(); I != E; ++I) {
+    const ObjCPropertyDecl *PD = *I;
+    if (!PropertySet.insert(PD->getIdentifier()))
+      continue;
+    Entry[0] = GetPropertyName(PD->getIdentifier());
+    Entry[1] = GetPropertyTypeString(PD, Container);
+    Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+                                                   Entry));
+  }
+}
+
+/*
+  struct _objc_property {
+  const char * const name;
+  const char * const attributes;
+  };
+
+  struct _objc_property_list {
+  uint32_t entsize; // sizeof (struct _objc_property)
+  uint32_t prop_count;
+  struct _objc_property[prop_count];
+  };
+*/
+/// EmitPropertyList - Emit a struct _objc_property_list (see the layout
+/// comment above) for the container OCD, or null when it has no properties.
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(llvm::Twine Name,
+                                       const Decl *Container,
+                                       const ObjCContainerDecl *OCD,
+                                       const ObjCCommonTypesHelper &ObjCTypes) {
+  llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+  std::vector<llvm::Constant*> Properties;
+  std::vector<llvm::Constant*> Entry(2);
+
+  // The container's own properties come first and claim their names.
+  for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
+         E = OCD->prop_end(); I != E; ++I) {
+    const ObjCPropertyDecl *PD = *I;
+    PropertySet.insert(PD->getIdentifier());
+    Entry[0] = GetPropertyName(PD->getIdentifier());
+    Entry[1] = GetPropertyTypeString(PD, Container);
+    Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+                                                   Entry));
+  }
+
+  // For an interface, also pull in properties from adopted protocols.
+  if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
+    for (ObjCInterfaceDecl::protocol_iterator P = OID->protocol_begin(),
+         E = OID->protocol_end(); P != E; ++P)
+      PushProtocolProperties(PropertySet, Properties, Container, *P,
+                             ObjCTypes);
+
+  // Return null for empty list.
+  if (Properties.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+  unsigned EntSize =
+    CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+  llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+                                             Properties.size());
+
+  std::vector<llvm::Constant*> Values(3);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, EntSize);
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+  Values[2] = llvm::ConstantArray::get(AT, Properties);
+  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar(Name, Init,
+                      (ObjCABI == 2) ? "__DATA, __objc_const" :
+                      "__OBJC,__property,regular,no_dead_strip",
+                      (ObjCABI == 2) ? 8 : 4,
+                      true);
+  return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
+
+/*
+  struct objc_method_description_list {
+  int count;
+  struct objc_method_description list[];
+  };
+*/
+/// GetMethodDescriptionConstant - Build one objc_method_description entry
+/// (selector name, type encoding) for MD.
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+  llvm::Constant *SelPtr =
+    llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+                                   ObjCTypes.SelectorPtrTy);
+  std::vector<llvm::Constant*> Desc(2);
+  Desc[0] = SelPtr;
+  Desc[1] = GetMethodVarType(MD);
+  return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy, Desc);
+}
+
+/// EmitMethodDescList - Emit a struct objc_method_description_list (see the
+/// layout comment above) in the given Section, or null for an empty list.
+llvm::Constant *CGObjCMac::EmitMethodDescList(llvm::Twine Name,
+                                              const char *Section,
+                                              const ConstantVector &Methods) {
+  // Return null for empty list.
+  if (Methods.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+  llvm::ArrayType *AT =
+    llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy, Methods.size());
+
+  std::vector<llvm::Constant*> Values(2);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+  Values[1] = llvm::ConstantArray::get(AT, Methods);
+  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+  llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+  return llvm::ConstantExpr::getBitCast(GV,
+                                        ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+  struct _objc_category {
+  char *category_name;
+  char *class_name;
+  struct _objc_method_list *instance_methods;
+  struct _objc_method_list *class_methods;
+  struct _objc_protocol_list *protocols;
+  uint32_t size; // <rdar://4585769>
+  struct _objc_property_list *instance_properties;
+  };
+*/
+/// GenerateCategory - Emit the struct _objc_category metadata (see the
+/// layout comment above) for the category implementation OCD and record it
+/// in DefinedCategories.
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+  unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+  // FIXME: This is poor design, the OCD should have a pointer to the category
+  // decl. Additionally, note that Category can be null for the @implementation
+  // w/o an @interface case. Sema should just create one for us as it does for
+  // @implementation so everyone else can live life under a clear blue sky.
+  const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+  const ObjCCategoryDecl *Category =
+    Interface->FindCategoryDeclaration(OCD->getIdentifier());
+
+  // Build the "<class>_<category>" suffix used in all symbol names below.
+  llvm::SmallString<256> ExtName;
+  llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
+                                     << OCD->getName();
+
+  std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+  for (ObjCCategoryImplDecl::instmeth_iterator
+         i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+    // Instance methods should always be defined.
+    InstanceMethods.push_back(GetMethodConstant(*i));
+  }
+  for (ObjCCategoryImplDecl::classmeth_iterator
+         i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+    // Class methods should always be defined.
+    ClassMethods.push_back(GetMethodConstant(*i));
+  }
+
+  // Fill in struct _objc_category field by field.
+  std::vector<llvm::Constant*> Values(7);
+  Values[0] = GetClassName(OCD->getIdentifier());
+  Values[1] = GetClassName(Interface->getIdentifier());
+  // The extended class is referenced by name only; record it so the class
+  // symbol gets pulled in.
+  LazySymbols.insert(Interface->getIdentifier());
+  Values[2] =
+    EmitMethodList("\01L_OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+                   "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+                   InstanceMethods);
+  Values[3] =
+    EmitMethodList("\01L_OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+                   "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+                   ClassMethods);
+  // Adopted protocols are only known when the category @interface exists.
+  if (Category) {
+    Values[4] =
+      EmitProtocolList("\01L_OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+                       Category->protocol_begin(),
+                       Category->protocol_end());
+  } else {
+    Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+  }
+  Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+  // If there is no category @interface then there can be no properties.
+  if (Category) {
+    Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+                                 OCD, Category, ObjCTypes);
+  } else {
+    Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+  }
+
+  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+                                                   Values);
+
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName.str(), Init,
+                      "__OBJC,__category,regular,no_dead_strip",
+                      4, true);
+  DefinedCategories.push_back(GV);
+}
+
+// FIXME: Get from somewhere?
+// ClassFlags - Bit values used in the 'info' field of the class metadata
+// emitted below. The eClassFlags_ABI2_* values are the non-fragile-ABI
+// encodings of the corresponding properties.
+enum ClassFlags {
+  eClassFlags_Factory              = 0x00001,
+  eClassFlags_Meta                 = 0x00002,
+  // <rdar://5142207>
+  eClassFlags_HasCXXStructors      = 0x02000,
+  eClassFlags_Hidden               = 0x20000,
+  eClassFlags_ABI2_Hidden          = 0x00010,
+  eClassFlags_ABI2_HasCXXStructors = 0x00004   // <rdar://4923634>
+};
+
+/*
+  struct _objc_class {
+  Class isa;
+  Class super_class;
+  const char *name;
+  long version;
+  long info;
+  long instance_size;
+  struct _objc_ivar_list *ivars;
+  struct _objc_method_list *methods;
+  struct _objc_cache *cache;
+  struct _objc_protocol_list *protocols;
+  // Objective-C 1.0 extensions (<rdr://4585769>)
+  const char *ivar_layout;
+  struct _objc_class_ext *ext;
+  };
+
+  See EmitClassExtension();
+*/
+/// GenerateClass - Emit the fragile-ABI metadata for an @implementation:
+/// the metaclass object plus the struct _objc_class object (see the layout
+/// comment above), fixing up any forward class reference.
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+  DefinedSymbols.insert(ID->getIdentifier());
+
+  std::string ClassName = ID->getNameAsString();
+  // FIXME: Gross
+  ObjCInterfaceDecl *Interface =
+    const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+  llvm::Constant *Protocols =
+    EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+                     Interface->protocol_begin(),
+                     Interface->protocol_end());
+  unsigned Flags = eClassFlags_Factory;
+  // The layout size is in bits; instance_size is recorded in bytes.
+  unsigned Size =
+    CGM.getContext().getASTObjCImplementationLayout(ID).getSize() / 8;
+
+  // FIXME: Set CXX-structors flag.
+  if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+    Flags |= eClassFlags_Hidden;
+
+  std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+  for (ObjCImplementationDecl::instmeth_iterator
+         i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+    // Instance methods should always be defined.
+    InstanceMethods.push_back(GetMethodConstant(*i));
+  }
+  for (ObjCImplementationDecl::classmeth_iterator
+         i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+    // Class methods should always be defined.
+    ClassMethods.push_back(GetMethodConstant(*i));
+  }
+
+  // Synthesized property accessors are methods too; add any getter/setter
+  // for which a method constant exists to the instance method list.
+  for (ObjCImplementationDecl::propimpl_iterator
+         i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+    ObjCPropertyImplDecl *PID = *i;
+
+    if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+      ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+      if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+        if (llvm::Constant *C = GetMethodConstant(MD))
+          InstanceMethods.push_back(C);
+      if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+        if (llvm::Constant *C = GetMethodConstant(MD))
+          InstanceMethods.push_back(C);
+    }
+  }
+
+  // Fill in struct _objc_class field by field (see layout above).
+  std::vector<llvm::Constant*> Values(12);
+  // isa: the metaclass object.
+  Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+  if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+    // Record a reference to the super class.
+    LazySymbols.insert(Super->getIdentifier());
+
+    // super_class is emitted as the super class's name (cf. EmitMetaClass).
+    Values[ 1] =
+      llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+                                     ObjCTypes.ClassPtrTy);
+  } else {
+    // Root class: no super class.
+    Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+  }
+  Values[ 2] = GetClassName(ID->getIdentifier());
+  // Version is always 0.
+  Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+  Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+  Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+  Values[ 6] = EmitIvarList(ID, false);
+  Values[ 7] =
+    EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getName(),
+                   "__OBJC,__inst_meth,regular,no_dead_strip",
+                   InstanceMethods);
+  // cache is always NULL.
+  Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+  Values[ 9] = Protocols;
+  Values[10] = BuildIvarLayout(ID, true);
+  Values[11] = EmitClassExtension(ID);
+  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+                                                   Values);
+  std::string Name("\01L_OBJC_CLASS_");
+  Name += ClassName;
+  const char *Section = "__OBJC,__class,regular,no_dead_strip";
+  // Check for a forward reference; if one exists, give it a definition,
+  // otherwise create the global fresh.
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+  if (GV) {
+    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+           "Forward metaclass reference has incorrect type.");
+    GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+    GV->setInitializer(Init);
+    GV->setSection(Section);
+    GV->setAlignment(4);
+    CGM.AddUsedGlobal(GV);
+  } 
+  else
+    GV = CreateMetadataVar(Name, Init, Section, 4, true);
+  DefinedClasses.push_back(GV);
+}
+
+/// EmitMetaClass - Emit the struct _objc_class object for the metaclass of
+/// implementation ID, reusing a forward reference if one exists.
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+                                         llvm::Constant *Protocols,
+                                         const ConstantVector &Methods) {
+  unsigned Flags = eClassFlags_Meta;
+  // The instance_size recorded for a metaclass is sizeof(struct _objc_class).
+  unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+
+  if (CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden)
+    Flags |= eClassFlags_Hidden;
+
+  std::vector<llvm::Constant*> Values(12);
+  // The isa for the metaclass is the root of the hierarchy.
+  const ObjCInterfaceDecl *Root = ID->getClassInterface();
+  while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+    Root = Super;
+  Values[ 0] =
+    llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
+                                   ObjCTypes.ClassPtrTy);
+  // The super class for the metaclass is emitted as the name of the
+  // super class. The runtime fixes this up to point to the
+  // *metaclass* for the super class.
+  if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+    Values[ 1] =
+      llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+                                     ObjCTypes.ClassPtrTy);
+  } else {
+    // Root class: no super class.
+    Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+  }
+  Values[ 2] = GetClassName(ID->getIdentifier());
+  // Version is always 0.
+  Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+  Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+  Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+  Values[ 6] = EmitIvarList(ID, true);
+  Values[ 7] =
+    EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+                   "__OBJC,__cls_meth,regular,no_dead_strip",
+                   Methods);
+  // cache is always NULL.
+  Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+  Values[ 9] = Protocols;
+  // ivar_layout for metaclass is always NULL.
+  Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+  // The class extension is always unused for metaclasses.
+  Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+                                                   Values);
+
+  std::string Name("\01L_OBJC_METACLASS_");
+  Name += ID->getNameAsCString();
+
+  // Check for a forward reference (created by EmitMetaClassRef) and give it
+  // a definition; otherwise create the global fresh.
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+  if (GV) {
+    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+           "Forward metaclass reference has incorrect type.");
+    GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+    GV->setInitializer(Init);
+  } else {
+    GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+                                  llvm::GlobalValue::InternalLinkage,
+                                  Init, Name);
+  }
+  GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+  GV->setAlignment(4);
+  CGM.AddUsedGlobal(GV);
+
+  return GV;
+}
+
+/// EmitMetaClassRef - Return a reference to the metaclass global for ID,
+/// creating an external forward reference if it has not been emitted yet.
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+  std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+
+  // FIXME: Should we look these up somewhere other than the module? It's a
+  // bit silly since we only generate these while processing an
+  // implementation, so exactly one pointer would work if we knew when we
+  // entered/exited an implementation block.
+
+  // Check for an existing reference. Pass 'true' so that a previously
+  // emitted metaclass with internal linkage is also returned.
+  if (llvm::GlobalVariable *GV =
+        CGM.getModule().getGlobalVariable(Name, true)) {
+    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+           "Forward metaclass reference has incorrect type.");
+    return GV;
+  }
+
+  // Generate as an external reference to keep a consistent
+  // module. This will be patched up when we emit the metaclass.
+  return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+                                  llvm::GlobalValue::ExternalLinkage,
+                                  0, Name);
+}
+
+/// EmitSuperClassRef - Return a reference to the class metadata global for
+/// ID, creating an external forward reference if it does not exist yet.
+llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
+  std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
+
+  // Pass 'true' so a previously emitted class with internal linkage is
+  // also returned.
+  if (llvm::GlobalVariable *GV =
+        CGM.getModule().getGlobalVariable(Name, true)) {
+    assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+           "Forward class metadata reference has incorrect type.");
+    return GV;
+  }
+
+  // Otherwise emit an external forward reference; it gets a definition
+  // when the class itself is emitted.
+  return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+                                  llvm::GlobalValue::ExternalLinkage,
+                                  0, Name);
+}
+
+/*
+  struct objc_class_ext {
+  uint32_t size;
+  const char *weak_ivar_layout;
+  struct _objc_property_list *properties;
+  };
+*/
+/// EmitClassExtension - Emit the struct objc_class_ext (see the layout
+/// comment above) for ID, or a null pointer when it would be empty.
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+  llvm::Constant *WeakIvarLayout = BuildIvarLayout(ID, false);
+  llvm::Constant *PropertyList =
+    EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+                     ID, ID->getClassInterface(), ObjCTypes);
+
+  // Return null if no extension bits are used.
+  if (WeakIvarLayout->isNullValue() && PropertyList->isNullValue())
+    return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+  uint64_t Size =
+    CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+  std::vector<llvm::Constant*> Values(3);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+  Values[1] = WeakIvarLayout;
+  Values[2] = PropertyList;
+
+  llvm::Constant *Init =
+    llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+  return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getName(),
+                           Init, "__OBJC,__class_ext,regular,no_dead_strip",
+                           4, true);
+}
+
+/*
+  struct objc_ivar {
+  char *ivar_name;
+  char *ivar_type;
+  int ivar_offset;
+  };
+
+  struct objc_ivar_list {
+  int ivar_count;
+  struct objc_ivar list[count];
+  };
+*/
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+                                        bool ForClass) {
+  std::vector<llvm::Constant*> Ivars, Ivar(3);
+
+  // When emitting the root class GCC emits ivar entries for the
+  // actual class structure. It is not clear if we need to follow this
+  // behavior; for now lets try and get away with not doing it. If so,
+  // the cleanest solution would be to make up an ObjCInterfaceDecl
+  // for the class.
+  if (ForClass)
+    return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+  ObjCInterfaceDecl *OID =
+    const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+
+  llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
+  CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);
+
+  for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
+    ObjCIvarDecl *IVD = OIvars[i];
+    // Ignore unnamed bit-fields.
+    if (!IVD->getDeclName())
+      continue;
+    Ivar[0] = GetMethodVarName(IVD->getIdentifier());
+    Ivar[1] = GetMethodVarType(IVD);
+    Ivar[2] = llvm::ConstantInt::get(ObjCTypes.IntTy,
+                                     ComputeIvarBaseOffset(CGM, OID, IVD));
+    Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+  }
+
+  // Return null for empty list.
+  if (Ivars.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+  std::vector<llvm::Constant*> Values(2);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+  llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+                                             Ivars.size());
+  Values[1] = llvm::ConstantArray::get(AT, Ivars);
+  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+  llvm::GlobalVariable *GV;
+  if (ForClass)
+    GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getName(),
+                           Init, "__OBJC,__class_vars,regular,no_dead_strip",
+                           4, true);
+  else
+    GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_" + ID->getName(),
+                           Init, "__OBJC,__instance_vars,regular,no_dead_strip",
+                           4, true);
+  return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
+
+/*
+  struct objc_method {
+  SEL method_name;
+  char *method_types;
+  void *method;
+  };
+
+  struct objc_method_list {
+  struct objc_method_list *obsolete;
+  int count;
+  struct objc_method methods_list[count];
+  };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+  // FIXME: Use DenseMap::lookup
+  llvm::Function *Fn = MethodDefinitions[MD];
+  if (!Fn)
+    return 0;
+
+  std::vector<llvm::Constant*> Method(3);
+  Method[0] =
+    llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+                                   ObjCTypes.SelectorPtrTy);
+  Method[1] = GetMethodVarType(MD);
+  Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+  return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(llvm::Twine Name,
+                                          const char *Section,
+                                          const ConstantVector &Methods) {
+  // Return null for empty list.
+  if (Methods.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+  std::vector<llvm::Constant*> Values(3);
+  Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+  llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+                                             Methods.size());
+  Values[2] = llvm::ConstantArray::get(AT, Methods);
+  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+  llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+  return llvm::ConstantExpr::getBitCast(GV,
+                                        ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+                                                const ObjCContainerDecl *CD) {
+  llvm::SmallString<256> Name;
+  GetNameForMethod(OMD, CD, Name);
+
+  CodeGenTypes &Types = CGM.getTypes();
+  const llvm::FunctionType *MethodTy =
+    Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
+  llvm::Function *Method =
+    llvm::Function::Create(MethodTy,
+                           llvm::GlobalValue::InternalLinkage,
+                           Name.str(),
+                           &CGM.getModule());
+  MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+  return Method;
+}
+
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateMetadataVar(llvm::Twine Name,
+                                   llvm::Constant *Init,
+                                   const char *Section,
+                                   unsigned Align,
+                                   bool AddToUsed) {
+  const llvm::Type *Ty = Init->getType();
+  llvm::GlobalVariable *GV =
+    new llvm::GlobalVariable(CGM.getModule(), Ty, false,
+                             llvm::GlobalValue::InternalLinkage, Init, Name);
+  if (Section)
+    GV->setSection(Section);
+  if (Align)
+    GV->setAlignment(Align);
+  if (AddToUsed)
+    CGM.AddUsedGlobal(GV);
+  return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+  // Abuse this interface function as a place to finalize.
+  FinishModule();
+  return NULL;
+}
+
/// GetPropertyGetFunction - Return the runtime function used to implement
/// property 'get' operations; simply forwards to the types helper.
llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
  return ObjCTypes.getGetPropertyFn();
}
+
/// GetPropertySetFunction - Return the runtime function used to implement
/// property 'set' operations; simply forwards to the types helper.
llvm::Constant *CGObjCMac::GetPropertySetFunction() {
  return ObjCTypes.getSetPropertyFn();
}
+
/// EnumerationMutationFunction - Return the function called when a
/// collection is mutated during fast enumeration; forwards to the helper.
llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
  return ObjCTypes.getEnumerationMutationFn();
}
+
+/*
+
+  Objective-C setjmp-longjmp (sjlj) Exception Handling
+  --
+
+  The basic framework for a @try-catch-finally is as follows:
+  {
+  objc_exception_data d;
+  id _rethrow = null;
+  bool _call_try_exit = true;
+
+  objc_exception_try_enter(&d);
+  if (!setjmp(d.jmp_buf)) {
+  ... try body ...
+  } else {
+  // exception path
+  id _caught = objc_exception_extract(&d);
+
+  // enter new try scope for handlers
+  if (!setjmp(d.jmp_buf)) {
+  ... match exception and execute catch blocks ...
+
+  // fell off end, rethrow.
+  _rethrow = _caught;
+  ... jump-through-finally to finally_rethrow ...
+  } else {
+  // exception in catch block
+  _rethrow = objc_exception_extract(&d);
+  _call_try_exit = false;
+  ... jump-through-finally to finally_rethrow ...
+  }
+  }
+  ... jump-through-finally to finally_end ...
+
+  finally:
+  if (_call_try_exit)
+  objc_exception_try_exit(&d);
+
+  ... finally block ....
+  ... dispatch to finally destination ...
+
+  finally_rethrow:
+  objc_exception_throw(_rethrow);
+
+  finally_end:
+  }
+
+  This framework differs slightly from the one gcc uses, in that gcc
+  uses _rethrow to determine if objc_exception_try_exit should be called
+  and if the object should be rethrown. This breaks in the face of
+  throwing nil and introduces unnecessary branches.
+
+  We specialize this framework for a few particular circumstances:
+
+  - If there are no catch blocks, then we avoid emitting the second
+  exception handling context.
+
+  - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+  e)) we avoid emitting the code to rethrow an uncaught exception.
+
+  - FIXME: If there is no @finally block we can do a few more
+  simplifications.
+
+  Rethrows and Jumps-Through-Finally
+  --
+
+  Support for implicit rethrows and jumping through the finally block is
+  handled by storing the current exception-handling context in
+  ObjCEHStack.
+
+  In order to implement proper @finally semantics, we support one basic
+  mechanism for jumping through the finally block to an arbitrary
+  destination. Constructs which generate exits from a @try or @catch
+  block use this mechanism to implement the proper semantics by chaining
+  jumps, as necessary.
+
+  This mechanism works like the one used for indirect goto: we
+  arbitrarily assign an ID to each destination and store the ID for the
+  destination in a variable prior to entering the finally block. At the
+  end of the finally block we simply create a switch to the proper
+  destination.
+
+  Code gen for @synchronized(expr) stmt;
+  Effectively generating code for:
+  objc_sync_enter(expr);
+  @try stmt @finally { objc_sync_exit(expr); }
+*/
+
/// EmitTryOrSynchronizedStmt - Shared lowering for @try/@catch/@finally and
/// @synchronized, using the setjmp/longjmp-based scheme described in the
/// comment block preceding this function.
void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
                                          const Stmt &S) {
  bool isTry = isa<ObjCAtTryStmt>(S);
  // Create various blocks we refer to for handling @finally.
  llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
  llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit");
  llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit");
  llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
  llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");

  // For @synchronized, call objc_sync_enter(sync.expr). The
  // evaluation of the expression must occur before we enter the
  // @synchronized. We can safely avoid a temp here because jumps into
  // @synchronized are illegal & this will dominate uses.
  llvm::Value *SyncArg = 0;
  if (!isTry) {
    SyncArg =
      CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
    SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
    CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
  }

  // Push an EH context entry, used for handling rethrows and jumps
  // through finally.
  CGF.PushCleanupBlock(FinallyBlock);

  if (CGF.ObjCEHValueStack.empty())
    CGF.ObjCEHValueStack.push_back(0);
  // If This is a nested @try, caught exception is that of enclosing @try.
  else
    CGF.ObjCEHValueStack.push_back(CGF.ObjCEHValueStack.back());
  // Allocate memory for the exception data and rethrow pointer.
  llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
                                                    "exceptiondata.ptr");
  llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
                                                 "_rethrow");
  // _call_try_exit controls whether the @finally prologue calls
  // objc_exception_try_exit; it is cleared on the exception paths below.
  llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(
                                               llvm::Type::getInt1Ty(VMContext),
                                                     "_call_try_exit");
  CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext),
                          CallTryExitPtr);

  // Enter a new try block and call setjmp.
  CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
  // The jmp_buf array lives at index 0 of the exception data; GEP down to
  // its first element to get the buffer pointer setjmp expects.
  llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0,
                                                       "jmpbufarray");
  JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp");
  llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
                                                     JmpBufPtr, "result");

  llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
  llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
  // A non-zero setjmp result means we got here via a longjmp from a throw
  // inside the try body.
  CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"),
                           TryHandler, TryBlock);

  // Emit the @try block.
  CGF.EmitBlock(TryBlock);
  CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
               : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
  CGF.EmitBranchThroughCleanup(FinallyEnd);

  // Emit the "exception in @try" block.
  CGF.EmitBlock(TryHandler);

  // Retrieve the exception object.  We may emit multiple blocks but
  // nothing can cross this so the value is already in SSA form.
  llvm::Value *Caught =
    CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
                           ExceptionData, "caught");
  CGF.ObjCEHValueStack.back() = Caught;
  if (!isTry) {
    // @synchronized has no handlers: remember the exception for rethrow,
    // skip objc_exception_try_exit, and head for the finally block.
    CGF.Builder.CreateStore(Caught, RethrowPtr);
    CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
                            CallTryExitPtr);
    CGF.EmitBranchThroughCleanup(FinallyRethrow);
  } else if (const ObjCAtCatchStmt* CatchStmt =
             cast<ObjCAtTryStmt>(S).getCatchStmts()) {
    // Enter a new exception try block (in case a @catch block throws
    // an exception).
    CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);

    llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
                                                       JmpBufPtr, "result");
    llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw");

    llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
    llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler");
    CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);

    CGF.EmitBlock(CatchBlock);

    // Handle catch list. As a special case we check if everything is
    // matched and avoid generating code for falling off the end if
    // so.
    bool AllMatched = false;
    for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
      llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");

      const ParmVarDecl *CatchParam = CatchStmt->getCatchParamDecl();
      const ObjCObjectPointerType *OPT = 0;

      // catch(...) always matches.
      if (!CatchParam) {
        AllMatched = true;
      } else {
        OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();

        // catch(id e) always matches.
        // FIXME: For the time being we also match id<X>; this should
        // be rejected by Sema instead.
        if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
          AllMatched = true;
      }

      if (AllMatched) {
        // Bind the caught object to the catch parameter, if present
        // (catch(...) has none), then emit the body; no class-match test
        // is needed.
        if (CatchParam) {
          CGF.EmitLocalBlockVarDecl(*CatchParam);
          assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
          CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
        }

        CGF.EmitStmt(CatchStmt->getCatchBody());
        CGF.EmitBranchThroughCleanup(FinallyEnd);
        break;
      }

      assert(OPT && "Unexpected non-object pointer type in @catch");
      QualType T = OPT->getPointeeType();
      const ObjCInterfaceType *ObjCType = T->getAs<ObjCInterfaceType>();
      assert(ObjCType && "Catch parameter must have Objective-C type!");

      // Check if the @catch block matches the exception object.
      llvm::Value *Class = EmitClassRef(CGF.Builder, ObjCType->getDecl());

      llvm::Value *Match =
        CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
                                Class, Caught, "match");

      llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched");

      CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
                               MatchedBlock, NextCatchBlock);

      // Emit the @catch block.
      CGF.EmitBlock(MatchedBlock);
      // CatchParam is non-null here: a null parameter would have taken the
      // AllMatched path above.
      CGF.EmitLocalBlockVarDecl(*CatchParam);
      assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");

      llvm::Value *Tmp =
        CGF.Builder.CreateBitCast(Caught,
                                  CGF.ConvertType(CatchParam->getType()),
                                  "tmp");
      CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));

      CGF.EmitStmt(CatchStmt->getCatchBody());
      CGF.EmitBranchThroughCleanup(FinallyEnd);

      CGF.EmitBlock(NextCatchBlock);
    }

    if (!AllMatched) {
      // None of the handlers caught the exception, so store it to be
      // rethrown at the end of the @finally block.
      CGF.Builder.CreateStore(Caught, RethrowPtr);
      CGF.EmitBranchThroughCleanup(FinallyRethrow);
    }

    // Emit the exception handler for the @catch blocks.
    CGF.EmitBlock(CatchHandler);
    CGF.Builder.CreateStore(
      CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
                             ExceptionData),
      RethrowPtr);
    CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
                            CallTryExitPtr);
    CGF.EmitBranchThroughCleanup(FinallyRethrow);
  } else {
    // @try with no @catch clauses: any exception heads straight for the
    // rethrow path after @finally runs.
    CGF.Builder.CreateStore(Caught, RethrowPtr);
    CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
                            CallTryExitPtr);
    CGF.EmitBranchThroughCleanup(FinallyRethrow);
  }

  // Pop the exception-handling stack entry. It is important to do
  // this now, because the code in the @finally block is not in this
  // context.
  CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();

  CGF.ObjCEHValueStack.pop_back();

  // Emit the @finally block.
  CGF.EmitBlock(FinallyBlock);
  llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp");

  CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit);

  // Normal exit from the try scope: leave the runtime's exception context.
  CGF.EmitBlock(FinallyExit);
  CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData);

  CGF.EmitBlock(FinallyNoExit);
  if (isTry) {
    if (const ObjCAtFinallyStmt* FinallyStmt =
        cast<ObjCAtTryStmt>(S).getFinallyStmt())
      CGF.EmitStmt(FinallyStmt->getFinallyBody());
  } else {
    // Emit objc_sync_exit(expr); as finally's sole statement for
    // @synchronized.
    CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
  }

  // Emit the switch block
  // (dispatches branch-through-finally jumps to their real destinations).
  if (Info.SwitchBlock)
    CGF.EmitBlock(Info.SwitchBlock);
  if (Info.EndBlock)
    CGF.EmitBlock(Info.EndBlock);

  CGF.EmitBlock(FinallyRethrow);
  CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
                         CGF.Builder.CreateLoad(RethrowPtr));
  CGF.Builder.CreateUnreachable();

  CGF.EmitBlock(FinallyEnd);
}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+                              const ObjCAtThrowStmt &S) {
+  llvm::Value *ExceptionAsObject;
+
+  if (const Expr *ThrowExpr = S.getThrowExpr()) {
+    llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
+    ExceptionAsObject =
+      CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+  } else {
+    assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+           "Unexpected rethrow outside @catch block.");
+    ExceptionAsObject = CGF.ObjCEHValueStack.back();
+  }
+
+  CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+  CGF.Builder.CreateUnreachable();
+
+  // Clear the insertion point to indicate we are in unreachable code.
+  CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+                                          llvm::Value *AddrWeakObj) {
+  const llvm::Type* DestTy =
+    cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+  AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
+                                          ObjCTypes.PtrObjectPtrTy);
+  llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+                                                  AddrWeakObj, "weakread");
+  read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+  return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+                                   llvm::Value *src, llvm::Value *dst) {
+  const llvm::Type * SrcTy = src->getType();
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+      : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+                          src, dst, "weakassign");
+  return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+                                     llvm::Value *src, llvm::Value *dst) {
+  const llvm::Type * SrcTy = src->getType();
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+      : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+                          src, dst, "globalassign");
+  return;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+                                   llvm::Value *src, llvm::Value *dst,
+                                   llvm::Value *ivarOffset) {
+  assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
+  const llvm::Type * SrcTy = src->getType();
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+      : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+                          src, dst, ivarOffset);
+  return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+                                         llvm::Value *src, llvm::Value *dst) {
+  const llvm::Type * SrcTy = src->getType();
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+      : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+                          src, dst, "weakassign");
+  return;
+}
+
+void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+                                         llvm::Value *DestPtr,
+                                         llvm::Value *SrcPtr,
+                                         QualType Ty) {
+  // Get size info for this aggregate.
+  std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
+  unsigned long size = TypeInfo.first/8;
+  SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+  DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+  llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
+  CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+                          DestPtr, SrcPtr, N);
+  return;
+}
+
+/// EmitObjCValueForIvar - Code Gen for ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+                                       QualType ObjectTy,
+                                       llvm::Value *BaseValue,
+                                       const ObjCIvarDecl *Ivar,
+                                       unsigned CVRQualifiers) {
+  const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCInterfaceType>()->getDecl();
+  return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+                                  EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+                                       const ObjCInterfaceDecl *Interface,
+                                       const ObjCIvarDecl *Ivar) {
+  uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+  return llvm::ConstantInt::get(
+    CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+    Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
/// See: <rdar://4810609&4810587>
+/// struct IMAGE_INFO {
+///   unsigned version;
+///   unsigned flags;
+/// };
// Bit flags stored in the 'flags' field of the image-info marker; see
// EmitImageInfo below for how each is set.
enum ImageInfoFlags {
  eImageInfo_FixAndContinue      = (1 << 0), // FIXME: Not sure what
                                             // this implies.
  eImageInfo_GarbageCollected    = (1 << 1), // Set whenever GC is enabled.
  eImageInfo_GCOnly              = (1 << 2), // Set for GC-only mode.
  eImageInfo_OptimizedByDyld     = (1 << 3), // FIXME: When is this set.

  // A flag indicating that the module has no instances of an
  // @synthesize of a superclass variable. <rdar://problem/6803242>
  eImageInfo_CorrectedSynthesize = (1 << 4)
};
+
+void CGObjCMac::EmitImageInfo() {
+  unsigned version = 0; // Version is unused?
+  unsigned flags = 0;
+
+  // FIXME: Fix and continue?
+  if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+    flags |= eImageInfo_GarbageCollected;
+  if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+    flags |= eImageInfo_GCOnly;
+
+  // We never allow @synthesize of a superclass property.
+  flags |= eImageInfo_CorrectedSynthesize;
+
+  // Emitted as int[2];
+  llvm::Constant *values[2] = {
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), version),
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags)
+  };
+  llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), 2);
+
+  const char *Section;
+  if (ObjCABI == 1)
+    Section = "__OBJC, __image_info,regular";
+  else
+    Section = "__DATA, __objc_imageinfo, regular, no_dead_strip";
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar("\01L_OBJC_IMAGE_INFO",
+                      llvm::ConstantArray::get(AT, values, 2),
+                      Section,
+                      0,
+                      true);
+  GV->setConstant(true);
+}
+
+
+// struct objc_module {
+//   unsigned long version;
+//   unsigned long size;
+//   const char *name;
+//   Symtab symtab;
+// };
+
// FIXME: Get from somewhere
// Value written into the objc_module 'version' field by EmitModuleInfo.
static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+  uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+  std::vector<llvm::Constant*> Values(4);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion);
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+  // This used to be the filename, now it is unused. <rdr://4327263>
+  Values[2] = GetClassName(&CGM.getContext().Idents.get(""));
+  Values[3] = EmitModuleSymbols();
+  CreateMetadataVar("\01L_OBJC_MODULES",
+                    llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+                    "__OBJC,__module_info,regular,no_dead_strip",
+                    4, true);
+}
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+  unsigned NumClasses = DefinedClasses.size();
+  unsigned NumCategories = DefinedCategories.size();
+
+  // Return null if no symbols were defined.
+  if (!NumClasses && !NumCategories)
+    return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
+  std::vector<llvm::Constant*> Values(5);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+  Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+  Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+  Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+  // The runtime expects exactly the list of defined classes followed
+  // by the list of defined categories, in a single array.
+  std::vector<llvm::Constant*> Symbols(NumClasses + NumCategories);
+  for (unsigned i=0; i<NumClasses; i++)
+    Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+                                                ObjCTypes.Int8PtrTy);
+  for (unsigned i=0; i<NumCategories; i++)
+    Symbols[NumClasses + i] =
+      llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+                                     ObjCTypes.Int8PtrTy);
+
+  Values[4] =
+    llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+                                                  NumClasses + NumCategories),
+                             Symbols);
+
+  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+  llvm::GlobalVariable *GV =
+    CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
+                      "__OBJC,__symbols,regular,no_dead_strip",
+                      4, true);
+  return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+                                     const ObjCInterfaceDecl *ID) {
+  LazySymbols.insert(ID->getIdentifier());
+
+  llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+  if (!Entry) {
+    llvm::Constant *Casted =
+      llvm::ConstantExpr::getBitCast(GetClassName(ID->getIdentifier()),
+                                     ObjCTypes.ClassPtrTy);
+    Entry =
+      CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+                        "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+                        4, true);
+  }
+
+  return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel) {
+  llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+  if (!Entry) {
+    llvm::Constant *Casted =
+      llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+                                     ObjCTypes.SelectorPtrTy);
+    Entry =
+      CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
+                        "__OBJC,__message_refs,literal_pointers,no_dead_strip",
+                        4, true);
+  }
+
+  return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
+  llvm::GlobalVariable *&Entry = ClassNames[Ident];
+
+  if (!Entry)
+    Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+                          llvm::ConstantArray::get(VMContext,
+                                                   Ident->getNameStart()),
+                              "__TEXT,__cstring,cstring_literals",
+                              1, true);
+
+  return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
/// GetIvarLayoutName - Returns a unique constant for the given
/// ivar layout bitmap.
llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
                                       const ObjCCommonTypesHelper &ObjCTypes) {
  // Both parameters are currently ignored; callers always get a null i8*
  // (no layout name is emitted).
  return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
}
+
+/// GetGCAttrTypeForType - Classify a type for GC ivar-layout purposes as
+/// Strong, Weak, or GCNone. C pointers are looked through recursively so
+/// that e.g. "id *" takes the classification of its pointee.
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
+  // Explicit __strong / __weak qualifiers take precedence.
+  if (FQT.isObjCGCStrong())
+    return Qualifiers::Strong;
+  if (FQT.isObjCGCWeak())
+    return Qualifiers::Weak;
+
+  // Unqualified object and block pointers default to strong.
+  if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+    return Qualifiers::Strong;
+
+  // For plain C pointers, classify based on the pointee type.
+  const PointerType *PT = FQT->getAs<PointerType>();
+  if (PT)
+    return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+  return Qualifiers::GCNone;
+}
+
+/// BuildAggrIvarRecordLayout - Accumulate GC layout information for a
+/// record-typed member located at BytePos by delegating to
+/// BuildAggrIvarLayout with the record's own fields and struct layout.
+void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
+                                                unsigned int BytePos,
+                                                bool ForStrongLayout,
+                                                bool &HasUnion) {
+  const RecordDecl *Record = RT->getDecl();
+  // FIXME - Use iterator.
+  llvm::SmallVector<FieldDecl*, 16> FieldList(Record->field_begin(),
+                                              Record->field_end());
+  const llvm::Type *StructTy = CGM.getTypes().ConvertType(QualType(RT, 0));
+  const llvm::StructLayout *SL =
+    CGM.getTargetData().getStructLayout(cast<llvm::StructType>(StructTy));
+
+  // Passing a null implementation decl selects the record path (field
+  // offsets come from the struct layout rather than ivar base offsets).
+  BuildAggrIvarLayout(0, SL, Record, FieldList, BytePos,
+                      ForStrongLayout, HasUnion);
+}
+
+/// BuildAggrIvarLayout - Walk RecFields and record which byte positions
+/// hold GC-relevant data. Entries that must be scanned (strong for the
+/// strong layout, weak for the weak layout) are appended to IvarsInfo;
+/// everything that must be skipped is appended to SkipIvars. Offsets are
+/// relative to BytePos. When RD is null, fields are the implementation
+/// OI's ivars and offsets come from ComputeIvarBaseOffset; otherwise they
+/// come from Layout. HasUnion is set if any union is encountered, which
+/// forces the caller to sort the accumulated entries afterwards.
+void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+                             const llvm::StructLayout *Layout,
+                             const RecordDecl *RD,
+                             const llvm::SmallVectorImpl<FieldDecl*> &RecFields,
+                             unsigned int BytePos, bool ForStrongLayout,
+                             bool &HasUnion) {
+  bool IsUnion = (RD && RD->isUnion());
+  // For a union only the single largest scanned/skipped member is
+  // recorded; these track the running maxima.
+  uint64_t MaxUnionIvarSize = 0;
+  uint64_t MaxSkippedUnionIvarSize = 0;
+  FieldDecl *MaxField = 0;
+  FieldDecl *MaxSkippedField = 0;
+  FieldDecl *LastFieldBitfield = 0;
+  uint64_t MaxFieldOffset = 0;
+  uint64_t MaxSkippedFieldOffset = 0;
+  uint64_t LastBitfieldOffset = 0;
+
+  if (RecFields.empty())
+    return;
+  unsigned WordSizeInBits = CGM.getContext().Target.getPointerWidth(0);
+  unsigned ByteSizeInBits = CGM.getContext().Target.getCharWidth();
+
+  for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+    FieldDecl *Field = RecFields[i];
+    uint64_t FieldOffset;
+    if (RD) {
+      if (Field->isBitField()) {
+        CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
+
+        const llvm::Type *Ty =
+          CGM.getTypes().ConvertTypeForMemRecursive(Field->getType());
+        uint64_t TypeSize =
+          CGM.getTypes().getTargetData().getTypeAllocSize(Ty);
+        FieldOffset = Info.FieldNo * TypeSize;
+      } else
+        FieldOffset =
+          Layout->getElementOffset(CGM.getTypes().getLLVMFieldNo(Field));
+    } else
+      FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
+
+    // Skip over unnamed or bitfields
+    if (!Field->getIdentifier() || Field->isBitField()) {
+      LastFieldBitfield = Field;
+      LastBitfieldOffset = FieldOffset;
+      continue;
+    }
+
+    LastFieldBitfield = 0;
+    QualType FQT = Field->getType();
+    if (FQT->isRecordType() || FQT->isUnionType()) {
+      if (FQT->isUnionType())
+        HasUnion = true;
+
+      BuildAggrIvarRecordLayout(FQT->getAs<RecordType>(),
+                                BytePos + FieldOffset,
+                                ForStrongLayout, HasUnion);
+      continue;
+    }
+
+    if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+      const ConstantArrayType *CArray =
+        dyn_cast_or_null<ConstantArrayType>(Array);
+      // Check the dyn_cast result *before* dereferencing it; previously
+      // CArray->getSize() was read ahead of this assert.
+      assert(CArray && "only array with known element size is supported");
+      uint64_t ElCount = CArray->getSize().getZExtValue();
+      FQT = CArray->getElementType();
+      // Collapse multi-dimensional arrays into a flat element count.
+      while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+        const ConstantArrayType *CArray =
+          dyn_cast_or_null<ConstantArrayType>(Array);
+        assert(CArray && "only array with known element size is supported");
+        ElCount *= CArray->getSize().getZExtValue();
+        FQT = CArray->getElementType();
+      }
+
+      assert(!FQT->isUnionType() &&
+             "layout for array of unions not supported");
+      if (FQT->isRecordType()) {
+        int OldIndex = IvarsInfo.size() - 1;
+        int OldSkIndex = SkipIvars.size() -1;
+
+        const RecordType *RT = FQT->getAs<RecordType>();
+        BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
+                                  ForStrongLayout, HasUnion);
+
+        // Replicate layout information for each array element. Note that
+        // one element is already done.
+        uint64_t ElIx = 1;
+        for (int FirstIndex = IvarsInfo.size() - 1,
+               FirstSkIndex = SkipIvars.size() - 1 ;ElIx < ElCount; ElIx++) {
+          uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
+          for (int i = OldIndex+1; i <= FirstIndex; ++i)
+            IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
+                                        IvarsInfo[i].ivar_size));
+          for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
+            SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
+                                        SkipIvars[i].ivar_size));
+        }
+        continue;
+      }
+    }
+    // At this point, we are done with Record/Union and array there of.
+    // For other arrays we are down to its element type.
+    Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
+
+    unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
+    if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
+        || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
+      if (IsUnion) {
+        uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
+        if (UnionIvarSize > MaxUnionIvarSize) {
+          MaxUnionIvarSize = UnionIvarSize;
+          MaxField = Field;
+          MaxFieldOffset = FieldOffset;
+        }
+      } else {
+        // Scanned entries are measured in words.
+        IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
+                                    FieldSize / WordSizeInBits));
+      }
+    } else if ((ForStrongLayout &&
+                (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak))
+               || (!ForStrongLayout && GCAttr != Qualifiers::Weak)) {
+      if (IsUnion) {
+        // FIXME: Why the asymmetry? We divide by word size in bits on other
+        // side.
+        uint64_t UnionIvarSize = FieldSize;
+        if (UnionIvarSize > MaxSkippedUnionIvarSize) {
+          MaxSkippedUnionIvarSize = UnionIvarSize;
+          MaxSkippedField = Field;
+          MaxSkippedFieldOffset = FieldOffset;
+        }
+      } else {
+        // FIXME: Why the asymmetry, we divide by byte size in bits here?
+        SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
+                                    FieldSize / ByteSizeInBits));
+      }
+    }
+  }
+
+  if (LastFieldBitfield) {
+    // Last field was a bitfield. Must update skip info.
+    Expr *BitWidth = LastFieldBitfield->getBitWidth();
+    uint64_t BitFieldSize =
+      BitWidth->EvaluateAsInt(CGM.getContext()).getZExtValue();
+    GC_IVAR skivar;
+    skivar.ivar_bytepos = BytePos + LastBitfieldOffset;
+    // Round the bitfield width up to whole bytes.
+    skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+      + ((BitFieldSize % ByteSizeInBits) != 0);
+    SkipIvars.push_back(skivar);
+  }
+
+  // For a union, only the largest scanned and largest skipped member
+  // contribute to the layout.
+  if (MaxField)
+    IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
+                                MaxUnionIvarSize));
+  if (MaxSkippedField)
+    SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
+                                MaxSkippedUnionIvarSize));
+}
+
+/// BuildIvarLayout - Builds ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map displays which words in ivar list must be skipped
+/// and which must be scanned by GC (see below). String is built of bytes.
+/// Each byte is divided up in two nibbles (4-bit each). Left nibble is count
+/// of words to skip and right nibble is count of words to scan. So, each
+/// nibble represents up to 15 words to skip or scan. Skipping the rest is
+/// represented by a 0x00 byte which also ends the string.
+/// 1. when ForStrongLayout is true, following ivars are scanned:
+/// - id, Class
+/// - object *
+/// - __strong anything
+///
+/// 2. When ForStrongLayout is false, following ivars are scanned:
+/// - __weak anything
+///
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+  const ObjCImplementationDecl *OMD,
+  bool ForStrongLayout) {
+  bool hasUnion = false;
+
+  unsigned int WordsToScan, WordsToSkip;
+  const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+  // Without GC no layout is needed: return a null bitmap pointer.
+  if (CGM.getLangOptions().getGCMode() == LangOptions::NonGC)
+    return llvm::Constant::getNullValue(PtrTy);
+
+  llvm::SmallVector<FieldDecl*, 32> RecFields;
+  const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+  CGM.getContext().CollectObjCIvars(OI, RecFields);
+
+  // Add this implementations synthesized ivars.
+  llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
+  CGM.getContext().CollectSynthesizedIvars(OI, Ivars);
+  for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
+    RecFields.push_back(cast<FieldDecl>(Ivars[k]));
+
+  if (RecFields.empty())
+    return llvm::Constant::getNullValue(PtrTy);
+
+  // These members accumulate across the BuildAggrIvarLayout recursion;
+  // reset them for this class.
+  SkipIvars.clear();
+  IvarsInfo.clear();
+
+  BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+  if (IvarsInfo.empty())
+    return llvm::Constant::getNullValue(PtrTy);
+
+  // Sort on byte position in case we encountered a union nested in
+  // the ivar list (union members are emitted out of order).
+  if (hasUnion && !IvarsInfo.empty())
+    std::sort(IvarsInfo.begin(), IvarsInfo.end());
+  if (hasUnion && !SkipIvars.empty())
+    std::sort(SkipIvars.begin(), SkipIvars.end());
+
+  // Build the string of skip/scan nibbles
+  llvm::SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+  unsigned int WordSize =
+    CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+  if (IvarsInfo[0].ivar_bytepos == 0) {
+    WordsToSkip = 0;
+    WordsToScan = IvarsInfo[0].ivar_size;
+  } else {
+    WordsToSkip = IvarsInfo[0].ivar_bytepos/WordSize;
+    WordsToScan = IvarsInfo[0].ivar_size;
+  }
+  // Coalesce adjacent scanned regions; emit a skip/scan pair whenever a
+  // hole appears between two scanned regions.
+  for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
+    unsigned int TailPrevGCObjC =
+      IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+    if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
+      // consecutive 'scanned' object pointers.
+      WordsToScan += IvarsInfo[i].ivar_size;
+    } else {
+      // Skip over 'gc'able object pointer which lay over each other.
+      if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
+        continue;
+      // Must skip over 1 or more words. We save current skip/scan values
+      //  and start a new pair.
+      SKIP_SCAN SkScan;
+      SkScan.skip = WordsToSkip;
+      SkScan.scan = WordsToScan;
+      SkipScanIvars.push_back(SkScan);
+
+      // Skip the hole.
+      SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
+      SkScan.scan = 0;
+      SkipScanIvars.push_back(SkScan);
+      WordsToSkip = 0;
+      WordsToScan = IvarsInfo[i].ivar_size;
+    }
+  }
+  // Flush the final pending skip/scan pair.
+  if (WordsToScan > 0) {
+    SKIP_SCAN SkScan;
+    SkScan.skip = WordsToSkip;
+    SkScan.scan = WordsToScan;
+    SkipScanIvars.push_back(SkScan);
+  }
+
+  if (!SkipIvars.empty()) {
+    unsigned int LastIndex = SkipIvars.size()-1;
+    int LastByteSkipped =
+      SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+    LastIndex = IvarsInfo.size()-1;
+    int LastByteScanned =
+      IvarsInfo[LastIndex].ivar_bytepos +
+      IvarsInfo[LastIndex].ivar_size * WordSize;
+    // Compute number of bytes to skip at the tail end of the last ivar scanned.
+    if (LastByteSkipped > LastByteScanned) {
+      unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
+      SKIP_SCAN SkScan;
+      SkScan.skip = TotalWords - (LastByteScanned/WordSize);
+      SkScan.scan = 0;
+      SkipScanIvars.push_back(SkScan);
+    }
+  }
+  // Mini optimization of nibbles such that an 0xM0 followed by 0x0N is produced
+  // as 0xMN.
+  int SkipScan = SkipScanIvars.size()-1;
+  for (int i = 0; i <= SkipScan; i++) {
+    if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
+        && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
+      // 0xM0 followed by 0x0N detected.
+      SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
+      // Shift the remaining entries left to close the gap.
+      for (int j = i+1; j < SkipScan; j++)
+        SkipScanIvars[j] = SkipScanIvars[j+1];
+      --SkipScan;
+    }
+  }
+
+  // Generate the string. Counts above 15 do not fit in one nibble and are
+  // emitted as runs of saturated 0xf0 / 0x0f bytes.
+  std::string BitMap;
+  for (int i = 0; i <= SkipScan; i++) {
+    unsigned char byte;
+    unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
+    unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
+    unsigned int skip_big  = SkipScanIvars[i].skip / 0xf;
+    unsigned int scan_big  = SkipScanIvars[i].scan / 0xf;
+
+    // first skip big.
+    for (unsigned int ix = 0; ix < skip_big; ix++)
+      BitMap += (unsigned char)(0xf0);
+
+    // next (skip small, scan)
+    if (skip_small) {
+      byte = skip_small << 4;
+      if (scan_big > 0) {
+        byte |= 0xf;
+        --scan_big;
+      } else if (scan_small) {
+        byte |= scan_small;
+        scan_small = 0;
+      }
+      BitMap += byte;
+    }
+    // next scan big
+    for (unsigned int ix = 0; ix < scan_big; ix++)
+      BitMap += (unsigned char)(0x0f);
+    // last scan small
+    if (scan_small) {
+      byte = scan_small;
+      BitMap += byte;
+    }
+  }
+  // null terminate string.
+  unsigned char zero = 0;
+  BitMap += zero;
+
+  // -print-ivar-layout debugging aid.
+  if (CGM.getLangOptions().ObjCGCBitmapPrint) {
+    printf("\n%s ivar layout for class '%s': ",
+           ForStrongLayout ? "strong" : "weak",
+           OMD->getClassInterface()->getNameAsCString());
+    const unsigned char *s = (unsigned char*)BitMap.c_str();
+    for (unsigned i = 0; i < BitMap.size(); i++)
+      if (!(s[i] & 0xf0))
+        printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+      else
+        printf("0x%x%s",  s[i], s[i] != 0 ? ", " : "");
+    printf("\n");
+  }
+  llvm::GlobalVariable * Entry =
+    CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+                      llvm::ConstantArray::get(VMContext, BitMap.c_str()),
+                      "__TEXT,__cstring,cstring_literals",
+                      1, true);
+  return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+/// GetMethodVarName - Return a pointer to the interned C-string constant
+/// for the given selector's name, creating it on first use.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+  llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+  if (Entry == 0) {
+    // FIXME: Avoid std::string copying.
+    llvm::Constant *Str =
+      llvm::ConstantArray::get(VMContext, Sel.getAsString());
+    Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_", Str,
+                              "__TEXT,__cstring,cstring_literals", 1, true);
+  }
+  return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+  // A bare identifier is interned as its equivalent zero-argument selector.
+  Selector NullarySel = CGM.getContext().Selectors.getNullarySelector(ID);
+  return GetMethodVarName(NullarySel);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(const std::string &Name) {
+  // Route through the identifier table so equal strings share one entry.
+  IdentifierInfo *II = &CGM.getContext().Idents.get(Name);
+  return GetMethodVarName(II);
+}
+
+/// GetMethodVarType - Return a pointer to the interned @encode string for
+/// the given field's type, creating the constant on first use.
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+  std::string TypeStr;
+  CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+  // Uniqued on the encoding string itself, so distinct fields with the
+  // same type share one constant.
+  llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+  if (Entry == 0) {
+    llvm::Constant *Str = llvm::ConstantArray::get(VMContext, TypeStr);
+    Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_", Str,
+                              "__TEXT,__cstring,cstring_literals", 1, true);
+  }
+  return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+/// GetMethodVarType - Return a pointer to the interned @encode string for
+/// the given method's signature, creating the constant on first use.
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D) {
+  std::string TypeStr;
+  // const_cast is required by the encoding API's signature only.
+  CGM.getContext().getObjCEncodingForMethodDecl(const_cast<ObjCMethodDecl*>(D),
+                                                TypeStr);
+
+  llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+  if (Entry == 0) {
+    llvm::Constant *Str = llvm::ConstantArray::get(VMContext, TypeStr);
+    Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_", Str,
+                              "__TEXT,__cstring,cstring_literals", 1, true);
+  }
+  return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+  llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+  if (Entry == 0) {
+    llvm::Constant *Str =
+      llvm::ConstantArray::get(VMContext, Ident->getNameStart());
+    Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_", Str,
+                              "__TEXT,__cstring,cstring_literals", 1, true);
+  }
+  return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+                                       const Decl *Container) {
+  // Encode the property's type, then reuse the property-name table to
+  // intern the encoding string.
+  std::string TypeStr;
+  CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+  IdentifierInfo *II = &CGM.getContext().Idents.get(TypeStr);
+  return GetPropertyName(II);
+}
+
+/// GetNameForMethod - Build the symbol name for a method implementation,
+/// e.g. "\01-[Class(Category) selector:]" for an instance method.
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+                                       const ObjCContainerDecl *CD,
+                                       llvm::SmallVectorImpl<char> &Name) {
+  assert (CD && "Missing container decl in GetNameForMethod");
+  llvm::raw_svector_ostream OS(Name);
+  // The "\01" prefix tells the assembler not to mangle this name further.
+  OS << '\01' << (D->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+  const ObjCCategoryImplDecl *Category =
+    dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext());
+  if (Category)
+    OS << '(' << Category->getNameAsString() << ')';
+  OS << ' ' << D->getSelector().getAsString() << ']';
+}
+
+/// FinishModule - Emit module-level metadata and patch up forward
+/// references now that all declarations have been seen.
+void CGObjCMac::FinishModule() {
+  EmitModuleInfo();
+
+  // Emit the dummy bodies for any protocols which were referenced but
+  // never defined.
+  typedef llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ProtocolMapTy;
+  for (ProtocolMapTy::iterator I = Protocols.begin(), e = Protocols.end();
+       I != e; ++I) {
+    llvm::GlobalVariable *GV = I->second;
+    if (GV->hasInitializer())
+      continue;
+
+    // Null everything except the protocol name.
+    std::vector<llvm::Constant*> Values(5);
+    Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+    Values[1] = GetClassName(I->first);
+    Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+    Values[3] = Values[4] =
+      llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+    GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+    GV->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+                                                 Values));
+    CGM.AddUsedGlobal(GV);
+  }
+
+  // Add assembler directives to add lazy undefined symbol references
+  // for classes which are referenced but not defined. This is
+  // important for correct linker interaction.
+  //
+  // FIXME: It would be nice if we had an LLVM construct for this.
+  if (LazySymbols.empty() && DefinedSymbols.empty())
+    return;
+
+  llvm::SmallString<256> Asm;
+  Asm += CGM.getModule().getModuleInlineAsm();
+  if (!Asm.empty() && Asm.back() != '\n')
+    Asm += '\n';
+
+  llvm::raw_svector_ostream OS(Asm);
+  for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
+         e = LazySymbols.end(); I != e; ++I)
+    OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+  for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
+         e = DefinedSymbols.end(); I != e; ++I)
+    OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
+       << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
+
+  CGM.getModule().setModuleInlineAsm(OS.str());
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+  : CGObjCCommonMac(cgm), ObjCTypes(cgm) {
+  // Both cached globals are created lazily on first use.
+  ObjCEmptyCacheVar = NULL;
+  ObjCEmptyVtableVar = NULL;
+  // Metadata format version 2: the "non-fragile" (modern) Objective-C ABI.
+  ObjCABI = 2;
+}
+
+/* *** */
+
+/// ObjCCommonTypesHelper - Construct the LLVM types shared by the fragile
+/// and non-fragile Objective-C runtime ABIs.
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+  : VMContext(cgm.getLLVMContext()), CGM(cgm) {
+  CodeGen::CodeGenTypes &Types = CGM.getTypes();
+  ASTContext &Ctx = CGM.getContext();
+
+  // Cache LLVM equivalents of the basic C types at the target's widths.
+  ShortTy = Types.ConvertType(Ctx.ShortTy);
+  IntTy = Types.ConvertType(Ctx.IntTy);
+  LongTy = Types.ConvertType(Ctx.LongTy);
+  LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+  Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+  // id, id*, and SEL.
+  ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+  PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+  SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+  // FIXME: It would be nice to unify this with the opaque type, so that the IR
+  // comes out a bit cleaner.
+  const llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+  ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+
+  // I'm not sure I like this. The implicit coordination is a bit
+  // gross. We should solve this in a reasonable fashion because this
+  // is a pretty common task (match some runtime data structure with
+  // an LLVM data structure).
+
+  // FIXME: This is leaked.
+  // FIXME: Merge with rewriter code?
+
+  // Build an AST-level RecordDecl mirroring the runtime's _objc_super so
+  // super-message sends can address its fields through normal codegen:
+  // struct _objc_super {
+  //   id self;
+  //   Class cls;
+  // }
+  RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
+                                      SourceLocation(),
+                                      &Ctx.Idents.get("_objc_super"));
+  RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+                                Ctx.getObjCIdType(), 0, 0, false));
+  RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
+                                Ctx.getObjCClassType(), 0, 0, false));
+  RD->completeDefinition(Ctx);
+
+  SuperCTy = Ctx.getTagDeclType(RD);
+  SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+  SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+  SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+  // struct _prop_t {
+  //   char *name;
+  //   char *attributes;
+  // }
+  PropertyTy = llvm::StructType::get(VMContext, Int8PtrTy, Int8PtrTy, NULL);
+  CGM.getModule().addTypeName("struct._prop_t",
+                              PropertyTy);
+
+  // struct _prop_list_t {
+  //   uint32_t entsize;      // sizeof(struct _prop_t)
+  //   uint32_t count_of_properties;
+  //   struct _prop_t prop_list[count_of_properties];
+  // }
+  PropertyListTy = llvm::StructType::get(VMContext, IntTy,
+                                         IntTy,
+                                         llvm::ArrayType::get(PropertyTy, 0),
+                                         NULL);
+  CGM.getModule().addTypeName("struct._prop_list_t",
+                              PropertyListTy);
+  // struct _prop_list_t *
+  PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+  // struct _objc_method {
+  //   SEL _cmd;
+  //   char *method_type;
+  //   char *_imp;
+  // }
+  MethodTy = llvm::StructType::get(VMContext, SelectorPtrTy,
+                                   Int8PtrTy,
+                                   Int8PtrTy,
+                                   NULL);
+  CGM.getModule().addTypeName("struct._objc_method", MethodTy);
+
+  // struct _objc_cache * -- opaque: the cache layout is private to the
+  // runtime, so only a pointer to it is ever formed.
+  CacheTy = llvm::OpaqueType::get(VMContext);
+  CGM.getModule().addTypeName("struct._objc_cache", CacheTy);
+  CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+}
+
+/// ObjCTypesHelper - Construct the LLVM types specific to the fragile
+/// (32-bit "ABI 1") Objective-C runtime metadata.
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+  : ObjCCommonTypesHelper(cgm) {
+  // struct _objc_method_description {
+  //   SEL name;
+  //   char *types;
+  // }
+  MethodDescriptionTy =
+    llvm::StructType::get(VMContext, SelectorPtrTy,
+                          Int8PtrTy,
+                          NULL);
+  CGM.getModule().addTypeName("struct._objc_method_description",
+                              MethodDescriptionTy);
+
+  // struct _objc_method_description_list {
+  //   int count;
+  //   struct _objc_method_description[1];
+  // }
+  MethodDescriptionListTy =
+    llvm::StructType::get(VMContext, IntTy,
+                          llvm::ArrayType::get(MethodDescriptionTy, 0),
+                          NULL);
+  CGM.getModule().addTypeName("struct._objc_method_description_list",
+                              MethodDescriptionListTy);
+
+  // struct _objc_method_description_list *
+  MethodDescriptionListPtrTy =
+    llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+  // Protocol description structures
+
+  // struct _objc_protocol_extension {
+  //   uint32_t size;  // sizeof(struct _objc_protocol_extension)
+  //   struct _objc_method_description_list *optional_instance_methods;
+  //   struct _objc_method_description_list *optional_class_methods;
+  //   struct _objc_property_list *instance_properties;
+  // }
+  ProtocolExtensionTy =
+    llvm::StructType::get(VMContext, IntTy,
+                          MethodDescriptionListPtrTy,
+                          MethodDescriptionListPtrTy,
+                          PropertyListPtrTy,
+                          NULL);
+  CGM.getModule().addTypeName("struct._objc_protocol_extension",
+                              ProtocolExtensionTy);
+
+  // struct _objc_protocol_extension *
+  ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+  // Handle recursive construction of Protocol and ProtocolList types.
+  // The mutually-referential structs are built via opaque placeholders
+  // that are refined once the full struct bodies exist.
+
+  llvm::PATypeHolder ProtocolTyHolder = llvm::OpaqueType::get(VMContext);
+  llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
+
+  const llvm::Type *T =
+    llvm::StructType::get(VMContext,
+                          llvm::PointerType::getUnqual(ProtocolListTyHolder),
+                          LongTy,
+                          llvm::ArrayType::get(ProtocolTyHolder, 0),
+                          NULL);
+  cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(T);
+
+  // struct _objc_protocol {
+  //   struct _objc_protocol_extension *isa;
+  //   char *protocol_name;
+  //   struct _objc_protocol **_objc_protocol_list;
+  //   struct _objc_method_description_list *instance_methods;
+  //   struct _objc_method_description_list *class_methods;
+  // }
+  T = llvm::StructType::get(VMContext, ProtocolExtensionPtrTy,
+                            Int8PtrTy,
+                            llvm::PointerType::getUnqual(ProtocolListTyHolder),
+                            MethodDescriptionListPtrTy,
+                            MethodDescriptionListPtrTy,
+                            NULL);
+  cast<llvm::OpaqueType>(ProtocolTyHolder.get())->refineAbstractTypeTo(T);
+
+  ProtocolListTy = cast<llvm::StructType>(ProtocolListTyHolder.get());
+  CGM.getModule().addTypeName("struct._objc_protocol_list",
+                              ProtocolListTy);
+  // struct _objc_protocol_list *
+  ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+  ProtocolTy = cast<llvm::StructType>(ProtocolTyHolder.get());
+  CGM.getModule().addTypeName("struct._objc_protocol", ProtocolTy);
+  ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+  // Class description structures
+
+  // struct _objc_ivar {
+  //   char *ivar_name;
+  //   char *ivar_type;
+  //   int  ivar_offset;
+  // }
+  IvarTy = llvm::StructType::get(VMContext, Int8PtrTy,
+                                 Int8PtrTy,
+                                 IntTy,
+                                 NULL);
+  CGM.getModule().addTypeName("struct._objc_ivar", IvarTy);
+
+  // struct _objc_ivar_list * -- kept opaque; emitted lists are bitcast to
+  // this pointer type.
+  IvarListTy = llvm::OpaqueType::get(VMContext);
+  CGM.getModule().addTypeName("struct._objc_ivar_list", IvarListTy);
+  IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+  // struct _objc_method_list * -- likewise opaque.
+  MethodListTy = llvm::OpaqueType::get(VMContext);
+  CGM.getModule().addTypeName("struct._objc_method_list", MethodListTy);
+  MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+  // struct _objc_class_extension *
+  ClassExtensionTy =
+    llvm::StructType::get(VMContext, IntTy,
+                          Int8PtrTy,
+                          PropertyListPtrTy,
+                          NULL);
+  CGM.getModule().addTypeName("struct._objc_class_extension", ClassExtensionTy);
+  ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+  // Class is self-referential (isa, super_class), so build it through an
+  // opaque placeholder as well.
+  llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
+
+  // struct _objc_class {
+  //   Class isa;
+  //   Class super_class;
+  //   char *name;
+  //   long version;
+  //   long info;
+  //   long instance_size;
+  //   struct _objc_ivar_list *ivars;
+  //   struct _objc_method_list *methods;
+  //   struct _objc_cache *cache;
+  //   struct _objc_protocol_list *protocols;
+  //   char *ivar_layout;
+  //   struct _objc_class_ext *ext;
+  // };
+  T = llvm::StructType::get(VMContext,
+                            llvm::PointerType::getUnqual(ClassTyHolder),
+                            llvm::PointerType::getUnqual(ClassTyHolder),
+                            Int8PtrTy,
+                            LongTy,
+                            LongTy,
+                            LongTy,
+                            IvarListPtrTy,
+                            MethodListPtrTy,
+                            CachePtrTy,
+                            ProtocolListPtrTy,
+                            Int8PtrTy,
+                            ClassExtensionPtrTy,
+                            NULL);
+  cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(T);
+
+  ClassTy = cast<llvm::StructType>(ClassTyHolder.get());
+  CGM.getModule().addTypeName("struct._objc_class", ClassTy);
+  ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+  // struct _objc_category {
+  //   char *category_name;
+  //   char *class_name;
+  //   struct _objc_method_list *instance_methods;
+  //   struct _objc_method_list *class_methods;
+  //   struct _objc_protocol_list *protocols;
+  //   uint32_t size;  // sizeof(struct _objc_category)
+  //   struct _objc_property_list *instance_properties;// category's @property
+  // }
+  CategoryTy = llvm::StructType::get(VMContext, Int8PtrTy,
+                                     Int8PtrTy,
+                                     MethodListPtrTy,
+                                     MethodListPtrTy,
+                                     ProtocolListPtrTy,
+                                     IntTy,
+                                     PropertyListPtrTy,
+                                     NULL);
+  CGM.getModule().addTypeName("struct._objc_category", CategoryTy);
+
+  // Global metadata structures
+
+  // struct _objc_symtab {
+  //   long sel_ref_cnt;
+  //   SEL *refs;
+  //   short cls_def_cnt;
+  //   short cat_def_cnt;
+  //   char *defs[cls_def_cnt + cat_def_cnt];
+  // }
+  SymtabTy = llvm::StructType::get(VMContext, LongTy,
+                                   SelectorPtrTy,
+                                   ShortTy,
+                                   ShortTy,
+                                   llvm::ArrayType::get(Int8PtrTy, 0),
+                                   NULL);
+  CGM.getModule().addTypeName("struct._objc_symtab", SymtabTy);
+  SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+  // struct _objc_module {
+  //   long version;
+  //   long size;   // sizeof(struct _objc_module)
+  //   char *name;
+  //   struct _objc_symtab* symtab;
+  //  }
+  ModuleTy =
+    llvm::StructType::get(VMContext, LongTy,
+                          LongTy,
+                          Int8PtrTy,
+                          SymtabPtrTy,
+                          NULL);
+  CGM.getModule().addTypeName("struct._objc_module", ModuleTy);
+
+
+  // FIXME: This is the size of the setjmp buffer and should be target
+  // specific. 18 is what's used on 32-bit X86.
+  uint64_t SetJmpBufferSize = 18;
+
+  // Exceptions
+  const llvm::Type *StackPtrTy = llvm::ArrayType::get(
+    llvm::Type::getInt8PtrTy(VMContext), 4);
+
+  ExceptionDataTy =
+    llvm::StructType::get(VMContext, llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
+                                                          SetJmpBufferSize),
+                          StackPtrTy, NULL);
+  CGM.getModule().addTypeName("struct._objc_exception_data",
+                              ExceptionDataTy);
+
+}
+
/// ObjCNonFragileABITypesHelper - Build the LLVM (and, for message sends,
/// clang) types describing the Objective-C 2.0 "non-fragile" ABI metadata
/// structures.  Each struct layout below must match the layout the
/// Objective-C runtime expects for the corresponding section contents.
ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
  : ObjCCommonTypesHelper(cgm) {
  // struct _method_list_t {
  //   uint32_t entsize;  // sizeof(struct _objc_method)
  //   uint32_t method_count;
  //   struct _objc_method method_list[method_count];
  // }
  MethodListnfABITy = llvm::StructType::get(VMContext, IntTy,
                                            IntTy,
                                            llvm::ArrayType::get(MethodTy, 0),
                                            NULL);
  CGM.getModule().addTypeName("struct.__method_list_t",
                              MethodListnfABITy);
  // struct method_list_t *
  MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);

  // struct _protocol_t {
  //   id isa;  // NULL
  //   const char * const protocol_name;
  //   const struct _protocol_list_t * protocol_list; // super protocols
  //   const struct method_list_t * const instance_methods;
  //   const struct method_list_t * const class_methods;
  //   const struct method_list_t *optionalInstanceMethods;
  //   const struct method_list_t *optionalClassMethods;
  //   const struct _prop_list_t * properties;
  //   const uint32_t size;  // sizeof(struct _protocol_t)
  //   const uint32_t flags;  // = 0
  // }

  // Holder for struct _protocol_list_t *.  _protocol_t and _protocol_list_t
  // reference each other, so an opaque placeholder is used here and refined
  // to the concrete type once it has been built (below).
  llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);

  ProtocolnfABITy = llvm::StructType::get(VMContext, ObjectPtrTy,
                                          Int8PtrTy,
                                          llvm::PointerType::getUnqual(
                                            ProtocolListTyHolder),
                                          MethodListnfABIPtrTy,
                                          MethodListnfABIPtrTy,
                                          MethodListnfABIPtrTy,
                                          MethodListnfABIPtrTy,
                                          PropertyListPtrTy,
                                          IntTy,
                                          IntTy,
                                          NULL);
  CGM.getModule().addTypeName("struct._protocol_t",
                              ProtocolnfABITy);

  // struct _protocol_t*
  ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);

  // struct _protocol_list_t {
  //   long protocol_count;   // Note, this is 32/64 bit
  //   struct _protocol_t *[protocol_count];
  // }
  ProtocolListnfABITy = llvm::StructType::get(VMContext, LongTy,
                                              llvm::ArrayType::get(
                                                ProtocolnfABIPtrTy, 0),
                                              NULL);
  CGM.getModule().addTypeName("struct._objc_protocol_list",
                              ProtocolListnfABITy);
  // Resolve the opaque placeholder now that the real type exists.
  cast<llvm::OpaqueType>(ProtocolListTyHolder.get())->refineAbstractTypeTo(
    ProtocolListnfABITy);

  // struct _objc_protocol_list*
  ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);

  // struct _ivar_t {
  //   unsigned long int *offset;  // pointer to ivar offset location
  //   char *name;
  //   char *type;
  //   uint32_t alignment;
  //   uint32_t size;
  // }
  IvarnfABITy = llvm::StructType::get(VMContext,
                                      llvm::PointerType::getUnqual(LongTy),
                                      Int8PtrTy,
                                      Int8PtrTy,
                                      IntTy,
                                      IntTy,
                                      NULL);
  CGM.getModule().addTypeName("struct._ivar_t", IvarnfABITy);

  // struct _ivar_list_t {
  //   uint32 entsize;  // sizeof(struct _ivar_t)
  //   uint32 count;
  //   struct _ivar_t list[count];
  // }
  IvarListnfABITy = llvm::StructType::get(VMContext, IntTy,
                                          IntTy,
                                          llvm::ArrayType::get(
                                            IvarnfABITy, 0),
                                          NULL);
  CGM.getModule().addTypeName("struct._ivar_list_t", IvarListnfABITy);

  IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);

  // struct _class_ro_t {
  //   uint32_t const flags;
  //   uint32_t const instanceStart;
  //   uint32_t const instanceSize;
  //   uint32_t const reserved;  // only when building for 64bit targets
  //   const uint8_t * const ivarLayout;
  //   const char *const name;
  //   const struct _method_list_t * const baseMethods;
  //   const struct _objc_protocol_list *const baseProtocols;
  //   const struct _ivar_list_t *const ivars;
  //   const uint8_t * const weakIvarLayout;
  //   const struct _prop_list_t * const properties;
  // }

  // FIXME. Add 'reserved' field in 64bit abi mode!
  ClassRonfABITy = llvm::StructType::get(VMContext, IntTy,
                                         IntTy,
                                         IntTy,
                                         Int8PtrTy,
                                         Int8PtrTy,
                                         MethodListnfABIPtrTy,
                                         ProtocolListnfABIPtrTy,
                                         IvarListnfABIPtrTy,
                                         Int8PtrTy,
                                         PropertyListPtrTy,
                                         NULL);
  CGM.getModule().addTypeName("struct._class_ro_t",
                              ClassRonfABITy);

  // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
  std::vector<const llvm::Type*> Params;
  Params.push_back(ObjectPtrTy);
  Params.push_back(SelectorPtrTy);
  ImpnfABITy = llvm::PointerType::getUnqual(
    llvm::FunctionType::get(ObjectPtrTy, Params, false));

  // struct _class_t {
  //   struct _class_t *isa;
  //   struct _class_t * const superclass;
  //   void *cache;
  //   IMP *vtable;
  //   struct class_ro_t *ro;
  // }

  // _class_t is self-referential (isa and superclass point at _class_t), so
  // build it through an opaque placeholder that is refined afterwards.
  llvm::PATypeHolder ClassTyHolder = llvm::OpaqueType::get(VMContext);
  ClassnfABITy =
    llvm::StructType::get(VMContext,
                          llvm::PointerType::getUnqual(ClassTyHolder),
                          llvm::PointerType::getUnqual(ClassTyHolder),
                          CachePtrTy,
                          llvm::PointerType::getUnqual(ImpnfABITy),
                          llvm::PointerType::getUnqual(ClassRonfABITy),
                          NULL);
  CGM.getModule().addTypeName("struct._class_t", ClassnfABITy);

  cast<llvm::OpaqueType>(ClassTyHolder.get())->refineAbstractTypeTo(
    ClassnfABITy);

  // LLVM for struct _class_t *
  ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);

  // struct _category_t {
  //   const char * const name;
  //   struct _class_t *const cls;
  //   const struct _method_list_t * const instance_methods;
  //   const struct _method_list_t * const class_methods;
  //   const struct _protocol_list_t * const protocols;
  //   const struct _prop_list_t * const properties;
  // }
  CategorynfABITy = llvm::StructType::get(VMContext, Int8PtrTy,
                                          ClassnfABIPtrTy,
                                          MethodListnfABIPtrTy,
                                          MethodListnfABIPtrTy,
                                          ProtocolListnfABIPtrTy,
                                          PropertyListPtrTy,
                                          NULL);
  CGM.getModule().addTypeName("struct._category_t", CategorynfABITy);

  // New types for nonfragile abi messaging.
  CodeGen::CodeGenTypes &Types = CGM.getTypes();
  ASTContext &Ctx = CGM.getContext();

  // MessageRefTy - LLVM for:
  // struct _message_ref_t {
  //   IMP messenger;
  //   SEL name;
  // };

  // First the clang type for struct _message_ref_t.  A real clang RecordDecl
  // is built here (rather than just an LLVM struct) so that a matching clang
  // type, MessageRefCTy, is available for message-send lowering.
  RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct, 0,
                                      SourceLocation(),
                                      &Ctx.Idents.get("_message_ref_t"));
  RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
                                Ctx.VoidPtrTy, 0, 0, false));
  RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), 0,
                                Ctx.getObjCSelType(), 0, 0, false));
  RD->completeDefinition(Ctx);

  MessageRefCTy = Ctx.getTagDeclType(RD);
  MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
  MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));

  // MessageRefPtrTy - LLVM for struct _message_ref_t*
  MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);

  // SuperMessageRefTy - LLVM for:
  // struct _super_message_ref_t {
  //   SUPER_IMP messenger;
  //   SEL name;
  // };
  SuperMessageRefTy = llvm::StructType::get(VMContext, ImpnfABITy,
                                            SelectorPtrTy,
                                            NULL);
  CGM.getModule().addTypeName("struct._super_message_ref_t", SuperMessageRefTy);

  // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
  SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);


  // struct objc_typeinfo {
  //   const void** vtable; // objc_ehtype_vtable + 2
  //   const char*  name;    // c++ typeinfo string
  //   Class        cls;
  // };
  EHTypeTy = llvm::StructType::get(VMContext,
                                   llvm::PointerType::getUnqual(Int8PtrTy),
                                   Int8PtrTy,
                                   ClassnfABIPtrTy,
                                   NULL);
  CGM.getModule().addTypeName("struct._objc_typeinfo", EHTypeTy);
  EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+  FinishNonFragileABIModule();
+
+  return NULL;
+}
+
+void CGObjCNonFragileABIMac::AddModuleClassList(const
+                                                std::vector<llvm::GlobalValue*>
+                                                &Container,
+                                                const char *SymbolName,
+                                                const char *SectionName) {
+  unsigned NumClasses = Container.size();
+
+  if (!NumClasses)
+    return;
+
+  std::vector<llvm::Constant*> Symbols(NumClasses);
+  for (unsigned i=0; i<NumClasses; i++)
+    Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+                                                ObjCTypes.Int8PtrTy);
+  llvm::Constant* Init =
+    llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+                                                  NumClasses),
+                             Symbols);
+
+  llvm::GlobalVariable *GV =
+    new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+                             llvm::GlobalValue::InternalLinkage,
+                             Init,
+                             SymbolName);
+  GV->setAlignment(8);
+  GV->setSection(SectionName);
+  CGM.AddUsedGlobal(GV);
+}
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+  // nonfragile abi has no module definition.
+
+  // Build list of all implemented class addresses in array
+  // L_OBJC_LABEL_CLASS_$.
+  AddModuleClassList(DefinedClasses,
+                     "\01L_OBJC_LABEL_CLASS_$",
+                     "__DATA, __objc_classlist, regular, no_dead_strip");
+  
+  for (unsigned i = 0; i < DefinedClasses.size(); i++) {
+    llvm::GlobalValue *IMPLGV = DefinedClasses[i];
+    if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+      continue;
+    IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+  }
+  
+  for (unsigned i = 0; i < DefinedMetaClasses.size(); i++) {
+    llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
+    if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+      continue;
+    IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+  }    
+  
+  AddModuleClassList(DefinedNonLazyClasses,
+                     "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+                     "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+  // Build list of all implemented category addresses in array
+  // L_OBJC_LABEL_CATEGORY_$.
+  AddModuleClassList(DefinedCategories,
+                     "\01L_OBJC_LABEL_CATEGORY_$",
+                     "__DATA, __objc_catlist, regular, no_dead_strip");
+  AddModuleClassList(DefinedNonLazyCategories,
+                     "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+                     "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+  //  static int L_OBJC_IMAGE_INFO[2] = { 0, flags };
+  // FIXME. flags can be 0 | 1 | 2 | 6. For now just use 0
+  std::vector<llvm::Constant*> Values(2);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, 0);
+  unsigned int flags = 0;
+  // FIXME: Fix and continue?
+  if (CGM.getLangOptions().getGCMode() != LangOptions::NonGC)
+    flags |= eImageInfo_GarbageCollected;
+  if (CGM.getLangOptions().getGCMode() == LangOptions::GCOnly)
+    flags |= eImageInfo_GCOnly;
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+  llvm::Constant* Init = llvm::ConstantArray::get(
+    llvm::ArrayType::get(ObjCTypes.IntTy, 2),
+    Values);
+  llvm::GlobalVariable *IMGV =
+    new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+                             llvm::GlobalValue::InternalLinkage,
+                             Init,
+                             "\01L_OBJC_IMAGE_INFO");
+  IMGV->setSection("__DATA, __objc_imageinfo, regular, no_dead_strip");
+  IMGV->setConstant(true);
+  CGM.AddUsedGlobal(IMGV);
+}
+
+/// LegacyDispatchedSelector - Returns true if SEL is not in the list of
+/// NonLegacyDispatchMethods; false otherwise. What this means is that
+/// except for the 19 selectors in the list, we generate 32bit-style
+/// message dispatch call for all the rest.
+///
+bool CGObjCNonFragileABIMac::LegacyDispatchedSelector(Selector Sel) {
+  if (CGM.getCodeGenOpts().ObjCLegacyDispatch)
+    return true;
+
+  if (NonLegacyDispatchMethods.empty()) {
+    NonLegacyDispatchMethods.insert(GetNullarySelector("alloc"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("class"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("self"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("isFlipped"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("length"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("count"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("retain"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("release"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("autorelease"));
+    NonLegacyDispatchMethods.insert(GetNullarySelector("hash"));
+
+    NonLegacyDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+    NonLegacyDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+    NonLegacyDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+    NonLegacyDispatchMethods.insert(GetUnarySelector("objectForKey"));
+    NonLegacyDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+    NonLegacyDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+    NonLegacyDispatchMethods.insert(GetUnarySelector("isEqual"));
+    NonLegacyDispatchMethods.insert(GetUnarySelector("addObject"));
+    // "countByEnumeratingWithState:objects:count"
+    IdentifierInfo *KeyIdents[] = {
+      &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+      &CGM.getContext().Idents.get("objects"),
+      &CGM.getContext().Idents.get("count")
+    };
+    NonLegacyDispatchMethods.insert(
+      CGM.getContext().Selectors.getSelector(3, KeyIdents));
+  }
+  return (NonLegacyDispatchMethods.count(Sel) == 0);
+}
+
// Metadata flags: values for the 'flags' field of struct _class_ro_t
// (see BuildClassRoTInitializer / GenerateClass below).
// FIXME(review): "MetaDataDlags" looks like a typo for "MetaDataFlags";
// renaming is deferred in case the enum name is referenced elsewhere.
enum MetaDataDlags {
  CLS = 0x0,                // Ordinary class entry.
  CLS_META = 0x1,           // Entry describes a metaclass.
  CLS_ROOT = 0x2,           // Class has no superclass (root class).
  OBJC2_CLS_HIDDEN = 0x10,  // Class symbol has hidden visibility.
  CLS_EXCEPTION = 0x20      // Class carries the objc_exception attribute.
};
/// BuildClassRoTInitializer - generate meta-data for:
/// struct _class_ro_t {
///   uint32_t const flags;
///   uint32_t const instanceStart;
///   uint32_t const instanceSize;
///   uint32_t const reserved;  // only when building for 64bit targets
///   const uint8_t * const ivarLayout;
///   const char *const name;
///   const struct _method_list_t * const baseMethods;
///   const struct _protocol_list_t *const baseProtocols;
///   const struct _ivar_list_t *const ivars;
///   const uint8_t * const weakIvarLayout;
///   const struct _prop_list_t * const properties;
/// }
///
/// The same routine builds both the class and the metaclass variant; which
/// one is selected by the CLS_META bit in 'flags'.
llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
  unsigned flags,
  unsigned InstanceStart,
  unsigned InstanceSize,
  const ObjCImplementationDecl *ID) {
  std::string ClassName = ID->getNameAsString();
  // One constant per _class_ro_t field, in declaration order.
  std::vector<llvm::Constant*> Values(10); // 11 for 64bit targets!
  Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
  Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
  Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
  // FIXME. For 64bit targets add 0 here.
  // ivarLayout: metaclasses carry no ivars, so they get the null layout name.
  Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
    : BuildIvarLayout(ID, true);
  Values[ 4] = GetClassName(ID->getIdentifier());
  // const struct _method_list_t * const baseMethods;
  // For a metaclass this is the class-method list; for a class the
  // instance-method list (including synthesized property accessors).
  std::vector<llvm::Constant*> Methods;
  std::string MethodListName("\01l_OBJC_$_");
  if (flags & CLS_META) {
    MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
    for (ObjCImplementationDecl::classmeth_iterator
           i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
      // Class methods should always be defined.
      Methods.push_back(GetMethodConstant(*i));
    }
  } else {
    MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
    for (ObjCImplementationDecl::instmeth_iterator
           i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
      // Instance methods should always be defined.
      Methods.push_back(GetMethodConstant(*i));
    }
    // @synthesize'd getters/setters only have a constant if a definition
    // was actually emitted for them, hence the null checks.
    for (ObjCImplementationDecl::propimpl_iterator
           i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
      ObjCPropertyImplDecl *PID = *i;

      if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
        ObjCPropertyDecl *PD = PID->getPropertyDecl();

        if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
          if (llvm::Constant *C = GetMethodConstant(MD))
            Methods.push_back(C);
        if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
          if (llvm::Constant *C = GetMethodConstant(MD))
            Methods.push_back(C);
      }
    }
  }
  Values[ 5] = EmitMethodList(MethodListName,
                              "__DATA, __objc_const", Methods);

  const ObjCInterfaceDecl *OID = ID->getClassInterface();
  assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
  // baseProtocols
  Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
                                + OID->getName(),
                                OID->protocol_begin(),
                                OID->protocol_end());

  // ivars: only meaningful on the class entry; null for the metaclass.
  if (flags & CLS_META)
    Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
  else
    Values[ 7] = EmitIvarList(ID);
  // weakIvarLayout
  Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
    : BuildIvarLayout(ID, false);
  // properties: likewise null for the metaclass.
  if (flags & CLS_META)
    Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
  else
    Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
                                  ID, ID->getClassInterface(), ObjCTypes);
  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
                                                   Values);
  llvm::GlobalVariable *CLASS_RO_GV =
    new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
                             llvm::GlobalValue::InternalLinkage,
                             Init,
                             (flags & CLS_META) ?
                             std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
                             std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
  CLASS_RO_GV->setAlignment(
    CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassRonfABITy));
  CLASS_RO_GV->setSection("__DATA, __objc_const");
  return CLASS_RO_GV;

}
+
+/// BuildClassMetaData - This routine defines that to-level meta-data
+/// for the given ClassName for:
+/// struct _class_t {
+///   struct _class_t *isa;
+///   struct _class_t * const superclass;
+///   void *cache;
+///   IMP *vtable;
+///   struct class_ro_t *ro;
+/// }
+///
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
+  std::string &ClassName,
+  llvm::Constant *IsAGV,
+  llvm::Constant *SuperClassGV,
+  llvm::Constant *ClassRoGV,
+  bool HiddenVisibility) {
+  std::vector<llvm::Constant*> Values(5);
+  Values[0] = IsAGV;
+  Values[1] = SuperClassGV;
+  if (!Values[1])
+    Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+  Values[2] = ObjCEmptyCacheVar;  // &ObjCEmptyCacheVar
+  Values[3] = ObjCEmptyVtableVar; // &ObjCEmptyVtableVar
+  Values[4] = ClassRoGV;                 // &CLASS_RO_GV
+  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+                                                   Values);
+  llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+  GV->setInitializer(Init);
+  GV->setSection("__DATA, __objc_data");
+  GV->setAlignment(
+    CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ClassnfABITy));
+  if (HiddenVisibility)
+    GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+  return GV;
+}
+
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+  return OD->getClassMethod(GetNullarySelector("load")) != 0;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+                                              uint32_t &InstanceStart,
+                                              uint32_t &InstanceSize) {
+  const ASTRecordLayout &RL =
+    CGM.getContext().getASTObjCImplementationLayout(OID);
+
+  // InstanceSize is really instance end.
+  InstanceSize = llvm::RoundUpToAlignment(RL.getDataSize(), 8) / 8;
+
+  // If there are no fields, the start is the same as the end.
+  if (!RL.getFieldCount())
+    InstanceStart = InstanceSize;
+  else
+    InstanceStart = RL.getFieldOffset(0) / 8;
+}
+
/// GenerateClass - Emit the non-fragile ABI metadata for a class
/// implementation: a metaclass/class pair of _class_t globals (each with its
/// own _class_ro_t), registered in the module-level lists.
void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
  std::string ClassName = ID->getNameAsString();
  // Lazily create the external references every _class_t points at for its
  // cache and vtable slots; the runtime provides the actual definitions.
  if (!ObjCEmptyCacheVar) {
    ObjCEmptyCacheVar = new llvm::GlobalVariable(
      CGM.getModule(),
      ObjCTypes.CacheTy,
      false,
      llvm::GlobalValue::ExternalLinkage,
      0,
      "_objc_empty_cache");

    ObjCEmptyVtableVar = new llvm::GlobalVariable(
      CGM.getModule(),
      ObjCTypes.ImpnfABITy,
      false,
      llvm::GlobalValue::ExternalLinkage,
      0,
      "_objc_empty_vtable");
  }
  assert(ID->getClassInterface() &&
         "CGObjCNonFragileABIMac::GenerateClass - class is 0");
  // First pass: the metaclass.  Its instance start/size are taken from
  // sizeof(struct _class_t) itself.
  // FIXME: Is this correct (that meta class size is never computed)?
  uint32_t InstanceStart =
    CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
  uint32_t InstanceSize = InstanceStart;
  uint32_t flags = CLS_META;
  std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
  std::string ObjCClassName(getClassSymbolPrefix());

  llvm::GlobalVariable *SuperClassGV, *IsAGV;

  bool classIsHidden =
    CGM.getDeclVisibilityMode(ID->getClassInterface()) == LangOptions::Hidden;
  if (classIsHidden)
    flags |= OBJC2_CLS_HIDDEN;
  if (!ID->getClassInterface()->getSuperClass()) {
    // class is root: the metaclass's isa is itself a metaclass, and its
    // superclass is the class.
    flags |= CLS_ROOT;
    SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
    IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
  } else {
    // Has a root. Current class is not a root.
    // The metaclass's isa is the root class's metaclass.
    const ObjCInterfaceDecl *Root = ID->getClassInterface();
    while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
      Root = Super;
    IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
    if (Root->hasAttr<WeakImportAttr>())
      IsAGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
    // work on super class metadata symbol.
    std::string SuperClassName =
      ObjCMetaClassName +
        ID->getClassInterface()->getSuperClass()->getNameAsString();
    SuperClassGV = GetClassGlobal(SuperClassName);
    if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
      SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
  }
  llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
                                                               InstanceStart,
                                                               InstanceSize,ID);
  std::string TClassName = ObjCMetaClassName + ClassName;
  llvm::GlobalVariable *MetaTClass =
    BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
                       classIsHidden);
  DefinedMetaClasses.push_back(MetaTClass);

  // Second pass: metadata for the class itself.  Its isa is the metaclass
  // just emitted.
  flags = CLS;
  if (classIsHidden)
    flags |= OBJC2_CLS_HIDDEN;

  if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
    flags |= CLS_EXCEPTION;

  if (!ID->getClassInterface()->getSuperClass()) {
    flags |= CLS_ROOT;
    SuperClassGV = 0;
  } else {
    // Has a root. Current class is not a root.
    // NOTE(review): despite the name, RootClassName holds the *immediate
    // superclass's* name, not the root's.
    std::string RootClassName =
      ID->getClassInterface()->getSuperClass()->getNameAsString();
    SuperClassGV = GetClassGlobal(ObjCClassName + RootClassName);
    if (ID->getClassInterface()->getSuperClass()->hasAttr<WeakImportAttr>())
      SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
  }
  GetClassSizeInfo(ID, InstanceStart, InstanceSize);
  CLASS_RO_GV = BuildClassRoTInitializer(flags,
                                         InstanceStart,
                                         InstanceSize,
                                         ID);

  TClassName = ObjCClassName + ClassName;
  llvm::GlobalVariable *ClassMD =
    BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
                       classIsHidden);
  DefinedClasses.push_back(ClassMD);

  // Determine if this class is also "non-lazy".
  if (ImplementationIsNonLazy(ID))
    DefinedNonLazyClasses.push_back(ClassMD);

  // Force the definition of the EHType if necessary.
  if (flags & CLS_EXCEPTION)
    GetInterfaceEHType(ID->getClassInterface(), true);
}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+///   @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1
+/// which will hold address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
+                                                         const ObjCProtocolDecl *PD) {
+
+  // This routine is called for @protocol only. So, we must build definition
+  // of protocol's meta-data (not a reference to it!)
+  //
+  llvm::Constant *Init =
+    llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+                                   ObjCTypes.ExternalProtocolPtrTy);
+
+  std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+  ProtocolName += PD->getNameAsCString();
+
+  llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+  if (PTGV)
+    return Builder.CreateLoad(PTGV, "tmp");
+  PTGV = new llvm::GlobalVariable(
+    CGM.getModule(),
+    Init->getType(), false,
+    llvm::GlobalValue::WeakAnyLinkage,
+    Init,
+    ProtocolName);
+  PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+  PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+  CGM.AddUsedGlobal(PTGV);
+  return Builder.CreateLoad(PTGV, "tmp");
+}
+
/// GenerateCategory - Build metadata for a category implementation.
/// struct _category_t {
///   const char * const name;
///   struct _class_t *const cls;
///   const struct _method_list_t * const instance_methods;
///   const struct _method_list_t * const class_methods;
///   const struct _protocol_list_t * const protocols;
///   const struct _prop_list_t * const properties;
/// }
///
void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
  const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
  const char *Prefix = "\01l_OBJC_$_CATEGORY_";
  // Symbol for this category: l_OBJC_$_CATEGORY_<Class>_$_<Category>.
  std::string ExtCatName(Prefix + Interface->getNameAsString()+
                         "_$_" + OCD->getNameAsString());
  std::string ExtClassName(getClassSymbolPrefix() +
                           Interface->getNameAsString());

  // One constant per _category_t field, in declaration order.
  std::vector<llvm::Constant*> Values(6);
  Values[0] = GetClassName(OCD->getIdentifier());
  // meta-class entry symbol
  llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
  if (Interface->hasAttr<WeakImportAttr>())
    ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);

  Values[1] = ClassGV;
  // instance_methods
  std::vector<llvm::Constant*> Methods;
  std::string MethodListName(Prefix);
  MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
    "_$_" + OCD->getNameAsString();

  for (ObjCCategoryImplDecl::instmeth_iterator
         i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
    // Instance methods should always be defined.
    Methods.push_back(GetMethodConstant(*i));
  }

  Values[2] = EmitMethodList(MethodListName,
                             "__DATA, __objc_const",
                             Methods);

  // class_methods
  MethodListName = Prefix;
  MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
    OCD->getNameAsString();
  Methods.clear();
  for (ObjCCategoryImplDecl::classmeth_iterator
         i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
    // Class methods should always be defined.
    Methods.push_back(GetMethodConstant(*i));
  }

  Values[3] = EmitMethodList(MethodListName,
                             "__DATA, __objc_const",
                             Methods);
  // protocols/properties come from the category *declaration*, when one
  // exists; an implementation without a matching declaration gets nulls.
  const ObjCCategoryDecl *Category =
    Interface->FindCategoryDeclaration(OCD->getIdentifier());
  if (Category) {
    llvm::SmallString<256> ExtName;
    llvm::raw_svector_ostream(ExtName) << Interface->getName() << "_$_"
                                       << OCD->getName();
    // Category was found via OCD's identifier, so Category->getName() should
    // match OCD->getName() (and hence ExtName).
    Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
                                 + Interface->getName() + "_$_"
                                 + Category->getName(),
                                 Category->protocol_begin(),
                                 Category->protocol_end());
    Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
                                 OCD, Category, ObjCTypes);
  } else {
    Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
    Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
  }

  llvm::Constant *Init =
    llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
                              Values);
  llvm::GlobalVariable *GCATV
    = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
                               false,
                               llvm::GlobalValue::InternalLinkage,
                               Init,
                               ExtCatName);
  GCATV->setAlignment(
    CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.CategorynfABITy));
  GCATV->setSection("__DATA, __objc_const");
  CGM.AddUsedGlobal(GCATV);
  DefinedCategories.push_back(GCATV);

  // Determine if this category is also "non-lazy".
  if (ImplementationIsNonLazy(OCD))
    DefinedNonLazyCategories.push_back(GCATV);
}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+  const ObjCMethodDecl *MD) {
+  // FIXME: Use DenseMap::lookup
+  llvm::Function *Fn = MethodDefinitions[MD];
+  if (!Fn)
+    return 0;
+
+  std::vector<llvm::Constant*> Method(3);
+  Method[0] =
+    llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+                                   ObjCTypes.SelectorPtrTy);
+  Method[1] = GetMethodVarType(MD);
+  Method[2] = llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy);
+  return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+///   uint32_t entsize;  // sizeof(struct _objc_method)
+///   uint32_t method_count;
+///   struct _objc_method method_list[method_count];
+/// }
+///
+llvm::Constant *CGObjCNonFragileABIMac::EmitMethodList(llvm::Twine Name,
+                                                       const char *Section,
+                                                const ConstantVector &Methods) {
+  // Return null for empty list.
+  if (Methods.empty())
+    return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+  std::vector<llvm::Constant*> Values(3);
+  // sizeof(struct _objc_method)
+  unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+  // method_count
+  Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+  llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+                                             Methods.size());
+  Values[2] = llvm::ConstantArray::get(AT, Methods);
+  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
+
+  llvm::GlobalVariable *GV =
+    new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+                             llvm::GlobalValue::InternalLinkage,
+                             Init,
+                             Name);
+  GV->setAlignment(
+    CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
+  GV->setSection(Section);
+  CGM.AddUsedGlobal(GV);
+  return llvm::ConstantExpr::getBitCast(GV,
+                                        ObjCTypes.MethodListnfABIPtrTy);
+}
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
+llvm::GlobalVariable * CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(
+  const ObjCInterfaceDecl *ID,
+  const ObjCIvarDecl *Ivar) {
+  // FIXME: We shouldn't need to do this lookup.
+  unsigned Index;
+  const ObjCInterfaceDecl *Container =
+    FindIvarInterface(CGM.getContext(), ID, Ivar, Index);
+  assert(Container && "Unable to find ivar container!");
+  std::string Name = "OBJC_IVAR_$_" + Container->getNameAsString() +
+    '.' + Ivar->getNameAsString();
+  llvm::GlobalVariable *IvarOffsetGV =
+    CGM.getModule().getGlobalVariable(Name);
+  if (!IvarOffsetGV)
+    IvarOffsetGV =
+      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.LongTy,
+                               false,
+                               llvm::GlobalValue::ExternalLinkage,
+                               0,
+                               Name);
+  return IvarOffsetGV;
+}
+
+llvm::Constant * CGObjCNonFragileABIMac::EmitIvarOffsetVar(
+  const ObjCInterfaceDecl *ID,
+  const ObjCIvarDecl *Ivar,
+  unsigned long int Offset) {
+  llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+  IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
+                                                      Offset));
+  IvarOffsetGV->setAlignment(
+    CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.LongTy));
+
+  // FIXME: This matches gcc, but shouldn't the visibility be set on the use as
+  // well (i.e., in ObjCIvarOffsetVariable).
+  if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+      Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+      CGM.getDeclVisibilityMode(ID) == LangOptions::Hidden)
+    IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+  else
+    IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+  IvarOffsetGV->setSection("__DATA, __objc_const");
+  return IvarOffsetGV;
+}
+
/// EmitIvarList - Emit the ivar list for the given
/// implementation. The return value has type
/// IvarListnfABIPtrTy.
///  struct _ivar_t {
///   unsigned long int *offset;  // pointer to ivar offset location
///   char *name;
///   char *type;
///   uint32_t alignment;
///   uint32_t size;
/// }
/// struct _ivar_list_t {
///   uint32 entsize;  // sizeof(struct _ivar_t)
///   uint32 count;
///   struct _ivar_t list[count];
/// }
///

llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
  const ObjCImplementationDecl *ID) {

  std::vector<llvm::Constant*> Ivars, Ivar(5);

  const ObjCInterfaceDecl *OID = ID->getClassInterface();
  assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");

  // FIXME. Consolidate this with similar code in GenerateClass.

  // Collect declared and synthesized ivars in a small vector.
  llvm::SmallVector<ObjCIvarDecl*, 16> OIvars;
  CGM.getContext().ShallowCollectObjCIvars(OID, OIvars);

  for (unsigned i = 0, e = OIvars.size(); i != e; ++i) {
    ObjCIvarDecl *IVD = OIvars[i];
    // Ignore unnamed bit-fields.
    if (!IVD->getDeclName())
      continue;
    // offset: the (now defined) ivar offset variable for this ivar.
    Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
                                ComputeIvarBaseOffset(CGM, ID, IVD));
    // name and encoded type string.
    Ivar[1] = GetMethodVarName(IVD->getIdentifier());
    Ivar[2] = GetMethodVarType(IVD);
    const llvm::Type *FieldTy =
      CGM.getTypes().ConvertTypeForMem(IVD->getType());
    unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
    // alignment field is log2 of the preferred byte alignment.
    unsigned Align = CGM.getContext().getPreferredTypeAlign(
      IVD->getType().getTypePtr()) >> 3;
    Align = llvm::Log2_32(Align);
    Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
    // NOTE. Size of a bitfield does not match gcc's, because of the
    // way bitfields are treated special in each. But I am told that
    // 'size' for bitfield ivars is ignored by the runtime so it does
    // not matter.  If it matters, there is enough info to get the
    // bitfield right!
    Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
    Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
  }
  // Return null for empty list.
  if (Ivars.empty())
    return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
  // Build the _ivar_list_t: entsize, count, and the inline entry array.
  std::vector<llvm::Constant*> Values(3);
  unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
  Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
  Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
  llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
                                             Ivars.size());
  Values[2] = llvm::ConstantArray::get(AT, Ivars);
  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
  const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
                             llvm::GlobalValue::InternalLinkage,
                             Init,
                             Prefix + OID->getName());
  GV->setAlignment(
    CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
  GV->setSection("__DATA, __objc_const");

  CGM.AddUsedGlobal(GV);
  return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
}
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+  const ObjCProtocolDecl *PD) {
+  llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+  if (!Entry) {
+    // We use the initializer as a marker of whether this is a forward
+    // reference or not. At module finalization we add the empty
+    // contents for protocols which were referenced but never defined.
+    Entry =
+      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false,
+                               llvm::GlobalValue::ExternalLinkage,
+                               0,
+                               "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+    Entry->setSection("__DATA,__datacoal_nt,coalesced");
+  }
+
+  return Entry;
+}
+
/// GetOrEmitProtocol - Generate the protocol meta-data:
/// @code
/// struct _protocol_t {
///   id isa;  // NULL
///   const char * const protocol_name;
///   const struct _protocol_list_t * protocol_list; // super protocols
///   const struct method_list_t * const instance_methods;
///   const struct method_list_t * const class_methods;
///   const struct method_list_t *optionalInstanceMethods;
///   const struct method_list_t *optionalClassMethods;
///   const struct _prop_list_t * properties;
///   const uint32_t size;  // sizeof(struct _protocol_t)
///   const uint32_t flags;  // = 0
/// }
/// @endcode
///

llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
  const ObjCProtocolDecl *PD) {
  llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];

  // Early exit if a defining object has already been generated.
  if (Entry && Entry->hasInitializer())
    return Entry;

  // Construct method lists, separating required methods from @optional
  // ones for both instance and class methods.
  std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
  std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
  for (ObjCProtocolDecl::instmeth_iterator
         i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
    ObjCMethodDecl *MD = *i;
    llvm::Constant *C = GetMethodDescriptionConstant(MD);
    if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
      OptInstanceMethods.push_back(C);
    } else {
      InstanceMethods.push_back(C);
    }
  }

  for (ObjCProtocolDecl::classmeth_iterator
         i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
    ObjCMethodDecl *MD = *i;
    llvm::Constant *C = GetMethodDescriptionConstant(MD);
    if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
      OptClassMethods.push_back(C);
    } else {
      ClassMethods.push_back(C);
    }
  }

  // Fill in the _protocol_t fields in declaration order (see @code above).
  std::vector<llvm::Constant*> Values(10);
  // isa is NULL
  Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
  Values[1] = GetClassName(PD->getIdentifier());
  // Inherited ("super") protocols.
  Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getName(),
                               PD->protocol_begin(),
                               PD->protocol_end());

  Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
                             + PD->getName(),
                             "__DATA, __objc_const",
                             InstanceMethods);
  Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
                             + PD->getName(),
                             "__DATA, __objc_const",
                             ClassMethods);
  Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
                             + PD->getName(),
                             "__DATA, __objc_const",
                             OptInstanceMethods);
  Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
                             + PD->getName(),
                             "__DATA, __objc_const",
                             OptClassMethods);
  Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getName(),
                               0, PD, ObjCTypes);
  uint32_t Size =
    CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
  Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
  // flags is always 0.
  Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
  llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
                                                   Values);

  if (Entry) {
    // Already created as a forward reference (see GetOrEmitProtocolRef);
    // fix the linkage and update the initializer.
    Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
    Entry->setInitializer(Init);
  } else {
    Entry =
      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
                               false, llvm::GlobalValue::WeakAnyLinkage, Init,
                               "\01l_OBJC_PROTOCOL_$_" + PD->getName());
    Entry->setAlignment(
      CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABITy));
    Entry->setSection("__DATA,__datacoal_nt,coalesced");
  }
  Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.AddUsedGlobal(Entry);

  // Use this protocol meta-data to build protocol list table in section
  // __DATA, __objc_protolist
  llvm::GlobalVariable *PTGV =
    new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
                             false, llvm::GlobalValue::WeakAnyLinkage, Entry,
                             "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
  PTGV->setAlignment(
    CGM.getTargetData().getPrefTypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
  PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
  PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.AddUsedGlobal(PTGV);
  return Entry;
}
+
/// EmitProtocolList - Generate protocol list meta-data:
/// @code
/// struct _protocol_list_t {
///   long protocol_count;   // Note, this is 32/64 bit
///   struct _protocol_t[protocol_count];
/// }
/// @endcode
///
llvm::Constant *
CGObjCNonFragileABIMac::EmitProtocolList(llvm::Twine Name,
                                      ObjCProtocolDecl::protocol_iterator begin,
                                      ObjCProtocolDecl::protocol_iterator end) {
  std::vector<llvm::Constant*> ProtocolRefs;

  // Just return null for empty protocol lists
  if (begin == end)
    return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);

  // FIXME: We shouldn't need to do this lookup here, should we?
  // If a list with this exact name was already emitted, reuse it.
  llvm::SmallString<256> TmpName;
  Name.toVector(TmpName);
  llvm::GlobalVariable *GV =
    CGM.getModule().getGlobalVariable(TmpName.str(), true);
  if (GV)
    return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);

  // Collect a reference (possibly a forward declaration) per protocol.
  for (; begin != end; ++begin)
    ProtocolRefs.push_back(GetProtocolRef(*begin));  // Implemented???

  // This list is null terminated.
  ProtocolRefs.push_back(llvm::Constant::getNullValue(
                           ObjCTypes.ProtocolnfABIPtrTy));

  // protocol_count excludes the terminating null entry, hence size() - 1.
  std::vector<llvm::Constant*> Values(2);
  Values[0] =
    llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
  Values[1] =
    llvm::ConstantArray::get(
      llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
                           ProtocolRefs.size()),
      ProtocolRefs);

  llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
  GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
                                llvm::GlobalValue::InternalLinkage,
                                Init,
                                Name);
  GV->setSection("__DATA, __objc_const");
  GV->setAlignment(
    CGM.getTargetData().getPrefTypeAlignment(Init->getType()));
  CGM.AddUsedGlobal(GV);
  return llvm::ConstantExpr::getBitCast(GV,
                                        ObjCTypes.ProtocolListnfABIPtrTy);
}
+
+/// GetMethodDescriptionConstant - This routine build following meta-data:
+/// struct _objc_method {
+///   SEL _cmd;
+///   char *method_type;
+///   char *_imp;
+/// }
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+  std::vector<llvm::Constant*> Desc(3);
+  Desc[0] =
+    llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+                                   ObjCTypes.SelectorPtrTy);
+  Desc[1] = GetMethodVarType(MD);
+  // Protocol methods have no implementation. So, this entry is always NULL.
+  Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+  return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code Gen for nonfragile ivar reference.
+/// This code gen. amounts to generating code for:
+/// @code
+/// (type *)((char *)base + _OBJC_IVAR_$_.ivar;
+/// @encode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+  CodeGen::CodeGenFunction &CGF,
+  QualType ObjectTy,
+  llvm::Value *BaseValue,
+  const ObjCIvarDecl *Ivar,
+  unsigned CVRQualifiers) {
+  const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCInterfaceType>()->getDecl();
+  return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+                                  EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+  CodeGen::CodeGenFunction &CGF,
+  const ObjCInterfaceDecl *Interface,
+  const ObjCIvarDecl *Ivar) {
+  return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar");
+}
+
/// EmitMessageSend - Emit a "fixup" message send: the call goes
/// indirectly through a per-(function, selector) message-ref structure
/// ({ dispatch function, selector name }) placed in __objc_msgrefs.
CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
  CodeGen::CodeGenFunction &CGF,
  QualType ResultType,
  Selector Sel,
  llvm::Value *Receiver,
  QualType Arg0Ty,
  bool IsSuper,
  const CallArgList &CallArgs) {
  // FIXME: Even though IsSuper is passed, this function does not handle
  // calls to 'super' receivers.
  CodeGenTypes &Types = CGM.getTypes();
  llvm::Value *Arg0 = Receiver;
  if (!IsSuper)
    Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy, "tmp");

  // Find the message function name.
  // FIXME. This is too much work to get the ABI-specific result type needed to
  // find the message name.
  const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType,
                                                       llvm::SmallVector<QualType, 16>(),
                                                       CC_Default, false);
  llvm::Constant *Fn = 0;
  std::string Name("\01l_");
  // Select the dispatch entry point by return convention:
  // sret (struct return) / fpret (long double) / plain.
  if (CGM.ReturnTypeUsesSret(FnInfo)) {
#if 0
    // unlike what is documented. gcc never generates this API!!
    if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
      Fn = ObjCTypes.getMessageSendIdStretFixupFn();
      // FIXME. Is there a better way of getting these names.
      // They are available in RuntimeFunctions vector pair.
      Name += "objc_msgSendId_stret_fixup";
    } else
#endif
      if (IsSuper) {
        Fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
        Name += "objc_msgSendSuper2_stret_fixup";
      } else {
        Fn = ObjCTypes.getMessageSendStretFixupFn();
        Name += "objc_msgSend_stret_fixup";
      }
  } else if (!IsSuper && ResultType->isFloatingType()) {
    if (ResultType->isSpecificBuiltinType(BuiltinType::LongDouble)) {
      Fn = ObjCTypes.getMessageSendFpretFixupFn();
      Name += "objc_msgSend_fpret_fixup";
    } else {
      Fn = ObjCTypes.getMessageSendFixupFn();
      Name += "objc_msgSend_fixup";
    }
  } else {
#if 0
// unlike what is documented. gcc never generates this API!!
    if (Receiver->getType() == ObjCTypes.ObjectPtrTy) {
      Fn = ObjCTypes.getMessageSendIdFixupFn();
      Name += "objc_msgSendId_fixup";
    } else
#endif
      if (IsSuper) {
        Fn = ObjCTypes.getMessageSendSuper2FixupFn();
        Name += "objc_msgSendSuper2_fixup";
      } else {
        Fn = ObjCTypes.getMessageSendFixupFn();
        Name += "objc_msgSend_fixup";
      }
  }
  assert(Fn && "CGObjCNonFragileABIMac::EmitMessageSend");
  // The message-ref global is named "\01l_<dispatch fn>_<selector with
  // ':' replaced by '_'>", so it is unique per (function, selector).
  Name += '_';
  std::string SelName(Sel.getAsString());
  // Replace all ':' in selector name with '_'  ouch!
  for (unsigned i = 0; i < SelName.size(); i++)
    if (SelName[i] == ':')
      SelName[i] = '_';
  Name += SelName;
  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
  if (!GV) {
    // Build message ref table entry: { dispatch fn, selector name }.
    // NOTE(review): weak/hidden/coalesced in __objc_msgrefs — presumably
    // so the runtime can rewrite these entries; confirm against the ABI.
    std::vector<llvm::Constant*> Values(2);
    Values[0] = Fn;
    Values[1] = GetMethodVarName(Sel);
    llvm::Constant *Init = llvm::ConstantStruct::get(VMContext, Values, false);
    GV =  new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
                                   llvm::GlobalValue::WeakAnyLinkage,
                                   Init,
                                   Name);
    GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
    GV->setAlignment(16);
    GV->setSection("__DATA, __objc_msgrefs, coalesced");
  }
  llvm::Value *Arg1 = CGF.Builder.CreateBitCast(GV, ObjCTypes.MessageRefPtrTy);

  // The call is (receiver, &message_ref, user args...); the callee is
  // loaded out of the message ref's first field and cast to the
  // function type computed from the actual argument list.
  CallArgList ActualArgs;
  ActualArgs.push_back(std::make_pair(RValue::get(Arg0), Arg0Ty));
  ActualArgs.push_back(std::make_pair(RValue::get(Arg1),
                                      ObjCTypes.MessageRefCPtrTy));
  ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
  const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs,
                                                        CC_Default, false);
  llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
  Callee = CGF.Builder.CreateLoad(Callee);
  const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
  Callee = CGF.Builder.CreateBitCast(Callee,
                                     llvm::PointerType::getUnqual(FTy));
  return CGF.EmitCall(FnInfo1, Callee, ReturnValueSlot(), ActualArgs);
}
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+                                            QualType ResultType,
+                                            Selector Sel,
+                                            llvm::Value *Receiver,
+                                            bool IsClassMessage,
+                                            const CallArgList &CallArgs,
+                                            const ObjCMethodDecl *Method) {
+  return LegacyDispatchedSelector(Sel)
+    ? EmitLegacyMessageSend(CGF, ResultType, EmitSelector(CGF.Builder, Sel),
+                            Receiver, CGF.getContext().getObjCIdType(),
+                            false, CallArgs, Method, ObjCTypes)
+    : EmitMessageSend(CGF, ResultType, Sel,
+                      Receiver, CGF.getContext().getObjCIdType(),
+                      false, CallArgs);
+}
+
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+  if (!GV) {
+    GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
+                                  false, llvm::GlobalValue::ExternalLinkage,
+                                  0, Name);
+  }
+
+  return GV;
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+                                                  const ObjCInterfaceDecl *ID) {
+  llvm::GlobalVariable *&Entry = ClassReferences[ID->getIdentifier()];
+
+  if (!Entry) {
+    std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+    Entry =
+      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+                               false, llvm::GlobalValue::InternalLinkage,
+                               ClassGV,
+                               "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+    Entry->setAlignment(
+      CGM.getTargetData().getPrefTypeAlignment(
+        ObjCTypes.ClassnfABIPtrTy));
+    Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+    CGM.AddUsedGlobal(Entry);
+  }
+
+  return Builder.CreateLoad(Entry, "tmp");
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
+                                          const ObjCInterfaceDecl *ID) {
+  llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+  if (!Entry) {
+    std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+    Entry =
+      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+                               false, llvm::GlobalValue::InternalLinkage,
+                               ClassGV,
+                               "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+    Entry->setAlignment(
+      CGM.getTargetData().getPrefTypeAlignment(
+        ObjCTypes.ClassnfABIPtrTy));
+    Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+    CGM.AddUsedGlobal(Entry);
+  }
+
+  return Builder.CreateLoad(Entry, "tmp");
+}
+
+/// EmitMetaClassRef - Return a Value * of the address of _class_t
+/// meta-data
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
+                                                      const ObjCInterfaceDecl *ID) {
+  llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+  if (Entry)
+    return Builder.CreateLoad(Entry, "tmp");
+
+  std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
+  llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+  Entry =
+    new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
+                             llvm::GlobalValue::InternalLinkage,
+                             MetaClassGV,
+                             "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+  Entry->setAlignment(
+    CGM.getTargetData().getPrefTypeAlignment(
+      ObjCTypes.ClassnfABIPtrTy));
+
+  Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+  CGM.AddUsedGlobal(Entry);
+
+  return Builder.CreateLoad(Entry, "tmp");
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
+                                              const ObjCInterfaceDecl *ID) {
+  if (ID->hasAttr<WeakImportAttr>()) {
+    std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+    llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+    ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+  }
+  
+  return EmitClassRef(Builder, ID);
+}
+
/// Generates a message send where the super is the receiver.  This is
/// a message send to self with special delivery semantics indicating
/// which class's method should be called.
CodeGen::RValue
CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
                                                 QualType ResultType,
                                                 Selector Sel,
                                                 const ObjCInterfaceDecl *Class,
                                                 bool isCategoryImpl,
                                                 llvm::Value *Receiver,
                                                 bool IsClassMessage,
                                                 const CodeGen::CallArgList &CallArgs,
                                                 const ObjCMethodDecl *Method) {
  // Create and init a super structure; this is a (receiver, class)
  // pair we will pass to objc_msgSendSuper.
  llvm::Value *ObjCSuper =
    CGF.Builder.CreateAlloca(ObjCTypes.SuperTy, 0, "objc_super");

  // objc_super.receiver = (id)Receiver
  llvm::Value *ReceiverAsObject =
    CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
  CGF.Builder.CreateStore(ReceiverAsObject,
                          CGF.Builder.CreateStructGEP(ObjCSuper, 0));

  // If this is a class message the metaclass is passed as the target.
  llvm::Value *Target;
  if (IsClassMessage) {
    if (isCategoryImpl) {
      // Message sent to 'super' in a class method defined in
      // a category implementation: load the first field of the class
      // object (presumably its isa — confirm) rather than using a
      // direct metaclass reference.
      Target = EmitClassRef(CGF.Builder, Class);
      Target = CGF.Builder.CreateStructGEP(Target, 0);
      Target = CGF.Builder.CreateLoad(Target);
    } else
      Target = EmitMetaClassRef(CGF.Builder, Class);
  } else
    Target = EmitSuperClassRef(CGF.Builder, Class);

  // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
  // ObjCTypes types.
  const llvm::Type *ClassTy =
    CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
  Target = CGF.Builder.CreateBitCast(Target, ClassTy);
  // objc_super.super_class = Target
  CGF.Builder.CreateStore(Target,
                          CGF.Builder.CreateStructGEP(ObjCSuper, 1));

  // Dispatch through the legacy path or the fixup path, per selector.
  return (LegacyDispatchedSelector(Sel))
    ? EmitLegacyMessageSend(CGF, ResultType,EmitSelector(CGF.Builder, Sel),
                            ObjCSuper, ObjCTypes.SuperPtrCTy,
                            true, CallArgs, Method, ObjCTypes)
    : EmitMessageSend(CGF, ResultType, Sel,
                      ObjCSuper, ObjCTypes.SuperPtrCTy,
                      true, CallArgs);
}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
+                                                  Selector Sel) {
+  llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+  if (!Entry) {
+    llvm::Constant *Casted =
+      llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+                                     ObjCTypes.SelectorPtrTy);
+    Entry =
+      new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
+                               llvm::GlobalValue::InternalLinkage,
+                               Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
+    Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+    CGM.AddUsedGlobal(Entry);
+  }
+
+  return Builder.CreateLoad(Entry, "tmp");
+}
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+                                                llvm::Value *src,
+                                                llvm::Value *dst,
+                                                llvm::Value *ivarOffset) {
+  const llvm::Type * SrcTy = src->getType();
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+           : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+                          src, dst, ivarOffset);
+  return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+  CodeGen::CodeGenFunction &CGF,
+  llvm::Value *src, llvm::Value *dst) {
+  const llvm::Type * SrcTy = src->getType();
+  // Non-pointer sources are widened to a 32- or 64-bit integer of the same
+  // allocation size and converted to i8* before calling into the runtime.
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+           : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  // NOTE(review): the result name "weakassign" appears copy-pasted from the
+  // weak-assign path; it only affects the IR value name, not behavior.
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+                          src, dst, "weakassign");
+  return;
+}
+
+/// EmitGCMemmoveCollectable - Emit a call to the GC-aware memmove runtime
+/// function to copy an aggregate of type Ty between DestPtr and SrcPtr.
+void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
+  CodeGen::CodeGenFunction &CGF,
+  llvm::Value *DestPtr,
+  llvm::Value *SrcPtr,
+  QualType Ty) {
+  // Get size info for this aggregate.
+  std::pair<uint64_t, unsigned> TypeInfo = CGM.getContext().getTypeInfo(Ty);
+  // getTypeInfo() reports the size in bits; the runtime expects bytes.
+  unsigned long size = TypeInfo.first/8;
+  SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+  DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+  llvm::Value *N = llvm::ConstantInt::get(ObjCTypes.LongTy, size);
+  CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+                          DestPtr, SrcPtr, N);
+  return;
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+  CodeGen::CodeGenFunction &CGF,
+  llvm::Value *AddrWeakObj) {
+  // Remember the pointee type so the loaded value can be cast back to the
+  // caller's expected type after the runtime call.
+  const llvm::Type* DestTy =
+    cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+  AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+  llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+                                                  AddrWeakObj, "weakread");
+  read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+  return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+                                                llvm::Value *src, llvm::Value *dst) {
+  const llvm::Type * SrcTy = src->getType();
+  // Non-pointer sources are widened to a 32- or 64-bit integer of the same
+  // allocation size and converted to i8* before calling into the runtime.
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+           : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+                          src, dst, "weakassign");
+  return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+                                                  llvm::Value *src, llvm::Value *dst) {
+  const llvm::Type * SrcTy = src->getType();
+  // Non-pointer sources are widened to a 32- or 64-bit integer of the same
+  // allocation size and converted to i8* before calling into the runtime.
+  if (!isa<llvm::PointerType>(SrcTy)) {
+    unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+    assert(Size <= 8 && "does not support size > 8");
+    src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+           : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+    src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+  }
+  src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+  dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+  CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+                          src, dst, "globalassign");
+  return;
+}
+
+/// EmitTryOrSynchronizedStmt - Emit code for an Objective-C @try (with its
+/// @catch/@finally clauses) or a @synchronized statement, using the zero-cost
+/// EH intrinsics (llvm.eh.exception / llvm.eh.selector / llvm.eh.typeid.for).
+/// The body is emitted under an invoke destination; catch clauses are
+/// dispatched by comparing the selector against each handler's type id.
+void
+CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+                                                  const Stmt &S) {
+  bool isTry = isa<ObjCAtTryStmt>(S);
+  llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+  llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
+  llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+  llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
+  llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
+  llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+  // For @synchronized, call objc_sync_enter(sync.expr). The
+  // evaluation of the expression must occur before we enter the
+  // @synchronized. We can safely avoid a temp here because jumps into
+  // @synchronized are illegal & this will dominate uses.
+  llvm::Value *SyncArg = 0;
+  if (!isTry) {
+    SyncArg =
+      CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+    SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+    CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+  }
+
+  // Push an EH context entry, used for handling rethrows and jumps
+  // through finally.
+  CGF.PushCleanupBlock(FinallyBlock);
+
+  CGF.setInvokeDest(TryHandler);
+
+  CGF.EmitBlock(TryBlock);
+  CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+               : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+  CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+  // Emit the exception handler.
+
+  CGF.EmitBlock(TryHandler);
+
+  llvm::Value *llvm_eh_exception =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+  llvm::Value *llvm_eh_selector =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+  llvm::Value *llvm_eh_typeid_for =
+    CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+  llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+  llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
+
+  // llvm.eh.selector takes the exception and the personality as its fixed
+  // leading arguments, followed by one type-info per handler.
+  llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
+  SelectorArgs.push_back(Exc);
+  SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
+
+  // Construct the lists of (type, catch body) to handle.
+  llvm::SmallVector<std::pair<const ParmVarDecl*, const Stmt*>, 8> Handlers;
+  bool HasCatchAll = false;
+  if (isTry) {
+    if (const ObjCAtCatchStmt* CatchStmt =
+        cast<ObjCAtTryStmt>(S).getCatchStmts())  {
+      for (; CatchStmt; CatchStmt = CatchStmt->getNextCatchStmt()) {
+        const ParmVarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+        Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
+
+        // catch(...) always matches.
+        if (!CatchDecl) {
+          // Use i8* null here to signal this is a catch all, not a cleanup.
+          llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+          SelectorArgs.push_back(Null);
+          HasCatchAll = true;
+          break;
+        }
+
+        if (CatchDecl->getType()->isObjCIdType() ||
+            CatchDecl->getType()->isObjCQualifiedIdType()) {
+          // @catch (id) and @catch (id<...>) share a single external
+          // OBJC_EHTYPE_id descriptor; create it lazily if needed.
+          llvm::Value *IDEHType =
+            CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+          if (!IDEHType)
+            IDEHType =
+              new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+                                       false,
+                                       llvm::GlobalValue::ExternalLinkage,
+                                       0, "OBJC_EHTYPE_id");
+          SelectorArgs.push_back(IDEHType);
+        } else {
+          // All other types should be Objective-C interface pointer types.
+          const ObjCObjectPointerType *PT =
+            CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+          assert(PT && "Invalid @catch type.");
+          const ObjCInterfaceType *IT = PT->getInterfaceType();
+          assert(IT && "Invalid @catch type.");
+          llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
+          SelectorArgs.push_back(EHType);
+        }
+      }
+    }
+  }
+
+  // We use a cleanup unless there was already a catch all.
+  if (!HasCatchAll) {
+    // Even though this is a cleanup, treat it as a catch all to avoid the C++
+    // personality behavior of terminating the process if only cleanups are
+    // found in the exception handling stack.
+    SelectorArgs.push_back(llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy));
+    Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+  }
+
+  llvm::Value *Selector =
+    CGF.Builder.CreateCall(llvm_eh_selector,
+                           SelectorArgs.begin(), SelectorArgs.end(),
+                           "selector");
+  // Emit the dispatch-and-handle sequence for each @catch clause in order.
+  for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
+    const ParmVarDecl *CatchParam = Handlers[i].first;
+    const Stmt *CatchBody = Handlers[i].second;
+
+    llvm::BasicBlock *Next = 0;
+
+    // The last handler always matches.
+    if (i + 1 != e) {
+      assert(CatchParam && "Only last handler can be a catch all.");
+
+      // SelectorArgs[i+2] is this handler's type-info (skipping the
+      // exception and personality arguments at the front).
+      llvm::BasicBlock *Match = CGF.createBasicBlock("match");
+      Next = CGF.createBasicBlock("catch.next");
+      llvm::Value *Id =
+        CGF.Builder.CreateCall(llvm_eh_typeid_for,
+                               CGF.Builder.CreateBitCast(SelectorArgs[i+2],
+                                                         ObjCTypes.Int8PtrTy));
+      CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id),
+                               Match, Next);
+
+      CGF.EmitBlock(Match);
+    }
+
+    if (CatchBody) {
+      llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
+      llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
+
+      // Cleanups must call objc_end_catch.
+      //
+      // FIXME: It seems incorrect for objc_begin_catch to be inside this
+      // context, but this matches gcc.
+      CGF.PushCleanupBlock(MatchEnd);
+      CGF.setInvokeDest(MatchHandler);
+
+      llvm::Value *ExcObject =
+        CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
+
+      // Bind the catch parameter if it exists.
+      if (CatchParam) {
+        ExcObject =
+          CGF.Builder.CreateBitCast(ExcObject,
+                                    CGF.ConvertType(CatchParam->getType()));
+        // CatchParam is a ParmVarDecl because of the grammar
+        // construction used to handle this, but for codegen purposes
+        // we treat this as a local decl.
+        CGF.EmitLocalBlockVarDecl(*CatchParam);
+        CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
+      }
+
+      CGF.ObjCEHValueStack.push_back(ExcObject);
+      CGF.EmitStmt(CatchBody);
+      CGF.ObjCEHValueStack.pop_back();
+
+      CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+      CGF.EmitBlock(MatchHandler);
+
+      llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+      // We are required to emit this call to satisfy LLVM, even
+      // though we don't use the result.
+      llvm::SmallVector<llvm::Value*, 8> Args;
+      Args.push_back(Exc);
+      Args.push_back(ObjCTypes.getEHPersonalityPtr());
+      Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                            0));
+      CGF.Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
+      CGF.Builder.CreateStore(Exc, RethrowPtr);
+      CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+      CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+
+      CGF.EmitBlock(MatchEnd);
+
+      // Unfortunately, we also have to generate another EH frame here
+      // in case this throws.
+      llvm::BasicBlock *MatchEndHandler =
+        CGF.createBasicBlock("match.end.handler");
+      llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+      // Pass an empty argument range (begin, begin) to the invoke.
+      CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
+                               Cont, MatchEndHandler,
+                               Args.begin(), Args.begin());
+
+      CGF.EmitBlock(Cont);
+      if (Info.SwitchBlock)
+        CGF.EmitBlock(Info.SwitchBlock);
+      if (Info.EndBlock)
+        CGF.EmitBlock(Info.EndBlock);
+
+      CGF.EmitBlock(MatchEndHandler);
+      Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
+      // We are required to emit this call to satisfy LLVM, even
+      // though we don't use the result.
+      Args.clear();
+      Args.push_back(Exc);
+      Args.push_back(ObjCTypes.getEHPersonalityPtr());
+      Args.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                            0));
+      CGF.Builder.CreateCall(llvm_eh_selector, Args.begin(), Args.end());
+      CGF.Builder.CreateStore(Exc, RethrowPtr);
+      CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+      if (Next)
+        CGF.EmitBlock(Next);
+    } else {
+      assert(!Next && "catchup should be last handler.");
+
+      // No body: this is the implicit cleanup/catch-all entry; stash the
+      // exception and head for the rethrow path.
+      CGF.Builder.CreateStore(Exc, RethrowPtr);
+      CGF.EmitBranchThroughCleanup(FinallyRethrow);
+    }
+  }
+
+  // Pop the cleanup entry, the @finally is outside this cleanup
+  // scope.
+  CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+  CGF.setInvokeDest(PrevLandingPad);
+
+  CGF.EmitBlock(FinallyBlock);
+
+  if (isTry) {
+    if (const ObjCAtFinallyStmt* FinallyStmt =
+        cast<ObjCAtTryStmt>(S).getFinallyStmt())
+      CGF.EmitStmt(FinallyStmt->getFinallyBody());
+  } else {
+    // Emit 'objc_sync_exit(expr)' as finally's sole statement for
+    // @synchronized.
+    CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+  }
+
+  if (Info.SwitchBlock)
+    CGF.EmitBlock(Info.SwitchBlock);
+  if (Info.EndBlock)
+    CGF.EmitBlock(Info.EndBlock);
+
+  // Branch around the rethrow code.
+  CGF.EmitBranch(FinallyEnd);
+
+  CGF.EmitBlock(FinallyRethrow);
+  CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
+                         CGF.Builder.CreateLoad(RethrowPtr));
+  CGF.Builder.CreateUnreachable();
+
+  CGF.EmitBlock(FinallyEnd);
+}
+
+/// EmitThrowStmt - Generate code for a throw statement.
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+                                           const ObjCAtThrowStmt &S) {
+  llvm::Value *Exception;
+  if (const Expr *ThrowExpr = S.getThrowExpr()) {
+    Exception = CGF.EmitScalarExpr(ThrowExpr);
+  } else {
+    // A bare '@throw;' rethrows the exception currently being handled,
+    // which is tracked on the ObjCEHValueStack.
+    assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+           "Unexpected rethrow outside @catch block.");
+    Exception = CGF.ObjCEHValueStack.back();
+  }
+
+  llvm::Value *ExceptionAsObject =
+    CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
+  // If we are inside an EH scope, use an invoke so the unwind edge is
+  // wired to the active landing pad; otherwise a plain call suffices.
+  llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
+  if (InvokeDest) {
+    llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+    CGF.Builder.CreateInvoke(ObjCTypes.getExceptionThrowFn(),
+                             Cont, InvokeDest,
+                             &ExceptionAsObject, &ExceptionAsObject + 1);
+    CGF.EmitBlock(Cont);
+  } else
+    CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+  CGF.Builder.CreateUnreachable();
+
+  // Clear the insertion point to indicate we are in unreachable code.
+  CGF.Builder.ClearInsertionPoint();
+}
+
+/// GetInterfaceEHType - Return the objc_ehtype descriptor global for the
+/// given interface, creating it if necessary. When ForDefinition is false
+/// this may return a forward declaration (or an external reference if the
+/// class carries the __objc_exception__ attribute); when true the global is
+/// given its initializer, section, and linkage.
+llvm::Value *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+                                           bool ForDefinition) {
+  llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+  // If we don't need a definition, return the entry if found or check
+  // if we use an external reference.
+  if (!ForDefinition) {
+    if (Entry)
+      return Entry;
+
+    // If this type (or a super class) has the __objc_exception__
+    // attribute, emit an external reference.
+    if (hasObjCExceptionAttribute(CGM.getContext(), ID))
+      return Entry =
+        new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+                                 llvm::GlobalValue::ExternalLinkage,
+                                 0,
+                                 ("OBJC_EHTYPE_$_" +
+                                  ID->getIdentifier()->getName()));
+  }
+
+  // Otherwise we need to either make a new entry or fill in the
+  // initializer.
+  assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+  std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+  std::string VTableName = "objc_ehtype_vtable";
+  llvm::GlobalVariable *VTableGV =
+    CGM.getModule().getGlobalVariable(VTableName);
+  if (!VTableGV)
+    VTableGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.Int8PtrTy,
+                                        false,
+                                        llvm::GlobalValue::ExternalLinkage,
+                                        0, VTableName);
+
+  // The vtable pointer in the descriptor points two entries into
+  // objc_ehtype_vtable.
+  llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+
+  // EHTypeTy layout: { vtable pointer, class name, class object }.
+  std::vector<llvm::Constant*> Values(3);
+  Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
+  Values[1] = GetClassName(ID->getIdentifier());
+  Values[2] = GetClassGlobal(ClassName);
+  llvm::Constant *Init =
+    llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+  if (Entry) {
+    Entry->setInitializer(Init);
+  } else {
+    Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+                                     llvm::GlobalValue::WeakAnyLinkage,
+                                     Init,
+                                     ("OBJC_EHTYPE_$_" +
+                                      ID->getIdentifier()->getName()));
+  }
+
+  if (CGM.getLangOptions().getVisibilityMode() == LangOptions::Hidden)
+    Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+  Entry->setAlignment(8);
+
+  if (ForDefinition) {
+    Entry->setSection("__DATA,__objc_const");
+    Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
+  } else {
+    Entry->setSection("__DATA,__datacoal_nt,coalesced");
+  }
+
+  return Entry;
+}
+
+/* *** */
+
+/// CreateMacObjCRuntime - Factory for the CGObjCMac runtime implementation.
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+  return new CGObjCMac(CGM);
+}
+
+/// CreateMacNonFragileABIObjCRuntime - Factory for the non-fragile-ABI
+/// CGObjCNonFragileABIMac runtime implementation.
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacNonFragileABIObjCRuntime(CodeGen::CodeGenModule &CGM) {
+  return new CGObjCNonFragileABIMac(CGM);
+}
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..ff5d40b
--- /dev/null
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,218 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation.  Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OBCJRUNTIME_H
+#define CLANG_CODEGEN_OBCJRUNTIME_H
+#include "clang/Basic/IdentifierTable.h" // Selector
+#include "llvm/ADT/SmallVector.h"
+#include "clang/AST/DeclObjC.h"
+#include <string>
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+  class Constant;
+  class Function;
+  class Module;
+  class StructLayout;
+  class StructType;
+  class Type;
+  class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+  class CodeGenFunction;
+}
+
+  class FieldDecl;
+  class ObjCAtTryStmt;
+  class ObjCAtThrowStmt;
+  class ObjCAtSynchronizedStmt;
+  class ObjCContainerDecl;
+  class ObjCCategoryImplDecl;
+  class ObjCImplementationDecl;
+  class ObjCInterfaceDecl;
+  class ObjCMessageExpr;
+  class ObjCMethodDecl;
+  class ObjCProtocolDecl;
+  class Selector;
+  class ObjCIvarDecl;
+  class ObjCStringLiteral;
+
+namespace CodeGen {
+  class CodeGenModule;
+
+// FIXME: Several methods should be pure virtual but aren't to avoid the
+// partially-implemented subclass breaking.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+public:
+  // Utility functions for unified ivar access. These need to
+  // eventually be folded into other places (the structure layout
+  // code).
+
+protected:
+  /// Compute an offset to the given ivar, suitable for passing to
+  /// EmitValueForIvarAtOffset.  Note that the correct handling of
+  /// bit-fields is carefully coordinated by these two, use caution!
+  ///
+  /// The latter overload is suitable for computing the offset of a
+  /// synthesized ivar.
+  uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+                                 const ObjCInterfaceDecl *OID,
+                                 const ObjCIvarDecl *Ivar);
+  uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+                                 const ObjCImplementationDecl *OID,
+                                 const ObjCIvarDecl *Ivar);
+
+  /// Build an l-value for the ivar of BaseValue found at the given
+  /// byte offset; pairs with ComputeIvarBaseOffset above.
+  LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+                                  const ObjCInterfaceDecl *OID,
+                                  llvm::Value *BaseValue,
+                                  const ObjCIvarDecl *Ivar,
+                                  unsigned CVRQualifiers,
+                                  llvm::Value *Offset);
+
+public:
+  virtual ~CGObjCRuntime();
+
+  /// Generate the function required to register all Objective-C components in
+  /// this compilation unit with the runtime library.
+  virtual llvm::Function *ModuleInitFunction() = 0;
+
+  /// Get a selector for the specified name and type values. The
+  /// return value should have the LLVM type for pointer-to
+  /// ASTContext::getObjCSelType().
+  virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+                                   Selector Sel) = 0;
+
+  /// Get a typed selector.
+  virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+                                   const ObjCMethodDecl *Method) = 0;
+
+  /// Generate a constant string object.
+  virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
+
+  /// Generate a category.  A category contains a list of methods (and
+  /// accompanying metadata) and a list of protocols.
+  virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+  /// Generate a class structure for this class.
+  virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+  /// Generate an Objective-C message send operation.
+  ///
+  /// \param Method - The method being called, this may be null if synthesizing
+  /// a property setter or getter.
+  virtual CodeGen::RValue
+  GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+                      QualType ResultType,
+                      Selector Sel,
+                      llvm::Value *Receiver,
+                      bool IsClassMessage,
+                      const CallArgList &CallArgs,
+                      const ObjCMethodDecl *Method = 0) = 0;
+
+  /// Generate an Objective-C message send operation to the super
+  /// class initiated in a method for Class and with the given Self
+  /// object.
+  ///
+  /// \param Method - The method being called, this may be null if synthesizing
+  /// a property setter or getter.
+  virtual CodeGen::RValue
+  GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+                           QualType ResultType,
+                           Selector Sel,
+                           const ObjCInterfaceDecl *Class,
+                           bool isCategoryImpl,
+                           llvm::Value *Self,
+                           bool IsClassMessage,
+                           const CallArgList &CallArgs,
+                           const ObjCMethodDecl *Method = 0) = 0;
+
+  /// Emit the code to return the named protocol as an object, as in a
+  /// @protocol expression.
+  virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+                                           const ObjCProtocolDecl *OPD) = 0;
+
+  /// Generate the named protocol.  Protocols contain method metadata but no
+  /// implementations.
+  virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+  /// Generate a function preamble for a method with the specified
+  /// types.
+
+  // FIXME: Currently this just generates the Function definition, but really
+  // this should also be generating the loads of the parameters, as the runtime
+  // should have full control over how parameters are passed.
+  virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+                                         const ObjCContainerDecl *CD) = 0;
+
+  /// Return the runtime function for getting properties.
+  virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+  /// Return the runtime function for setting properties.
+  virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+  /// GetClass - Return a reference to the class for the given
+  /// interface decl.
+  virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+                                const ObjCInterfaceDecl *OID) = 0;
+
+  /// EnumerationMutationFunction - Return the function that's called by the
+  /// compiler when a mutation is detected during foreach iteration.
+  virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+  /// Emit code for an @try/@catch/@finally or @synchronized statement.
+  virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+                                         const Stmt &S) = 0;
+  /// Emit code for an @throw statement.
+  virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+                             const ObjCAtThrowStmt &S) = 0;
+  /// Emit a read of a __weak object through the runtime.
+  virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+                                        llvm::Value *AddrWeakObj) = 0;
+  /// Emit an assignment to a __weak object through the runtime.
+  virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+                                  llvm::Value *src, llvm::Value *dest) = 0;
+  /// Emit an assignment to a __strong global through the runtime.
+  virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+                                    llvm::Value *src, llvm::Value *dest) = 0;
+  /// Emit an assignment to a __strong ivar through the runtime.
+  virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+                                  llvm::Value *src, llvm::Value *dest,
+                                  llvm::Value *ivarOffset) = 0;
+  /// Emit an assignment to a __strong cast destination through the runtime.
+  virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+                                        llvm::Value *src, llvm::Value *dest) = 0;
+
+  /// Build an l-value for the given ivar of an object of type ObjectTy.
+  virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+                                      QualType ObjectTy,
+                                      llvm::Value *BaseValue,
+                                      const ObjCIvarDecl *Ivar,
+                                      unsigned CVRQualifiers) = 0;
+  /// Emit the offset of the given ivar within Interface.
+  virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+                                      const ObjCInterfaceDecl *Interface,
+                                      const ObjCIvarDecl *Ivar) = 0;
+  /// Emit a GC-aware memmove of an aggregate of type Ty.
+  virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+                                        llvm::Value *DestPtr,
+                                        llvm::Value *SrcPtr,
+                                        QualType Ty) = 0;
+};
+
+/// Creates an instance of an Objective-C runtime class.
+//TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacNonFragileABIObjCRuntime(CodeGenModule &CGM);
+}
+}
+#endif
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
new file mode 100644
index 0000000..5236d20
--- /dev/null
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -0,0 +1,856 @@
+//===--- CGCXXRTTI.cpp - Emit LLVM Code for C++ RTTI descriptors ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of RTTI descriptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Type.h"
+#include "clang/AST/RecordLayout.h"
+#include "CodeGenModule.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class RTTIBuilder {
+  CodeGenModule &CGM;  // Per-module state.
+  llvm::LLVMContext &VMContext;
+  
+  // Cached i8* type; every RTTI constant is handed out as an i8* bitcast.
+  const llvm::Type *Int8PtrTy;
+  
+  /// Fields - The fields of the RTTI descriptor currently being built.
+  llvm::SmallVector<llvm::Constant *, 16> Fields;
+
+  /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI 
+  /// descriptor of the given type.
+  llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
+  
+  /// BuildVtablePointer - Build the vtable pointer for the given type.
+  void BuildVtablePointer(const Type *Ty);
+  
+  /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+  /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+  void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
+  
+  /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+  /// classes with bases that do not satisfy the abi::__si_class_type_info 
+  /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+  void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
+  
+  /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
+  /// for pointer types.
+  void BuildPointerTypeInfo(const PointerType *Ty);
+  
+  /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info 
+  /// struct, used for member pointer types.
+  void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
+  
+public:
+  RTTIBuilder(CodeGenModule &cgm)
+    : CGM(cgm), VMContext(cgm.getModule().getContext()),
+      Int8PtrTy(llvm::Type::getInt8PtrTy(VMContext)) { }
+
+  /// BuildName - Build (or reuse) the type name string (_ZTS*) global for the
+  /// given type and return it as an i8*. If only a declaration of the name
+  /// exists so far, it is replaced by the new definition.
+  llvm::Constant *BuildName(QualType Ty, bool Hidden, 
+                            llvm::GlobalVariable::LinkageTypes Linkage) {
+    llvm::SmallString<256> OutName;
+    CGM.getMangleContext().mangleCXXRTTIName(Ty, OutName);
+    llvm::StringRef Name = OutName.str();
+
+    // If a definition already exists, just reuse it.
+    llvm::GlobalVariable *OGV = CGM.getModule().getNamedGlobal(Name);
+    if (OGV && !OGV->isDeclaration())
+      return llvm::ConstantExpr::getBitCast(OGV, Int8PtrTy);
+
+    // The initializer is the mangled name without its "_ZTS" prefix
+    // (hence substr(4)).
+    llvm::Constant *C = llvm::ConstantArray::get(VMContext, Name.substr(4));
+
+    llvm::GlobalVariable *GV = 
+      new llvm::GlobalVariable(CGM.getModule(), C->getType(), true, Linkage,
+                               C, Name);
+    if (OGV) {
+      // Replace the pre-existing declaration with this definition, keeping
+      // the original name and redirecting all of its uses.
+      GV->takeName(OGV);
+      llvm::Constant *NewPtr = llvm::ConstantExpr::getBitCast(GV,
+                                                              OGV->getType());
+      OGV->replaceAllUsesWith(NewPtr);
+      OGV->eraseFromParent();
+    }
+    if (Hidden)
+      GV->setVisibility(llvm::GlobalVariable::HiddenVisibility);
+    return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+  }
+
+  // FIXME: unify with DecideExtern
+  /// DecideHidden - Return true if the RTTI for this type should get hidden
+  /// visibility, i.e. if every component type it is built from is hidden.
+  bool DecideHidden(QualType Ty) {
+    // For this type, see if all components are never hidden.
+    if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
+      return (DecideHidden(MPT->getPointeeType())
+              && DecideHidden(QualType(MPT->getClass(), 0)));
+    if (const PointerType *PT = Ty->getAs<PointerType>())
+      return DecideHidden(PT->getPointeeType());
+    if (const FunctionType *FT = Ty->getAs<FunctionType>()) {
+      if (DecideHidden(FT->getResultType()) == false)
+        return false;
+      // For prototyped functions, argument and exception types must all be
+      // hidden as well.
+      if (const FunctionProtoType *FPT = Ty->getAs<FunctionProtoType>()) {
+        for (unsigned i = 0; i <FPT->getNumArgs(); ++i)
+          if (DecideHidden(FPT->getArgType(i)) == false)
+            return false;
+        for (unsigned i = 0; i <FPT->getNumExceptions(); ++i)
+          if (DecideHidden(FPT->getExceptionType(i)) == false)
+            return false;
+        return true;
+      }
+    }
+    if (const RecordType *RT = Ty->getAs<RecordType>())
+      if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+        return CGM.getDeclVisibilityMode(RD) == LangOptions::Hidden;
+    return false;
+  }
+  
+  // Pointer type info flags (abi::__pbase_type_info::__masks).
+  enum {
+    /// PTI_Const - Type has const qualifier.
+    PTI_Const = 0x1,
+    
+    /// PTI_Volatile - Type has volatile qualifier.
+    PTI_Volatile = 0x2,
+    
+    /// PTI_Restrict - Type has restrict qualifier.
+    PTI_Restrict = 0x4,
+    
+    /// PTI_Incomplete - Type is incomplete.
+    PTI_Incomplete = 0x8,
+    
+    /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+    /// (in pointer to member).
+    PTI_ContainingClassIncomplete = 0x10
+  };
+  
+  // VMI type info flags (abi::__vmi_class_type_info::__flags_masks).
+  enum {
+    /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+    VMI_NonDiamondRepeat = 0x1,
+    
+    /// VMI_DiamondShaped - Class is diamond shaped.
+    VMI_DiamondShaped = 0x2
+  };
+  
+  // Base class type info flags (abi::__base_class_type_info masks).
+  enum {
+    /// BCTI_Virtual - Base class is virtual.
+    BCTI_Virtual = 0x1,
+    
+    /// BCTI_Public - Base class is public.
+    BCTI_Public = 0x2
+  };
+  
+  /// BuildTypeInfo - Build the RTTI type info struct for the given type.
+  llvm::Constant *BuildTypeInfo(QualType Ty);
+};
+}
+
+llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
+  // Mangle the RTTI symbol name for this type.
+  llvm::SmallString<256> OutName;
+  CGM.getMangleContext().mangleCXXRTTI(Ty, OutName);
+  llvm::StringRef Name = OutName.str();
+
+  // Reuse an existing global for this name, or emit a fresh external
+  // declaration — the definition lives in some other translation unit.
+  llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
+  if (!GV)
+    GV = new llvm::GlobalVariable(CGM.getModule(), Int8PtrTy, /*Constant=*/true,
+                                  llvm::GlobalValue::ExternalLinkage, 0, Name);
+
+  return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+}
+
+/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+  // Itanium C++ ABI 2.9.2:
+  //   Basic type information (e.g. for "int", "bool", etc.) will be kept in
+  //   the run-time support library. Specifically, the run-time support
+  //   library should contain type_info objects for the types X, X* and 
+  //   X const*, for every X in: void, bool, wchar_t, char, unsigned char, 
+  //   signed char, short, unsigned short, int, unsigned int, long, 
+  //   unsigned long, long long, unsigned long long, float, double, long double, 
+  //   char16_t, char32_t, and the IEEE 754r decimal and half-precision 
+  //   floating point types.
+  switch (Ty->getKind()) {
+    case BuiltinType::Void:
+    case BuiltinType::Bool:
+    case BuiltinType::WChar:
+    case BuiltinType::Char_U:
+    case BuiltinType::Char_S:
+    case BuiltinType::UChar:
+    case BuiltinType::SChar:
+    case BuiltinType::Short:
+    case BuiltinType::UShort:
+    case BuiltinType::Int:
+    case BuiltinType::UInt:
+    case BuiltinType::Long:
+    case BuiltinType::ULong:
+    case BuiltinType::LongLong:
+    case BuiltinType::ULongLong:
+    case BuiltinType::Float:
+    case BuiltinType::Double:
+    case BuiltinType::LongDouble:
+    case BuiltinType::Char16:
+    case BuiltinType::Char32:
+    case BuiltinType::Int128:
+    case BuiltinType::UInt128:
+      return true;
+      
+    // The remaining kinds are not expected to reach RTTI emission; with
+    // asserts disabled they fall through and return false below.
+    case BuiltinType::Overload:
+    case BuiltinType::Dependent:
+    case BuiltinType::UndeducedAuto:
+      assert(false && "Should not see this type here!");
+      
+    case BuiltinType::NullPtr:
+      assert(false && "FIXME: nullptr_t is not handled!");
+
+    case BuiltinType::ObjCId:
+    case BuiltinType::ObjCClass:
+    case BuiltinType::ObjCSel:
+      assert(false && "FIXME: Objective-C types are unsupported!");
+  }
+  
+  // Not reached; silences gcc's "control reaches end of non-void function".
+  return false;
+}
+
+/// TypeInfoIsInStandardLibrary - Returns whether the type info for the given
+/// pointer type is defined in the standard library; per Itanium C++ ABI 2.9.2
+/// that is only X* and X const* for builtin X.
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+  QualType PointeeTy = PointerTy->getPointeeType();
+
+  // Any qualifier other than (at most) const disqualifies the pointee.
+  Qualifiers Quals = PointeeTy.getQualifiers();
+  Quals.removeConst();
+  if (!Quals.empty())
+    return false;
+
+  // The pointee must be a builtin type covered by the runtime library.
+  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy))
+    return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+  return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type exists somewhere else, and that we should not emit the type
+/// information in this translation unit.
+static bool ShouldUseExternalRTTIDescriptor(QualType Ty) {
+  // Type info for builtin types is defined in the standard library.
+  if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+    return TypeInfoIsInStandardLibrary(BuiltinTy);
+  
+  // Type info for some pointer types to builtin types is defined in the
+  // standard library.
+  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+    return TypeInfoIsInStandardLibrary(PointerTy);
+
+  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+    // An incomplete or non-dynamic class has no authoritative home TU for
+    // its RTTI, so it must be emitted wherever it is needed.
+    if (!RD->hasDefinition())
+      return false;
+
+    if (!RD->isDynamicClass())
+      return false;
+
+    // Get the key function.
+    const CXXMethodDecl *KeyFunction = RD->getASTContext().getKeyFunction(RD);
+    if (KeyFunction && !KeyFunction->getBody()) {
+      // The class has a key function, but it is not defined in this translation
+      // unit, so we should use the external descriptor for it.
+      return true;
+    }
+  }
+  
+  return false;
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *RecordTy) {
+  // A record type is complete exactly when its declaration is a definition.
+  const RecordDecl *RD = RecordTy->getDecl();
+  return !RD->isDefinition();
+}
+
+/// ContainsIncompleteClassType - Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+///   * The given type is an incomplete class type.
+///   * The given type is a pointer type whose pointee type contains an
+///     incomplete class type.
+///   * The given type is a member pointer type whose class is an incomplete
+///     class type.
+///   * The given type is a member pointer type whose pointee type contains an
+///     incomplete class type.
+static bool ContainsIncompleteClassType(QualType Ty) {
+  // A record type can itself be the incomplete class type.
+  if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty))
+    return IsIncompleteClassType(RecordTy);
+
+  // A pointer contains an incomplete class type iff its pointee does.
+  if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+    return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+  // A member pointer does if either its containing class is incomplete or
+  // its pointee contains an incomplete class type.
+  if (const MemberPointerType *MemberPointerTy =
+        dyn_cast<MemberPointerType>(Ty)) {
+    const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
+    return IsIncompleteClassType(ClassType) ||
+           ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+  }
+
+  return false;
+}
+
+/// getTypeInfoLinkage - Return the linkage that the type info and type info
+/// name constants should have for the given type.
+static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
+  // Itanium C++ ABI 2.9.5p7:
+  //   In addition, it and all of the intermediate abi::__pointer_type_info 
+  //   structs in the chain down to the abi::__class_type_info for the
+  //   incomplete class type must be prevented from resolving to the 
+  //   corresponding type_info structs for the complete class type, possibly
+  //   by making them local static objects. Finally, a dummy class RTTI is
+  //   generated for the incomplete type that will not resolve to the final 
+  //   complete class RTTI (because the latter need not exist), possibly by 
+  //   making it a local static object.
+  if (ContainsIncompleteClassType(Ty))
+    return llvm::GlobalValue::InternalLinkage;
+  
+  switch (Ty->getTypeClass()) {
+  default:   
+    // FIXME: We need to add code to handle all types.
+    assert(false && "Unhandled type!");
+    break;
+
+  case Type::Pointer: {
+    const PointerType *PointerTy = cast<PointerType>(Ty);
+ 
+    // If the pointee type has internal linkage, then the pointer type needs to
+    // have it as well.
+    if (getTypeInfoLinkage(PointerTy->getPointeeType()) == 
+        llvm::GlobalVariable::InternalLinkage)
+      return llvm::GlobalVariable::InternalLinkage;
+    
+    return llvm::GlobalVariable::WeakODRLinkage;
+  }
+
+  case Type::Enum: {
+    const EnumType *EnumTy = cast<EnumType>(Ty);
+    const EnumDecl *ED = EnumTy->getDecl();
+    
+    // If we're in an anonymous namespace, then we always want internal linkage.
+    if (ED->isInAnonymousNamespace() || !ED->hasLinkage())
+      return llvm::GlobalVariable::InternalLinkage;
+    
+    return llvm::GlobalValue::WeakODRLinkage;
+  }
+
+  case Type::Record: {
+    const RecordType *RecordTy = cast<RecordType>(Ty);
+    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+
+    // If we're in an anonymous namespace, then we always want internal linkage.
+    if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
+      return llvm::GlobalVariable::InternalLinkage;
+
+    // If this class does not have a vtable, we want weak linkage.
+    if (!RD->isDynamicClass())
+      return llvm::GlobalValue::WeakODRLinkage;
+    
+    // Dynamic classes: the type info follows the linkage of the vtable.
+    return CodeGenModule::getVtableLinkage(RD);
+  }
+
+  case Type::Vector:
+  case Type::ExtVector:
+  case Type::Builtin:
+    return llvm::GlobalValue::WeakODRLinkage;
+
+  case Type::FunctionProto: {
+    const FunctionProtoType *FPT = cast<FunctionProtoType>(Ty);
+
+    // Check the return type.
+    if (getTypeInfoLinkage(FPT->getResultType()) == 
+        llvm::GlobalValue::InternalLinkage)
+      return llvm::GlobalValue::InternalLinkage;
+    
+    // Check the parameter types.
+    for (unsigned i = 0; i != FPT->getNumArgs(); ++i) {
+      if (getTypeInfoLinkage(FPT->getArgType(i)) == 
+          llvm::GlobalValue::InternalLinkage)
+        return llvm::GlobalValue::InternalLinkage;
+    }
+    
+    return llvm::GlobalValue::WeakODRLinkage;
+  }
+  
+  case Type::ConstantArray: 
+  case Type::IncompleteArray: {
+    const ArrayType *AT = cast<ArrayType>(Ty);
+
+    // Check the element type.
+    if (getTypeInfoLinkage(AT->getElementType()) ==
+        llvm::GlobalValue::InternalLinkage)
+      return llvm::GlobalValue::InternalLinkage;
+    // Otherwise fall out of the switch to the WeakODR return below.
+  }
+
+  }
+
+  return llvm::GlobalValue::WeakODRLinkage;
+}
+
+// CanUseSingleInheritance - Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+  // There must be exactly one direct base.
+  if (RD->getNumBases() != 1)
+    return false;
+  
+  CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+  
+  // The base must be non-virtual and public.
+  if (Base->isVirtual() || Base->getAccessSpecifier() != AS_public)
+    return false;
+  
+  // Finally, the derived class must be dynamic iff the base is. An empty
+  // base is exempt from this check.
+  const CXXRecordDecl *BaseDecl = 
+    cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+  return BaseDecl->isEmpty() ||
+         BaseDecl->isDynamicClass() == RD->isDynamicClass();
+}
+
+/// BuildVtablePointer - Push, as the descriptor's first field, the address
+/// point of the vtable of the __cxxabiv1 type_info class matching the given
+/// type's type class.
+void RTTIBuilder::BuildVtablePointer(const Type *Ty) {
+  const char *VtableName;
+
+  switch (Ty->getTypeClass()) {
+  default: assert(0 && "Unhandled type!");
+
+  // GCC treats vector types as fundamental types.
+  case Type::Vector:
+  case Type::ExtVector:
+    // abi::__fundamental_type_info.
+    VtableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+    break;
+
+  case Type::ConstantArray:
+  case Type::IncompleteArray:
+    // abi::__array_type_info.
+    VtableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+    break;
+
+  case Type::FunctionNoProto:
+  case Type::FunctionProto:
+    // abi::__function_type_info.
+    VtableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+    break;
+
+  case Type::Enum:
+    // abi::__enum_type_info.
+    VtableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+    break;
+      
+  case Type::Record: {
+    const CXXRecordDecl *RD = 
+      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+    
+    // Pick the most derived type_info class the record qualifies for.
+    if (!RD->hasDefinition() || !RD->getNumBases()) {
+      // abi::__class_type_info.
+      VtableName = "_ZTVN10__cxxabiv117__class_type_infoE";
+    } else if (CanUseSingleInheritance(RD)) {
+      // abi::__si_class_type_info.
+      VtableName = "_ZTVN10__cxxabiv120__si_class_type_infoE";
+    } else {
+      // abi::__vmi_class_type_info.
+      VtableName = "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+    }
+    
+    break;
+  }
+
+  case Type::Pointer:
+    // abi::__pointer_type_info.
+    VtableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+    break;
+
+  case Type::MemberPointer:
+    // abi::__pointer_to_member_type_info.
+    VtableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+    break;
+  }
+
+  llvm::Constant *Vtable = 
+    CGM.getModule().getOrInsertGlobal(VtableName, Int8PtrTy);
+    
+  const llvm::Type *PtrDiffTy = 
+    CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+  // The vtable address point is 2, i.e. two entries past the start of the
+  // vtable (per the Itanium ABI these hold offset-to-top and the RTTI
+  // pointer).
+  llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+  Vtable = llvm::ConstantExpr::getInBoundsGetElementPtr(Vtable, &Two, 1);
+  Vtable = llvm::ConstantExpr::getBitCast(Vtable, Int8PtrTy);
+
+  Fields.push_back(Vtable);
+}
+
+/// BuildTypeInfo - Build (or find) the RTTI type info struct for the given
+/// type and return it as an i8*. Reuses an existing definition, defers to an
+/// external descriptor when possible, and otherwise emits a new global,
+/// replacing any previous declaration of the same name.
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty) {
+  // We want to operate on the canonical type.
+  Ty = CGM.getContext().getCanonicalType(Ty);
+
+  // Check if we've already emitted an RTTI descriptor for this type.
+  llvm::SmallString<256> OutName;
+  CGM.getMangleContext().mangleCXXRTTI(Ty, OutName);
+  llvm::StringRef Name = OutName.str();
+  
+  llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
+  if (OldGV && !OldGV->isDeclaration())
+    return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
+  
+  // Check if there is already an external RTTI descriptor for this type.
+  if (ShouldUseExternalRTTIDescriptor(Ty))
+    return GetAddrOfExternalRTTIDescriptor(Ty);
+
+  llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(Ty);
+
+  // Add the vtable pointer.
+  BuildVtablePointer(cast<Type>(Ty));
+  
+  // And the name.
+  Fields.push_back(BuildName(Ty, DecideHidden(Ty), Linkage));
+  
+  // Then any fields the specific type_info subclass requires.
+  switch (Ty->getTypeClass()) {
+  default: assert(false && "Unhandled type class!");
+  case Type::Builtin:
+    assert(false && "Builtin type info must be in the standard library!");
+    break;
+
+  // GCC treats vector types as fundamental types.
+  case Type::Vector:
+  case Type::ExtVector:
+    // Itanium C++ ABI 2.9.5p4:
+    // abi::__fundamental_type_info adds no data members to std::type_info.
+    break;
+      
+  case Type::ConstantArray:
+  case Type::IncompleteArray:
+    // Itanium C++ ABI 2.9.5p5:
+    // abi::__array_type_info adds no data members to std::type_info.
+    break;
+
+  case Type::FunctionNoProto:
+  case Type::FunctionProto:
+    // Itanium C++ ABI 2.9.5p5:
+    // abi::__function_type_info adds no data members to std::type_info.
+    break;
+
+  case Type::Enum:
+    // Itanium C++ ABI 2.9.5p5:
+    // abi::__enum_type_info adds no data members to std::type_info.
+    break;
+
+  case Type::Record: {
+    const CXXRecordDecl *RD = 
+      cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+    if (!RD->hasDefinition() || !RD->getNumBases()) {
+      // We don't need to emit any fields.
+      break;
+    }
+    
+    if (CanUseSingleInheritance(RD))
+      BuildSIClassTypeInfo(RD);
+    else 
+      BuildVMIClassTypeInfo(RD);
+
+    break;
+  }
+      
+  case Type::Pointer:
+    BuildPointerTypeInfo(cast<PointerType>(Ty));
+    break;
+  
+  case Type::MemberPointer:
+    BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
+    break;
+  }
+
+  llvm::Constant *Init = 
+    llvm::ConstantStruct::get(VMContext, &Fields[0], Fields.size(), 
+                              /*Packed=*/false);
+
+  llvm::GlobalVariable *GV = 
+    new llvm::GlobalVariable(CGM.getModule(), Init->getType(), 
+                             /*Constant=*/true, Linkage, Init, Name);
+  
+  // If there's already an old global variable, replace it with the new one.
+  if (OldGV) {
+    GV->takeName(OldGV);
+    llvm::Constant *NewPtr = 
+      llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+    OldGV->replaceAllUsesWith(NewPtr);
+    OldGV->eraseFromParent();
+  }
+    
+  return llvm::ConstantExpr::getBitCast(GV, Int8PtrTy);
+}
+
+/// ComputeQualifierFlags - Compute the pointer type info flags from the
+/// given qualifiers.
+static unsigned ComputeQualifierFlags(Qualifiers Quals) {
+  // Map each cv/restrict qualifier onto its abi::__pbase_type_info flag bit.
+  unsigned Flags = Quals.hasConst() ? RTTIBuilder::PTI_Const : 0;
+
+  if (Quals.hasVolatile())
+    Flags |= RTTIBuilder::PTI_Volatile;
+
+  if (Quals.hasRestrict())
+    Flags |= RTTIBuilder::PTI_Restrict;
+
+  return Flags;
+}
+
+/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
+  // Itanium C++ ABI 2.9.5p6b:
+  //   It adds to abi::__class_type_info a single member pointing to the
+  //   type_info structure for the base type.
+  QualType BaseTy = RD->bases_begin()->getType();
+  Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(BaseTy));
+}
+
+/// SeenBases - Contains virtual and non-virtual bases seen when traversing
+/// a class hierarchy. Used by ComputeVMIClassTypeInfoFlags to detect
+/// repeated (and diamond-shaped) inheritance.
+struct SeenBases {
+  llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+  llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+};
+
+/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
+/// abi::__vmi_class_type_info for the subhierarchy rooted at the given base,
+/// recording each base class seen in Bases along the way.
+///
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base, 
+                                             SeenBases &Bases) {
+  
+  unsigned Flags = 0;
+  
+  const CXXRecordDecl *BaseDecl = 
+    cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+  
+  if (Base->isVirtual()) {
+    if (Bases.VirtualBases.count(BaseDecl)) {
+      // If this virtual base has been seen before, then the class is diamond
+      // shaped.
+      Flags |= RTTIBuilder::VMI_DiamondShaped;
+    } else {
+      // Seen both virtually and non-virtually: repeated, but not a diamond.
+      if (Bases.NonVirtualBases.count(BaseDecl))
+        Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+      // Mark the virtual base as seen.
+      Bases.VirtualBases.insert(BaseDecl);
+    }
+  } else {
+    if (Bases.NonVirtualBases.count(BaseDecl)) {
+      // If this non-virtual base has been seen before, then the class has non-
+      // diamond shaped repeated inheritance.
+      Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+    } else {
+      // Seen both virtually and non-virtually: repeated, but not a diamond.
+      if (Bases.VirtualBases.count(BaseDecl))
+        Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+        
+      // Mark the non-virtual base as seen.
+      Bases.NonVirtualBases.insert(BaseDecl);
+    }
+  }
+
+  // Walk all bases.
+  for (CXXRecordDecl::base_class_const_iterator I = BaseDecl->bases_begin(),
+       E = BaseDecl->bases_end(); I != E; ++I) 
+    Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+  
+  return Flags;
+}
+
+/// ComputeVMIClassTypeInfoFlags - Compute the __flags value of
+/// abi::__vmi_class_type_info for the given class, covering both direct and
+/// indirect bases.
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
+  // Shared bookkeeping for the whole hierarchy walk.
+  SeenBases Bases;
+
+  // Accumulate flags over every direct base (recursing into each).
+  unsigned Flags = 0;
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I)
+    Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+  return Flags;
+}
+
+/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info 
+/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
+  const llvm::Type *UnsignedIntLTy = 
+    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+  
+  // Itanium C++ ABI 2.9.5p6c:
+  //   __flags is a word with flags describing details about the class 
+  //   structure, which may be referenced by using the __flags_masks 
+  //   enumeration. These flags refer to both direct and indirect bases. 
+  unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
+  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+  // Itanium C++ ABI 2.9.5p6c:
+  //   __base_count is a word with the number of direct proper base class 
+  //   descriptions that follow.
+  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
+  
+  if (!RD->getNumBases())
+    return;
+  
+  const llvm::Type *LongLTy = 
+    CGM.getTypes().ConvertType(CGM.getContext().LongTy);
+
+  // Now add the base class descriptions.
+  
+  // Itanium C++ ABI 2.9.5p6c:
+  //   __base_info[] is an array of base class descriptions -- one for every 
+  //   direct proper base. Each description is of the type:
+  //
+  //   struct abi::__base_class_type_info {
+  //   public:
+  //     const __class_type_info *__base_type;
+  //     long __offset_flags;
+  //
+  //     enum __offset_flags_masks {
+  //       __virtual_mask = 0x1,
+  //       __public_mask = 0x2,
+  //       __offset_shift = 8
+  //     };
+  //   };
+  for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+       E = RD->bases_end(); I != E; ++I) {
+    const CXXBaseSpecifier *Base = I;
+
+    // The __base_type member points to the RTTI for the base type.
+    Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base->getType()));
+
+    const CXXRecordDecl *BaseDecl = 
+      cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+    int64_t OffsetFlags = 0;
+    
+    // All but the lower 8 bits of __offset_flags are a signed offset. 
+    // For a non-virtual base, this is the offset in the object of the base
+    // subobject. For a virtual base, this is the offset in the virtual table of
+    // the virtual base offset for the virtual base referenced (negative).
+    if (Base->isVirtual())
+      OffsetFlags = CGM.getVtableInfo().getVirtualBaseOffsetIndex(RD, BaseDecl);
+    else {
+      // The record layout offset is in bits; __offset_flags wants bytes.
+      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+      OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8;
+    }
+    
+    OffsetFlags <<= 8;
+    
+    // The low-order byte of __offset_flags contains flags, as given by the 
+    // masks from the enumeration __offset_flags_masks.
+    if (Base->isVirtual())
+      OffsetFlags |= BCTI_Virtual;
+    if (Base->getAccessSpecifier() == AS_public)
+      OffsetFlags |= BCTI_Public;
+
+    Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
+  }
+}
+
+/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
+/// used for pointer types.
+void RTTIBuilder::BuildPointerTypeInfo(const PointerType *Ty) {
+  QualType PointeeTy = Ty->getPointeeType();
+
+  // Itanium C++ ABI 2.9.5p7:
+  //   __flags is a flag word describing the cv-qualification and other
+  //   attributes of the type pointed to. When the abi::__pbase_type_info is
+  //   for a direct or indirect pointer to an incomplete class type, the
+  //   incomplete target type flag is set as well.
+  unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+  if (ContainsIncompleteClassType(PointeeTy))
+    Flags |= PTI_Incomplete;
+
+  const llvm::Type *UnsignedIntLTy =
+    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+  // Itanium C++ ABI 2.9.5p7:
+  //  __pointee is a pointer to the std::type_info derivation for the
+  //  unqualified type being pointed to.
+  Fields.push_back(
+    RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType()));
+}
+
+/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+/// struct, used for member pointer types.
+void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
+  QualType PointeeTy = Ty->getPointeeType();
+  const RecordType *ClassType = cast<RecordType>(Ty->getClass());
+
+  // Itanium C++ ABI 2.9.5p7:
+  //   __flags is a flag word describing the cv-qualification and other
+  //   attributes of the type pointed to. The incomplete target type flag is
+  //   set when the pointee contains an incomplete class type; the incomplete
+  //   class type flag is set when the containing class itself is incomplete.
+  unsigned Flags = ComputeQualifierFlags(PointeeTy.getQualifiers());
+  if (ContainsIncompleteClassType(PointeeTy))
+    Flags |= PTI_Incomplete;
+  if (IsIncompleteClassType(ClassType))
+    Flags |= PTI_ContainingClassIncomplete;
+
+  const llvm::Type *UnsignedIntLTy =
+    CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+  Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+  // Itanium C++ ABI 2.9.5p7:
+  //   __pointee is a pointer to the std::type_info derivation for the
+  //   unqualified type being pointed to.
+  Fields.push_back(
+    RTTIBuilder(CGM).BuildTypeInfo(PointeeTy.getUnqualifiedType()));
+
+  // Itanium C++ ABI 2.9.5p9:
+  //   __context is a pointer to an abi::__class_type_info corresponding to the
+  //   class type containing the member pointed to
+  //   (e.g., the "A" in "int A::*").
+  Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(QualType(ClassType, 0)));
+}
+
+llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty) {
+  // With RTTI disabled (-fno-rtti), the RTTI slot is simply a null i8*.
+  if (!getContext().getLangOptions().RTTI)
+    return llvm::Constant::getNullValue(llvm::Type::getInt8PtrTy(VMContext));
+
+  return RTTIBuilder(*this).BuildTypeInfo(Ty);
+}
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
new file mode 100644
index 0000000..baafd68
--- /dev/null
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -0,0 +1,404 @@
+//===--- CGRecordLayoutBuilder.cpp - Record builder helper ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a helper class used to build CGRecordLayout objects and LLVM types.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGRecordLayoutBuilder.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "CodeGenTypes.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Target/TargetData.h"
+
+
+using namespace clang;
+using namespace CodeGen;
+
+// Layout - Lay out the given record.  Unions take a separate path.  For
+// structs, first try a non-packed LLVM struct; if some field cannot be
+// placed at its AST offset that way, reset all builder state and redo the
+// layout as a packed struct (which always succeeds).
+void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
+  Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
+  Packed = D->hasAttr<PackedAttr>();
+
+  if (D->isUnion()) {
+    LayoutUnion(D);
+    return;
+  }
+
+  if (LayoutFields(D))
+    return;
+
+  // We weren't able to layout the struct. Try again with a packed struct
+  Packed = true;
+  AlignmentAsLLVMStruct = 1;
+  NextFieldOffsetInBytes = 0;
+  FieldTypes.clear();
+  LLVMFields.clear();
+  LLVMBitFields.clear();
+
+  LayoutFields(D);
+}
+
+// LayoutBitField - Lay out a single bit-field at the given bit offset.
+// Appends only as many bytes as are needed beyond what previous fields
+// already cover, records the bit-field's position for CodeGenTypes, and
+// tracks how many spare bits remain at the end of the appended storage.
+void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
+                                           uint64_t FieldOffset) {
+  uint64_t FieldSize =
+    D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+
+  // Zero-width bit-fields only affect layout, not storage; nothing to emit.
+  if (FieldSize == 0)
+    return;
+
+  uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
+  unsigned NumBytesToAppend;
+
+  if (FieldOffset < NextFieldOffset) {
+    assert(BitsAvailableInLastField && "Bitfield size mismatch!");
+    assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");
+
+    // The bitfield begins in the previous bit-field.
+    NumBytesToAppend =
+      llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
+  } else {
+    assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");
+
+    // Append padding if necessary.
+    AppendBytes((FieldOffset - NextFieldOffset) / 8);
+
+    NumBytesToAppend =
+      llvm::RoundUpToAlignment(FieldSize, 8) / 8;
+
+    assert(NumBytesToAppend && "No bytes to append!");
+  }
+
+  // Record where the bit-field lives in terms of units of its converted
+  // memory type (element index, bit offset within that element, width).
+  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
+  uint64_t TypeSizeInBits = getTypeSizeInBytes(Ty) * 8;
+
+  LLVMBitFields.push_back(LLVMBitFieldInfo(D, FieldOffset / TypeSizeInBits,
+                                           FieldOffset % TypeSizeInBits,
+                                           FieldSize));
+
+  AppendBytes(NumBytesToAppend);
+
+  // Spare bits between the end of this bit-field and the end of the bytes
+  // appended so far; the next bit-field may start inside them.
+  BitsAvailableInLastField =
+    NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
+}
+
+// LayoutField - Lay out a single field at the given bit offset.  Returns
+// false when the field cannot be placed at its AST offset with the current
+// (non-packed) struct; the caller then restarts the whole layout packed.
+bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
+                                        uint64_t FieldOffset) {
+  // If the field is packed, then we need a packed struct.
+  if (!Packed && D->hasAttr<PackedAttr>())
+    return false;
+
+  if (D->isBitField()) {
+    // We must use packed structs for unnamed bit fields since they
+    // don't affect the struct alignment.
+    if (!Packed && !D->getDeclName())
+      return false;
+
+    LayoutBitField(D, FieldOffset);
+    return true;
+  }
+
+  // Check if we have a pointer to data member in this field.
+  CheckForPointerToDataMember(D->getType());
+  
+  assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
+  uint64_t FieldOffsetInBytes = FieldOffset / 8;
+
+  const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
+  unsigned TypeAlignment = getTypeAlignment(Ty);
+
+  // If the type alignment is larger then the struct alignment, we must use
+  // a packed struct.
+  if (TypeAlignment > Alignment) {
+    assert(!Packed && "Alignment is wrong even with packed struct!");
+    return false;
+  }
+
+  // A record field whose #pragma pack alignment disagrees with its natural
+  // LLVM alignment also forces a packed struct.
+  if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
+    const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+    if (const PragmaPackAttr *PPA = RD->getAttr<PragmaPackAttr>()) {
+      if (PPA->getAlignment() != TypeAlignment * 8 && !Packed)
+        return false;
+    }
+  }
+
+  // Round up the field offset to the alignment of the field type.
+  uint64_t AlignedNextFieldOffsetInBytes =
+    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);
+
+  if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
+    assert(!Packed && "Could not place field even with packed struct!");
+    return false;
+  }
+
+  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+    // Even with alignment, the field offset is not at the right place,
+    // insert padding.
+    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+
+    AppendBytes(PaddingInBytes);
+  }
+
+  // Now append the field.
+  LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
+  AppendField(FieldOffsetInBytes, Ty);
+
+  return true;
+}
+
+// LayoutUnion - Lay out a union.  All members live at offset 0; the LLVM
+// struct is built from the single member type with the strictest alignment
+// (ties broken by size), followed by tail padding up to the union's size.
+void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
+  assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
+
+  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+  const llvm::Type *Ty = 0;
+  uint64_t Size = 0;
+  unsigned Align = 0;
+
+  bool HasOnlyZeroSizedBitFields = true;
+  
+  unsigned FieldNo = 0;
+  for (RecordDecl::field_iterator Field = D->field_begin(),
+       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+    assert(Layout.getFieldOffset(FieldNo) == 0 &&
+          "Union field offset did not start at the beginning of record!");
+
+    if (Field->isBitField()) {
+      uint64_t FieldSize =
+        Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
+
+      // Ignore zero sized bit fields.
+      if (FieldSize == 0)
+        continue;
+
+      // Add the bit field info.
+      Types.addBitFieldInfo(*Field, 0, 0, FieldSize);
+    } else
+      Types.addFieldInfo(*Field, 0);
+
+    HasOnlyZeroSizedBitFields = false;
+    
+    const llvm::Type *FieldTy =
+      Types.ConvertTypeForMemRecursive(Field->getType());
+    unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
+    uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);
+
+    // Candidate selection: keep the most-aligned member type seen so far;
+    // among equally-aligned candidates, prefer the larger one.
+    if (FieldAlign < Align)
+      continue;
+
+    if (FieldAlign > Align || FieldSize > Size) {
+      Ty = FieldTy;
+      Align = FieldAlign;
+      Size = FieldSize;
+    }
+  }
+
+  // Now add our field.
+  if (Ty) {
+    AppendField(0, Ty);
+
+    if (getTypeAlignment(Ty) > Layout.getAlignment() / 8) {
+      // We need a packed struct.
+      Packed = true;
+      Align = 1;
+    }
+  }
+  if (!Align) {
+    assert(HasOnlyZeroSizedBitFields &&
+           "0-align record did not have all zero-sized bit-fields!");
+    Align = 1;
+  }
+  
+  // Append tail padding.
+  if (Layout.getSize() / 8 > Size)
+    AppendPadding(Layout.getSize() / 8, Align);
+}
+
+// LayoutBases - Lay out the bases of a C++ record.  Currently this only
+// emits the vtable pointer slot: a dynamic class with no primary base gets
+// an i8** at offset 0.  Non-virtual base subobject layout is not handled
+// here yet.
+void CGRecordLayoutBuilder::LayoutBases(const CXXRecordDecl *RD,
+                                        const ASTRecordLayout &Layout) {
+  // Check if we need to add a vtable pointer.
+  if (RD->isDynamicClass() && !Layout.getPrimaryBase()) {
+    const llvm::Type *Int8PtrTy = 
+      llvm::Type::getInt8PtrTy(Types.getLLVMContext());
+    
+    assert(NextFieldOffsetInBytes == 0 &&
+           "Vtable pointer must come first!");
+    AppendField(NextFieldOffsetInBytes, Int8PtrTy->getPointerTo());
+  }
+}
+
+// LayoutFields - Lay out bases and all fields of a struct/class, then append
+// tail padding.  Returns false (without cleaning up builder state) as soon as
+// any field cannot be placed non-packed; Layout() then retries packed.
+bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+  assert(!D->isUnion() && "Can't call LayoutFields on a union!");
+  assert(Alignment && "Did not set alignment!");
+
+  const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
+    LayoutBases(RD, Layout);
+  
+  unsigned FieldNo = 0;
+
+  for (RecordDecl::field_iterator Field = D->field_begin(),
+       FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+    if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
+      assert(!Packed &&
+             "Could not layout fields even with a packed LLVM struct!");
+      return false;
+    }
+  }
+
+  // Append tail padding if necessary.
+  AppendTailPadding(Layout.getSize());
+
+  return true;
+}
+
+// AppendTailPadding - Pad the LLVM struct out to the record's total size (in
+// bits).  If the struct's own alignment already rounds it up to the record
+// size, no explicit padding bytes are needed.
+void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
+  assert(RecordSize % 8 == 0 && "Invalid record size!");
+
+  uint64_t RecordSizeInBytes = RecordSize / 8;
+  assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
+
+  uint64_t AlignedNextFieldOffset = 
+    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
+
+  if (AlignedNextFieldOffset == RecordSizeInBytes) {
+    // We don't need any padding.
+    return;
+  }
+  
+  // Pad from the current end of the struct up to the full record size.
+  unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
+  AppendBytes(NumPadBytes);
+}
+
+// AppendField - Append a field type to the struct at the given byte offset,
+// updating the running struct alignment and the next free offset.  Resets
+// BitsAvailableInLastField since the last element is no longer bit-field
+// storage.
+void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
+                                        const llvm::Type *FieldTy) {
+  AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
+                                   getTypeAlignment(FieldTy));
+
+  uint64_t FieldSizeInBytes = getTypeSizeInBytes(FieldTy);
+
+  FieldTypes.push_back(FieldTy);
+
+  NextFieldOffsetInBytes = FieldOffsetInBytes + FieldSizeInBytes;
+  BitsAvailableInLastField = 0;
+}
+
+// AppendPadding (type overload) - Pad up to the given offset using the
+// alignment of the supplied field type.
+void
+CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
+                                     const llvm::Type *FieldTy) {
+  AppendPadding(FieldOffsetInBytes, getTypeAlignment(FieldTy));
+}
+
+// AppendPadding - If aligning the current offset to FieldAlignment would not
+// reach FieldOffsetInBytes, append explicit padding bytes to get there.
+void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
+                                          unsigned FieldAlignment) {
+  assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
+         "Incorrect field layout!");
+
+  // Round up the field offset to the alignment of the field type.
+  uint64_t AlignedNextFieldOffsetInBytes =
+    llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
+
+  if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
+    // Even with alignment, the field offset is not at the right place,
+    // insert padding.
+    uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
+
+    AppendBytes(PaddingInBytes);
+  }
+}
+
+// AppendBytes - Append NumBytes of padding as an i8 (or [N x i8]) element.
+void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
+  if (NumBytes == 0)
+    return;
+
+  const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
+  if (NumBytes > 1)
+    Ty = llvm::ArrayType::get(Ty, NumBytes);
+
+  // Append the padding field
+  AppendField(NextFieldOffsetInBytes, Ty);
+}
+
+// getTypeAlignment - ABI alignment of an LLVM type in bytes; always 1 when
+// building a packed struct.
+unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
+  if (Packed)
+    return 1;
+
+  return Types.getTargetData().getABITypeAlignment(Ty);
+}
+
+// getTypeSizeInBytes - Allocation size (including padding) of an LLVM type.
+uint64_t CGRecordLayoutBuilder::getTypeSizeInBytes(const llvm::Type *Ty) const {
+  return Types.getTargetData().getTypeAllocSize(Ty);
+}
+
+// CheckForPointerToDataMember - Set ContainsPointerToDataMember if the given
+// field type is (or transitively contains, through arrays or nested records)
+// a pointer to data member.  C-only translation units can never contain one.
+void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
+  // This record already contains a member pointer.
+  if (ContainsPointerToDataMember)
+    return;
+
+  // Can only have member pointers if we're compiling C++.
+  if (!Types.getContext().getLangOptions().CPlusPlus)
+    return;
+
+  // Strip off any array types to look at the element type.
+  T = Types.getContext().getBaseElementType(T);
+
+  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>()) {
+    if (!MPT->getPointeeType()->isFunctionType()) {
+      // We have a pointer to data member.
+      ContainsPointerToDataMember = true;
+    }
+  } else if (const RecordType *RT = T->getAs<RecordType>()) {
+    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+    
+    // FIXME: It would be better if there was a way to explicitly compute the
+    // record layout instead of converting to a type.
+    Types.ConvertTagDeclType(RD);
+    
+    const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+    
+    if (Layout.containsPointerToDataMember())
+      ContainsPointerToDataMember = true;
+  }    
+}
+
+// ComputeLayout - Static entry point: run the builder over a record, create
+// the final LLVM struct type, verify its size against the AST layout, and
+// register all field / bit-field positions with CodeGenTypes.  The caller
+// owns the returned CGRecordLayout.
+CGRecordLayout *
+CGRecordLayoutBuilder::ComputeLayout(CodeGenTypes &Types,
+                                     const RecordDecl *D) {
+  CGRecordLayoutBuilder Builder(Types);
+
+  Builder.Layout(D);
+
+  const llvm::Type *Ty = llvm::StructType::get(Types.getLLVMContext(),
+                                               Builder.FieldTypes,
+                                               Builder.Packed);
+  assert(Types.getContext().getASTRecordLayout(D).getSize() / 8 ==
+         Types.getTargetData().getTypeAllocSize(Ty) &&
+         "Type size mismatch!");
+
+  // Add all the field numbers.
+  for (unsigned i = 0, e = Builder.LLVMFields.size(); i != e; ++i) {
+    const FieldDecl *FD = Builder.LLVMFields[i].first;
+    unsigned FieldNo = Builder.LLVMFields[i].second;
+
+    Types.addFieldInfo(FD, FieldNo);
+  }
+
+  // Add bitfield info.
+  for (unsigned i = 0, e = Builder.LLVMBitFields.size(); i != e; ++i) {
+    const LLVMBitFieldInfo &Info = Builder.LLVMBitFields[i];
+
+    Types.addBitFieldInfo(Info.FD, Info.FieldNo, Info.Start, Info.Size);
+  }
+
+  return new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
+}
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.h b/lib/CodeGen/CGRecordLayoutBuilder.h
new file mode 100644
index 0000000..eb60ed7
--- /dev/null
+++ b/lib/CodeGen/CGRecordLayoutBuilder.h
@@ -0,0 +1,142 @@
+//===--- CGRecordLayoutBuilder.h - Record builder helper --------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a helper class used to build CGRecordLayout objects and LLVM types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGRECORDLAYOUTBUILDER_H
+#define CLANG_CODEGEN_CGRECORDLAYOUTBUILDER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/System/DataTypes.h"
+#include <vector>
+
+namespace llvm {
+  class Type;
+}
+
+namespace clang {
+  class ASTRecordLayout;
+  class CXXRecordDecl;
+  class FieldDecl;
+  class RecordDecl;
+  class QualType;
+
+namespace CodeGen {
+  class CGRecordLayout;
+  class CodeGenTypes;
+
+/// CGRecordLayoutBuilder - Builds the LLVM struct type and field/bit-field
+/// position info for a RecordDecl.  Use the static ComputeLayout entry point;
+/// all other members are implementation details of a single layout run.
+class CGRecordLayoutBuilder {
+  CodeGenTypes &Types;
+
+  /// Packed - Whether the resulting LLVM struct will be packed or not.
+  bool Packed;
+
+  /// ContainsPointerToDataMember - Whether one of the fields in this record 
+  /// layout is a pointer to data member, or a struct that contains pointer to
+  /// data member.
+  bool ContainsPointerToDataMember;
+
+  /// Alignment - Contains the alignment of the RecordDecl.
+  unsigned Alignment;
+
+  /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
+  /// LLVM types.
+  unsigned AlignmentAsLLVMStruct;
+
+  /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
+  /// this will have the number of bits still available in the field.
+  /// NOTE(review): declared char — looks narrow for large bit counts; confirm
+  /// the available-bit count can never exceed CHAR_MAX.
+  char BitsAvailableInLastField;
+
+  /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
+  uint64_t NextFieldOffsetInBytes;
+
+  /// FieldTypes - Holds the LLVM types that the struct is created from.
+  std::vector<const llvm::Type *> FieldTypes;
+
+  /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
+  typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
+  llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
+
+  /// LLVMBitFieldInfo - Holds location and size information about a bit field.
+  struct LLVMBitFieldInfo {
+    LLVMBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, unsigned Start,
+                     unsigned Size)
+      : FD(FD), FieldNo(FieldNo), Start(Start), Size(Size) { }
+
+    const FieldDecl *FD;
+
+    unsigned FieldNo;
+    unsigned Start;
+    unsigned Size;
+  };
+  llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
+
+  CGRecordLayoutBuilder(CodeGenTypes &Types)
+    : Types(Types), Packed(false), ContainsPointerToDataMember(false)
+    , Alignment(0), AlignmentAsLLVMStruct(1)
+    , BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
+
+  /// Layout - Will layout a RecordDecl.
+  void Layout(const RecordDecl *D);
+
+  /// LayoutUnion - Will layout a union RecordDecl.
+  void LayoutUnion(const RecordDecl *D);
+
+  /// LayoutFields - try to layout all fields in the record decl.
+  /// Returns false if the operation failed because the struct is not packed.
+  bool LayoutFields(const RecordDecl *D);
+
+  /// LayoutBases - layout the bases and vtable pointer of a record decl.
+  void LayoutBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout);
+  
+  /// LayoutField - layout a single field. Returns false if the operation failed
+  /// because the current struct is not packed.
+  bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
+
+  /// LayoutBitField - layout a single bit field.
+  void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
+
+  /// AppendField - Appends a field with the given offset and type.
+  void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+
+  /// AppendPadding - Appends enough padding bytes so that the total struct
+  /// size matches the alignment of the passed in type.
+  void AppendPadding(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+
+  /// AppendPadding - Appends enough padding bytes so that the total
+  /// struct size is a multiple of the field alignment.
+  void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
+
+  /// AppendBytes - Append a given number of bytes to the record.
+  void AppendBytes(uint64_t NumBytes);
+
+  /// AppendTailPadding - Append enough tail padding so that the type will have
+  /// the passed size.
+  void AppendTailPadding(uint64_t RecordSize);
+
+  unsigned getTypeAlignment(const llvm::Type *Ty) const;
+  uint64_t getTypeSizeInBytes(const llvm::Type *Ty) const;
+
+  /// CheckForPointerToDataMember - Check if the given type contains a pointer 
+  /// to data member.
+  void CheckForPointerToDataMember(QualType T);
+
+public:
+  /// ComputeLayout - Return the right record layout for a given record decl.
+  static CGRecordLayout *ComputeLayout(CodeGenTypes &Types,
+                                       const RecordDecl *D);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+
+#endif
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..008a480
--- /dev/null
+++ b/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,1138 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+//                              Statement Emission
+//===----------------------------------------------------------------------===//
+
+// EmitStopPoint - If debug info is enabled, emit a debug stop point at the
+// statement's start location; otherwise a no-op.
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+  if (CGDebugInfo *DI = getDebugInfo()) {
+    DI->setLocation(S->getLocStart());
+    DI->EmitStopPoint(CurFn, Builder);
+  }
+}
+
+// EmitStmt - Emit LLVM IR for an arbitrary statement.  Simple statements are
+// handled by EmitSimpleStmt; unreachable statements containing no labels are
+// skipped entirely; everything else gets an optional debug stop point and is
+// dispatched on its statement class.
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+  assert(S && "Null statement?");
+
+  // Check if we can handle this without bothering to generate an
+  // insert point or debug info.
+  if (EmitSimpleStmt(S))
+    return;
+
+  // Check if we are generating unreachable code.
+  if (!HaveInsertPoint()) {
+    // If so, and the statement doesn't contain a label, then we do not need to
+    // generate actual code. This is safe because (1) the current point is
+    // unreachable, so we don't need to execute the code, and (2) we've already
+    // handled the statements which update internal data structures (like the
+    // local variable map) which could be used by subsequent statements.
+    if (!ContainsLabel(S)) {
+      // Verify that any decl statements were handled as simple, they may be in
+      // scope of subsequent reachable statements.
+      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
+      return;
+    }
+
+    // Otherwise, make a new block to hold the code.
+    EnsureInsertPoint();
+  }
+
+  // Generate a stoppoint if we are emitting debug info.
+  EmitStopPoint(S);
+
+  switch (S->getStmtClass()) {
+  default:
+    // Must be an expression in a stmt context.  Emit the value (to get
+    // side-effects) and ignore the result.
+    if (!isa<Expr>(S))
+      ErrorUnsupported(S, "statement");
+
+    EmitAnyExpr(cast<Expr>(S), 0, false, true);
+
+    // Expression emitters don't handle unreachable blocks yet, so look for one
+    // explicitly here. This handles the common case of a call to a noreturn
+    // function.
+    if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
+      if (CurBB->empty() && CurBB->use_empty()) {
+        CurBB->eraseFromParent();
+        Builder.ClearInsertionPoint();
+      }
+    }
+    break;
+  case Stmt::IndirectGotoStmtClass:
+    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+  case Stmt::IfStmtClass:       EmitIfStmt(cast<IfStmt>(*S));             break;
+  case Stmt::WhileStmtClass:    EmitWhileStmt(cast<WhileStmt>(*S));       break;
+  case Stmt::DoStmtClass:       EmitDoStmt(cast<DoStmt>(*S));             break;
+  case Stmt::ForStmtClass:      EmitForStmt(cast<ForStmt>(*S));           break;
+
+  case Stmt::ReturnStmtClass:   EmitReturnStmt(cast<ReturnStmt>(*S));     break;
+
+  case Stmt::SwitchStmtClass:   EmitSwitchStmt(cast<SwitchStmt>(*S));     break;
+  case Stmt::AsmStmtClass:      EmitAsmStmt(cast<AsmStmt>(*S));           break;
+
+  case Stmt::ObjCAtTryStmtClass:
+    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+    break;
+  case Stmt::ObjCAtCatchStmtClass:
+    assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
+    break;
+  case Stmt::ObjCAtFinallyStmtClass:
+    assert(0 && "@finally statements should be handled by EmitObjCAtTryStmt");
+    break;
+  case Stmt::ObjCAtThrowStmtClass:
+    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+    break;
+  case Stmt::ObjCAtSynchronizedStmtClass:
+    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+    break;
+  case Stmt::ObjCForCollectionStmtClass:
+    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+    break;
+      
+  case Stmt::CXXTryStmtClass:
+    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
+    break;
+  }
+}
+
+// EmitSimpleStmt - Emit statements that need neither an insert point nor
+// debug info (declarations, labels, jumps, etc.).  Returns false when the
+// statement is not "simple" and must go through the full EmitStmt path.
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+  switch (S->getStmtClass()) {
+  default: return false;
+  case Stmt::NullStmtClass: break;
+  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+  case Stmt::DeclStmtClass:     EmitDeclStmt(cast<DeclStmt>(*S));         break;
+  case Stmt::LabelStmtClass:    EmitLabelStmt(cast<LabelStmt>(*S));       break;
+  case Stmt::GotoStmtClass:     EmitGotoStmt(cast<GotoStmt>(*S));         break;
+  case Stmt::BreakStmtClass:    EmitBreakStmt(cast<BreakStmt>(*S));       break;
+  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+  case Stmt::DefaultStmtClass:  EmitDefaultStmt(cast<DefaultStmt>(*S));   break;
+  case Stmt::CaseStmtClass:     EmitCaseStmt(cast<CaseStmt>(*S));         break;
+  }
+
+  return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
+RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+                                         llvm::Value *AggLoc, bool isAggVol) {
+  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+                             "LLVM IR generation of compound statement ('{}')");
+
+  CGDebugInfo *DI = getDebugInfo();
+  if (DI) {
+    DI->setLocation(S.getLBracLoc());
+    DI->EmitRegionStart(CurFn, Builder);
+  }
+
+  // Keep track of the current cleanup stack depth.
+  CleanupScope Scope(*this);
+
+  // When GetLast is set, stop one statement early; the last statement is
+  // emitted separately below as the value of the statement expression.
+  for (CompoundStmt::const_body_iterator I = S.body_begin(),
+       E = S.body_end()-GetLast; I != E; ++I)
+    EmitStmt(*I);
+
+  if (DI) {
+    DI->setLocation(S.getLBracLoc());
+    DI->EmitRegionEnd(CurFn, Builder);
+  }
+
+  RValue RV;
+  if (!GetLast)
+    RV = RValue::get(0);
+  else {
+    // We have to special case labels here.  They are statements, but when put
+    // at the end of a statement expression, they yield the value of their
+    // subexpression.  Handle this by walking through all labels we encounter,
+    // emitting them before we evaluate the subexpr.
+    const Stmt *LastStmt = S.body_back();
+    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+      EmitLabel(*LS);
+      LastStmt = LS->getSubStmt();
+    }
+
+    EnsureInsertPoint();
+
+    RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
+  }
+
+  return RV;
+}
+
+// SimplifyForwardingBlocks - If BB consists solely of an unconditional branch,
+// forward its uses to the branch target and delete it.
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+  // If there is a cleanup stack, then it isn't worth trying to
+  // simplify this block (we would need to remove it from the scope map
+  // and cleanup entry).
+  if (!CleanupEntries.empty())
+    return;
+
+  // Can only simplify direct branches.
+  if (!BI || !BI->isUnconditional())
+    return;
+
+  BB->replaceAllUsesWith(BI->getSuccessor(0));
+  BI->eraseFromParent();
+  BB->eraseFromParent();
+}
+
+// EmitBlock - Make BB the current insertion block, falling through from the
+// previous block if needed.  If IsFinished and BB is unused it is deleted
+// instead of being inserted into the function.
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+  // Fall out of the current block (if necessary).
+  EmitBranch(BB);
+
+  if (IsFinished && BB->use_empty()) {
+    delete BB;
+    return;
+  }
+
+  // If necessary, associate the block with the cleanup stack size.
+  if (!CleanupEntries.empty()) {
+    // Check if the basic block has already been inserted.
+    BlockScopeMap::iterator I = BlockScopes.find(BB);
+    if (I != BlockScopes.end()) {
+      assert(I->second == CleanupEntries.size() - 1);
+    } else {
+      BlockScopes[BB] = CleanupEntries.size() - 1;
+      CleanupEntries.back().Blocks.push_back(BB);
+    }
+  }
+
+  CurFn->getBasicBlockList().push_back(BB);
+  Builder.SetInsertPoint(BB);
+}
+
+// EmitBranch - Terminate the current block with a branch to Target (unless it
+// is already terminated or there is no insert point), then clear the
+// insertion point so subsequent emission must establish a new block.
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+  // Emit a branch from the current block to the target one if this
+  // was a real block.  If this was just a fall-through block after a
+  // terminator, don't emit it.
+  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+  if (!CurBB || CurBB->getTerminator()) {
+    // If there is no insert point or the previous block is already
+    // terminated, don't touch it.
+  } else {
+    // Otherwise, create a fall-through branch.
+    Builder.CreateBr(Target);
+  }
+
+  Builder.ClearInsertionPoint();
+}
+
+// EmitLabel - Switch emission to the basic block associated with this label.
+void CodeGenFunction::EmitLabel(const LabelStmt &S) {
+  EmitBlock(getBasicBlockForLabel(&S));
+}
+
+
+// EmitLabelStmt - Emit the label itself, then its sub-statement.
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+  EmitLabel(S);
+  EmitStmt(S.getSubStmt());
+}
+
+// EmitGotoStmt - Branch to the label's block, running any intervening
+// cleanups via EmitBranchThroughCleanup.
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+  // If this code is reachable then emit a stop point (if generating
+  // debug info). We have to do this ourselves because we are on the
+  // "simple" statement path.
+  if (HaveInsertPoint())
+    EmitStopPoint(&S);
+
+  EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
+}
+
+
+// EmitIndirectGotoStmt - Emit a GNU computed goto: cast the target address to
+// i8*, feed it into the shared indirect-goto dispatch block's PHI, and branch
+// there.
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+  // Ensure that we have an i8* for our PHI node.
+  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
+                                         llvm::Type::getInt8PtrTy(VMContext),
+                                          "addr");
+  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+  
+
+  // Get the basic block for the indirect goto.
+  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
+  
+  // The first instruction in the block has to be the PHI for the switch dest,
+  // add an entry for this branch.
+  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
+  
+  EmitBranch(IndGotoBB);
+}
+
+// EmitIfStmt - Emit an if/else.  Constant-foldable conditions elide the dead
+// arm (unless it contains a label that could be jumped into); otherwise a
+// standard then/else/end block structure is emitted.
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+  // C99 6.8.4.1: The first substatement is executed if the expression compares
+  // unequal to 0.  The condition must be a scalar type.
+  CleanupScope ConditionScope(*this);
+
+  if (S.getConditionVariable())
+    EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+  // If the condition constant folds and can be elided, try to avoid emitting
+  // the condition and the dead arm of the if/else.
+  if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
+    // Figure out which block (then or else) is executed.
+    const Stmt *Executed = S.getThen(), *Skipped  = S.getElse();
+    if (Cond == -1)  // Condition false?
+      std::swap(Executed, Skipped);
+
+    // If the skipped block has no labels in it, just emit the executed block.
+    // This avoids emitting dead code and simplifies the CFG substantially.
+    if (!ContainsLabel(Skipped)) {
+      if (Executed) {
+        CleanupScope ExecutedScope(*this);
+        EmitStmt(Executed);
+      }
+      return;
+    }
+  }
+
+  // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
+  // the conditional branch.
+  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+  llvm::BasicBlock *ElseBlock = ContBlock;
+  if (S.getElse())
+    ElseBlock = createBasicBlock("if.else");
+  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+  // Emit the 'then' code.
+  EmitBlock(ThenBlock); 
+  {
+    CleanupScope ThenScope(*this);
+    EmitStmt(S.getThen());
+  }
+  EmitBranch(ContBlock);
+
+  // Emit the 'else' code if present.
+  if (const Stmt *Else = S.getElse()) {
+    EmitBlock(ElseBlock);
+    {
+      CleanupScope ElseScope(*this);
+      EmitStmt(Else);
+    }
+    EmitBranch(ContBlock);
+  }
+
+  // Emit the continuation block for code after the if.
+  EmitBlock(ContBlock, true);
+}
+
+// EmitWhileStmt - Emit a while loop: condition header, body, and exit block.
+// A condition variable with cleanups gets a dedicated cleanup block between
+// the body and the back-edge; a constant-true condition skips the conditional
+// branch entirely (the header may then be simplified away afterwards).
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+  // Emit the header for the loop, insert it, which will create an uncond br to
+  // it.
+  llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
+  EmitBlock(LoopHeader);
+
+  // Create an exit block for when the condition fails, create a block for the
+  // body of the loop.
+  llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
+  llvm::BasicBlock *LoopBody  = createBasicBlock("while.body");
+  llvm::BasicBlock *CleanupBlock = 0;
+  llvm::BasicBlock *EffectiveExitBlock = ExitBlock;
+
+  // Store the blocks to use for break and continue.
+  BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
+
+  // C++ [stmt.while]p2:
+  //   When the condition of a while statement is a declaration, the
+  //   scope of the variable that is declared extends from its point
+  //   of declaration (3.3.2) to the end of the while statement.
+  //   [...]
+  //   The object created in a condition is destroyed and created
+  //   with each iteration of the loop.
+  CleanupScope ConditionScope(*this);
+
+  if (S.getConditionVariable()) {
+    EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+    // If this condition variable requires cleanups, create a basic
+    // block to handle those cleanups.
+    if (ConditionScope.requiresCleanups()) {
+      CleanupBlock = createBasicBlock("while.cleanup");
+      EffectiveExitBlock = CleanupBlock;
+    }
+  }
+  
+  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
+  // evaluation of the controlling expression takes place before each
+  // execution of the loop body.
+  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+   
+  // while(1) is common, avoid extra exit blocks.  Be sure
+  // to correctly handle break/continue though.
+  bool EmitBoolCondBranch = true;
+  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+    if (C->isOne())
+      EmitBoolCondBranch = false;
+
+  // As long as the condition is true, go to the loop body.
+  if (EmitBoolCondBranch)
+    Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock);
+ 
+  // Emit the loop body.
+  {
+    CleanupScope BodyScope(*this);
+    EmitBlock(LoopBody);
+    EmitStmt(S.getBody());
+  }
+
+  BreakContinueStack.pop_back();
+
+  if (CleanupBlock) {
+    // If we have a cleanup block, jump there to perform cleanups
+    // before looping.
+    EmitBranch(CleanupBlock);
+
+    // Emit the cleanup block, performing cleanups for the condition
+    // and then jumping to either the loop header or the exit block.
+    EmitBlock(CleanupBlock);
+    ConditionScope.ForceCleanup();
+    Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock);
+  } else {
+    // Cycle to the condition.
+    EmitBranch(LoopHeader);
+  }
+
+  // Emit the exit block.
+  EmitBlock(ExitBlock, true);
+
+
+  // The LoopHeader typically is just a branch if we skipped emitting
+  // a branch, try to erase it.
+  if (!EmitBoolCondBranch && !CleanupBlock)
+    SimplifyForwardingBlocks(LoopHeader);
+}
+
+/// EmitDoStmt - Emit code for a do/while loop.  Layout: the body block, a
+/// condition block (the 'continue' target), and an exit block (the 'break'
+/// target).
+void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
+  // Emit the body for the loop, insert it, which will create an uncond br to
+  // it.
+  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+  llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
+  EmitBlock(LoopBody);
+
+  llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
+
+  // Store the blocks to use for break and continue.
+  BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
+
+  // Emit the body of the loop into the block.
+  EmitStmt(S.getBody());
+
+  BreakContinueStack.pop_back();
+
+  EmitBlock(DoCond);
+
+  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+  // after each execution of the loop body."
+
+  // Evaluate the conditional in the while header.
+  // C99 6.8.5p2/p4: The first substatement is executed if the expression
+  // compares unequal to 0.  The condition must be a scalar type.
+  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
+  // to correctly handle break/continue though.
+  bool EmitBoolCondBranch = true;
+  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+    if (C->isZero())
+      EmitBoolCondBranch = false;
+
+  // As long as the condition is true, iterate the loop.
+  if (EmitBoolCondBranch)
+    Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
+
+  // Emit the exit block.
+  EmitBlock(AfterDo);
+
+  // The DoCond block typically is just a branch if we skipped
+  // emitting a branch, try to erase it.
+  if (!EmitBoolCondBranch)
+    SimplifyForwardingBlocks(DoCond);
+}
+
+/// EmitForStmt - Emit code for a for loop.  Layout: the init emitted
+/// straight-line, a condition block, the body, an optional increment block
+/// (the 'continue' target when an increment exists, otherwise the condition
+/// block is reused), and an exit block (the 'break' target).  A cleanup
+/// block is added when the condition declares a variable requiring cleanups.
+void CodeGenFunction::EmitForStmt(const ForStmt &S) {
+  // FIXME: What do we do if the increment (f.e.) contains a stmt expression,
+  // which contains a continue/break?
+  CleanupScope ForScope(*this);
+
+  // Evaluate the first part before the loop.
+  if (S.getInit())
+    EmitStmt(S.getInit());
+
+  // Start the loop with a block that tests the condition.
+  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+  llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
+  llvm::BasicBlock *IncBlock = 0;
+  llvm::BasicBlock *CondCleanup = 0;
+  llvm::BasicBlock *EffectiveExitBlock = AfterFor;
+  EmitBlock(CondBlock);
+
+  // Create a cleanup scope for the condition variable cleanups.
+  CleanupScope ConditionScope(*this);
+  
+  llvm::Value *BoolCondVal = 0;
+  if (S.getCond()) {
+    // If the for statement has a condition scope, emit the local variable
+    // declaration.
+    if (S.getConditionVariable()) {
+      EmitLocalBlockVarDecl(*S.getConditionVariable());
+      
+      if (ConditionScope.requiresCleanups()) {
+        CondCleanup = createBasicBlock("for.cond.cleanup");
+        EffectiveExitBlock = CondCleanup;
+      }
+    }
+    
+    // As long as the condition is true, iterate the loop.
+    llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+    // C99 6.8.5p2/p4: The first substatement is executed if the expression
+    // compares unequal to 0.  The condition must be a scalar type.
+    BoolCondVal = EvaluateExprAsBool(S.getCond());
+    Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock);
+
+    EmitBlock(ForBody);
+  } else {
+    // Treat it as a non-zero constant.  Don't even create a new block for the
+    // body, just fall into it.
+  }
+
+  // If the for loop doesn't have an increment we can just use the
+  // condition as the continue block.
+  llvm::BasicBlock *ContinueBlock;
+  if (S.getInc())
+    ContinueBlock = IncBlock = createBasicBlock("for.inc");
+  else
+    ContinueBlock = CondBlock;
+
+  // Store the blocks to use for break and continue.
+  BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
+
+  // If the condition is true, execute the body of the for stmt.
+  CGDebugInfo *DI = getDebugInfo();
+  if (DI) {
+    DI->setLocation(S.getSourceRange().getBegin());
+    DI->EmitRegionStart(CurFn, Builder);
+  }
+
+  {
+    // Create a separate cleanup scope for the body, in case it is not
+    // a compound statement.
+    CleanupScope BodyScope(*this);
+    EmitStmt(S.getBody());
+  }
+
+  BreakContinueStack.pop_back();
+
+  // If there is an increment, emit it next.
+  if (S.getInc()) {
+    EmitBlock(IncBlock);
+    EmitStmt(S.getInc());
+  }
+
+  // Finally, branch back up to the condition for the next iteration.
+  if (CondCleanup) {
+    // Branch to the cleanup block.
+    EmitBranch(CondCleanup);
+
+    // Emit the cleanup block, which branches back to the loop body or
+    // outside of the for statement once it is done.
+    EmitBlock(CondCleanup);
+    ConditionScope.ForceCleanup();
+    Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor);
+  } else
+    EmitBranch(CondBlock);
+  if (DI) {
+    DI->setLocation(S.getSourceRange().getEnd());
+    DI->EmitRegionEnd(CurFn, Builder);
+  }
+
+  // Emit the fall-through block.
+  EmitBlock(AfterFor, true);
+}
+
+/// EmitReturnOfRValue - Store the given rvalue into the function's return
+/// slot, then branch through any active cleanups to the shared return block.
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+  if (RV.isScalar())
+    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+  else if (RV.isAggregate())
+    EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+  else
+    StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+
+  EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void.  Fun stuff :).  The value (if any) is stored into the
+/// ReturnValue slot; the actual 'ret' is emitted by the shared return block
+/// reached via EmitBranchThroughCleanup.
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+  // Emit the result value, even if unused, to evaluate the side effects.
+  const Expr *RV = S.getRetValue();
+
+  // FIXME: Clean this up by using an LValue for ReturnTemp,
+  // EmitStoreThroughLValue, and EmitAnyExpr.
+  if (!ReturnValue) {
+    // Make sure not to return anything, but evaluate the expression
+    // for side effects.
+    if (RV)
+      EmitAnyExpr(RV);
+  } else if (RV == 0) {
+    // Do nothing (return value is left uninitialized)
+  } else if (FnRetTy->isReferenceType()) {
+    // If this function returns a reference, take the address of the expression
+    // rather than the value.
+    Builder.CreateStore(EmitLValue(RV).getAddress(), ReturnValue);
+  } else if (!hasAggregateLLVMType(RV->getType())) {
+    // Scalar result: evaluate and store into the return slot.
+    Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+  } else if (RV->getType()->isAnyComplexType()) {
+    EmitComplexExprIntoAddr(RV, ReturnValue, false);
+  } else {
+    // Aggregate result: evaluate directly into the return slot.
+    EmitAggExpr(RV, ReturnValue, false);
+  }
+
+  EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitDeclStmt - Emit code for each declaration contained in a DeclStmt.
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+  // Debug info is modeled with instructions, so before writing the stop
+  // point we must guarantee there is somewhere to insert it.
+  if (getDebugInfo()) {
+    EnsureInsertPoint();
+    EmitStopPoint(&S);
+  }
+
+  typedef DeclStmt::const_decl_iterator DeclIter;
+  for (DeclIter DI = S.decl_begin(), DE = S.decl_end(); DI != DE; ++DI)
+    EmitDecl(**DI);
+}
+
+/// EmitBreakStmt - Emit a branch (through any pending cleanups) to the
+/// innermost enclosing break target.
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+  // We are on the "simple" statement path, so if this point is reachable we
+  // must emit the debug stop point ourselves.
+  if (HaveInsertPoint())
+    EmitStopPoint(&S);
+
+  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
+}
+
+/// EmitContinueStmt - Emit a branch (through any pending cleanups) to the
+/// innermost enclosing continue target.
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+  // We are on the "simple" statement path, so if this point is reachable we
+  // must emit the debug stop point ourselves.
+  if (HaveInsertPoint())
+    EmitStopPoint(&S);
+
+  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
+}
+
+/// EmitCaseStmtRange - If case statement range is not too big then
+/// add multiple cases to switch instruction, one for each value within
+/// the range. If range is too big then emit "if" condition check.
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+  assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+  llvm::APSInt LHS = S.getLHS()->EvaluateAsInt(getContext());
+  llvm::APSInt RHS = S.getRHS()->EvaluateAsInt(getContext());
+
+  // Emit the code for this case. We do this first to make sure it is
+  // properly chained from our predecessor before generating the
+  // switch machinery to enter this block.
+  EmitBlock(createBasicBlock("sw.bb"));
+  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+  EmitStmt(S.getSubStmt());
+
+  // If range is empty, do nothing.
+  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+    return;
+
+  llvm::APInt Range = RHS - LHS;
+  // FIXME: parameters such as this should not be hardcoded.
+  // Ranges spanning fewer than 64 values are expanded into individual cases.
+  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+    // Range is small enough to add multiple switch instruction cases.
+    for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+      SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, LHS), CaseDest);
+      LHS++;
+    }
+    return;
+  }
+
+  // The range is too big. Emit "if" condition into a new block,
+  // making sure to save and restore the current insertion point.
+  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+  // Push this test onto the chain of range checks (which terminates
+  // in the default basic block). The switch's default will be changed
+  // to the top of this chain after switch emission is complete.
+  llvm::BasicBlock *FalseDest = CaseRangeBlock;
+  CaseRangeBlock = createBasicBlock("sw.caserange");
+
+  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+  Builder.SetInsertPoint(CaseRangeBlock);
+
+  // Emit range check: (Cond - LHS) <=u (RHS - LHS) is an unsigned test
+  // covering the whole [LHS, RHS] interval with a single comparison.
+  llvm::Value *Diff =
+    Builder.CreateSub(SwitchInsn->getCondition(),
+                      llvm::ConstantInt::get(VMContext, LHS),  "tmp");
+  llvm::Value *Cond =
+    Builder.CreateICmpULE(Diff,
+                          llvm::ConstantInt::get(VMContext, Range), "tmp");
+  Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+  // Restore the appropriate insertion point.
+  if (RestoreBB)
+    Builder.SetInsertPoint(RestoreBB);
+  else
+    Builder.ClearInsertionPoint();
+}
+
+/// EmitCaseStmt - Emit a case label: start a new block, register it as a
+/// destination on the current switch instruction, and emit the substatement.
+/// Runs of directly-nested single-value cases are folded iteratively to
+/// avoid deep recursion and redundant blocks.
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+  if (S.getRHS()) {
+    EmitCaseStmtRange(S);
+    return;
+  }
+
+  EmitBlock(createBasicBlock("sw.bb"));
+  llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+  llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
+  SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+
+  // Recursively emitting the statement is acceptable, but is not wonderful for
+  // code where we have many case statements nested together, i.e.:
+  //  case 1:
+  //    case 2:
+  //      case 3: etc.
+  // Handling this recursively will create a new block for each case statement
+  // that falls through to the next case which is IR intensive.  It also causes
+  // deep recursion which can run into stack depth limitations.  Handle
+  // sequential non-range case statements specially.
+  const CaseStmt *CurCase = &S;
+  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+  // Otherwise, iteratively add consecutive cases to this switch stmt.
+  while (NextCase && NextCase->getRHS() == 0) {
+    CurCase = NextCase;
+    CaseVal = CurCase->getLHS()->EvaluateAsInt(getContext());
+    SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
+
+    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+  }
+
+  // Normal default recursion for non-cases.
+  EmitStmt(CurCase->getSubStmt());
+}
+
+/// EmitDefaultStmt - Emit the default destination of the current switch
+/// instruction, then emit its substatement.
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+  llvm::BasicBlock *DefaultDest = SwitchInsn->getDefaultDest();
+  // The switch was created with a fresh default block; nothing may have
+  // been emitted into it yet.
+  assert(DefaultDest->empty() &&
+         "EmitDefaultStmt: Default block already defined?");
+  EmitBlock(DefaultDest);
+  EmitStmt(S.getSubStmt());
+}
+
+/// EmitSwitchStmt - Emit a switch statement.  The current SwitchInsn and
+/// CaseRangeBlock are saved and restored so that nested switches work; case
+/// labels inside the body attach themselves to SwitchInsn as they are
+/// emitted.
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+  CleanupScope ConditionScope(*this);
+
+  if (S.getConditionVariable())
+    EmitLocalBlockVarDecl(*S.getConditionVariable());
+
+  llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+  // Handle nested switch statements.
+  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+  // Create basic block to hold stuff that comes after switch
+  // statement. We also need to create a default block now so that
+  // explicit case ranges tests can have a place to jump to on
+  // failure.
+  llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
+  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+  CaseRangeBlock = DefaultBlock;
+
+  // Clear the insertion point to indicate we are in unreachable code.
+  Builder.ClearInsertionPoint();
+
+  // All break statements jump to NextBlock. If BreakContinueStack is non empty
+  // then reuse last ContinueBlock.
+  llvm::BasicBlock *ContinueBlock = 0;
+  if (!BreakContinueStack.empty())
+    ContinueBlock = BreakContinueStack.back().ContinueBlock;
+
+  // Ensure any vlas created between there and here, are undone
+  BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
+
+  // Emit switch body.
+  EmitStmt(S.getBody());
+
+  BreakContinueStack.pop_back();
+
+  // Update the default block in case explicit case range tests have
+  // been chained on top.
+  SwitchInsn->setSuccessor(0, CaseRangeBlock);
+
+  // If a default was never emitted then reroute any jumps to it and
+  // discard.
+  if (!DefaultBlock->getParent()) {
+    // The block was never inserted into the function, so it is safe to
+    // delete it after redirecting its uses to the epilog.
+    DefaultBlock->replaceAllUsesWith(NextBlock);
+    delete DefaultBlock;
+  }
+
+  // Emit continuation.
+  EmitBlock(NextBlock, true);
+
+  SwitchInsn = SavedSwitchInsn;
+  CaseRangeBlock = SavedCRBlock;
+}
+
+/// SimplifyConstraint - Lower a GCC inline-asm constraint string into the
+/// form the LLVM backend expects: target-specific constraint letters are
+/// converted via the target hook, modifiers LLVM does not use are dropped,
+/// and symbolic operand references ("[name]") are resolved to operand
+/// numbers.
+///
+/// \param Constraint the GCC constraint string to simplify.
+/// \param Target target hooks used to convert and resolve constraints.
+/// \param OutCons the list of parsed output constraints; must be non-null
+///        whenever the constraint can contain a symbolic name reference.
+static std::string
+SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
+                 llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+  std::string Result;
+
+  while (*Constraint) {
+    switch (*Constraint) {
+    default:
+      Result += Target.convertConstraint(*Constraint);
+      break;
+    // Ignore these
+    case '*':
+    case '?':
+    case '!':
+      break;
+    case 'g':
+      // 'g' is "general operand": immediate, memory, or register.
+      Result += "imr";
+      break;
+    case '[': {
+      assert(OutCons &&
+             "Must pass output names to constraints with a symbolic name");
+      unsigned Index;
+      bool result = Target.resolveSymbolicName(Constraint,
+                                               &(*OutCons)[0],
+                                               OutCons->size(), Index);
+      assert(result && "Could not resolve symbolic name");
+      (void)result; // Silence unused-variable warning in NDEBUG builds.
+      Result += llvm::utostr(Index);
+      break;
+    }
+    }
+
+    Constraint++;
+  }
+
+  return Result;
+}
+
+/// EmitAsmInput - Emit the llvm::Value for an asm operand used as an input
+/// (including the input half of a read/write output).  Operands that may
+/// live in a register are passed by value; memory-only operands are passed
+/// by address, which is recorded by appending '*' to the constraint string.
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+                                         const TargetInfo::ConstraintInfo &Info,
+                                           const Expr *InputExpr,
+                                           std::string &ConstraintStr) {
+  llvm::Value *Arg;
+  if (Info.allowsRegister() || !Info.allowsMemory()) {
+    if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType())) {
+      Arg = EmitScalarExpr(InputExpr);
+    } else {
+      InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+      LValue Dest = EmitLValue(InputExpr);
+
+      // A small power-of-two-sized aggregate can be loaded as a single
+      // integer of the same width so it fits in a register operand.
+      const llvm::Type *Ty = ConvertType(InputExpr->getType());
+      uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+        Ty = llvm::IntegerType::get(VMContext, Size);
+        Ty = llvm::PointerType::getUnqual(Ty);
+
+        Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
+      } else {
+        // Too big for a register; pass it indirectly.
+        Arg = Dest.getAddress();
+        ConstraintStr += '*';
+      }
+    }
+  } else {
+    // Memory-only operand: pass the address and mark it indirect.
+    InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+    LValue Dest = EmitLValue(InputExpr);
+    Arg = Dest.getAddress();
+    ConstraintStr += '*';
+  }
+
+  return Arg;
+}
+
+/// EmitAsmStmt - Emit a GCC (extended) inline-asm statement as an LLVM
+/// inline-asm call.  Register outputs become return values of the call;
+/// memory outputs and all inputs become call arguments.  Sema has already
+/// validated the asm string and every constraint, so analysis here asserts
+/// success instead of diagnosing.
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+  // Analyze the asm string to decompose it into its pieces.  We know that Sema
+  // has already done this, so it is guaranteed to be successful.
+  llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+  unsigned DiagOffs;
+  S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
+
+  // Assemble the pieces into the final asm string.
+  std::string AsmString;
+  for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+    if (Pieces[i].isString())
+      AsmString += Pieces[i].getString();
+    else if (Pieces[i].getModifier() == '\0')
+      AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+    else
+      AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+                   Pieces[i].getModifier() + '}';
+  }
+
+  // Get all the output and input constraints together.
+  llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+  llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
+                                    S.getOutputName(i));
+    assert(Target.validateOutputConstraint(Info) && 
+           "Failed to parse output constraint");
+    OutputConstraintInfos.push_back(Info);
+  }
+
+  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
+                                    S.getInputName(i));
+    assert(Target.validateInputConstraint(OutputConstraintInfos.data(),
+                                          S.getNumOutputs(),
+                                          Info) &&
+           "Failed to parse input constraint");
+    InputConstraintInfos.push_back(Info);
+  }
+
+  // The accumulated LLVM-level constraint string for the entire asm.
+  std::string Constraints;
+
+  // For register outputs: where each result is stored, its source-level
+  // type, the type the asm returns, and the type actually stored (the asm
+  // may return a wider integer than is stored; see tied operands below).
+  std::vector<LValue> ResultRegDests;
+  std::vector<QualType> ResultRegQualTys;
+  std::vector<const llvm::Type *> ResultRegTypes;
+  std::vector<const llvm::Type *> ResultTruncRegTypes;
+  // Types and values of the arguments passed to the asm call.
+  std::vector<const llvm::Type*> ArgTypes;
+  std::vector<llvm::Value*> Args;
+
+  // Keep track of inout constraints.
+  std::string InOutConstraints;
+  std::vector<llvm::Value*> InOutArgs;
+  std::vector<const llvm::Type*> InOutArgTypes;
+
+  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+    // Simplify the output constraint (skipping the leading '=').
+    std::string OutputConstraint(S.getOutputConstraint(i));
+    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+
+    const Expr *OutExpr = S.getOutputExpr(i);
+    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+    LValue Dest = EmitLValue(OutExpr);
+    if (!Constraints.empty())
+      Constraints += ',';
+
+    // If this is a register output, then make the inline asm return it
+    // by-value.  If this is a memory result, return the value by-reference.
+    if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+      Constraints += "=" + OutputConstraint;
+      ResultRegQualTys.push_back(OutExpr->getType());
+      ResultRegDests.push_back(Dest);
+      ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+      ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+      // If this output is tied to an input, and if the input is larger, then
+      // we need to set the actual result type of the inline asm node to be the
+      // same as the input type.
+      if (Info.hasMatchingInput()) {
+        unsigned InputNo;
+        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+          if (Input.hasTiedOperand() &&
+              Input.getTiedOperand() == i)
+            break;
+        }
+        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+        QualType InputTy = S.getInputExpr(InputNo)->getType();
+        QualType OutputTy = OutExpr->getType();
+
+        uint64_t InputSize = getContext().getTypeSize(InputTy);
+        if (getContext().getTypeSize(OutputTy) < InputSize) {
+          // Form the asm to return the value as a larger integer type.
+          ResultRegTypes.back() = llvm::IntegerType::get(VMContext, (unsigned)InputSize);
+        }
+      }
+    } else {
+      // Memory output: pass the address as an argument and mark it "=*".
+      ArgTypes.push_back(Dest.getAddress()->getType());
+      Args.push_back(Dest.getAddress());
+      Constraints += "=*";
+      Constraints += OutputConstraint;
+    }
+
+    if (Info.isReadWrite()) {
+      // A "+" (read/write) output also acts as an input; record its input
+      // half now and splice it into the constraint string later.
+      InOutConstraints += ',';
+
+      const Expr *InputExpr = S.getOutputExpr(i);
+      llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, InOutConstraints);
+
+      if (Info.allowsRegister())
+        InOutConstraints += llvm::utostr(i);
+      else
+        InOutConstraints += OutputConstraint;
+
+      InOutArgTypes.push_back(Arg->getType());
+      InOutArgs.push_back(Arg);
+    }
+  }
+
+  unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+
+  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+    const Expr *InputExpr = S.getInputExpr(i);
+
+    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+    if (!Constraints.empty())
+      Constraints += ',';
+
+    // Simplify the input constraint.
+    std::string InputConstraint(S.getInputConstraint(i));
+    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+                                         &OutputConstraintInfos);
+
+    llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+
+    // If this input argument is tied to a larger output result, extend the
+    // input to be the same size as the output.  The LLVM backend wants to see
+    // the input and output of a matching constraint be the same size.  Note
+    // that GCC does not define what the top bits are here.  We use zext because
+    // that is usually cheaper, but LLVM IR should really get an anyext someday.
+    if (Info.hasTiedOperand()) {
+      unsigned Output = Info.getTiedOperand();
+      QualType OutputTy = S.getOutputExpr(Output)->getType();
+      QualType InputTy = InputExpr->getType();
+
+      if (getContext().getTypeSize(OutputTy) >
+          getContext().getTypeSize(InputTy)) {
+        // Use ptrtoint as appropriate so that we can do our extension.
+        if (isa<llvm::PointerType>(Arg->getType()))
+          Arg = Builder.CreatePtrToInt(Arg,
+                                      llvm::IntegerType::get(VMContext, LLVMPointerWidth));
+        unsigned OutputSize = (unsigned)getContext().getTypeSize(OutputTy);
+        Arg = Builder.CreateZExt(Arg, llvm::IntegerType::get(VMContext, OutputSize));
+      }
+    }
+
+
+    ArgTypes.push_back(Arg->getType());
+    Args.push_back(Arg);
+    Constraints += InputConstraint;
+  }
+
+  // Append the "input" part of inout constraints last.
+  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+    ArgTypes.push_back(InOutArgTypes[i]);
+    Args.push_back(InOutArgs[i]);
+  }
+  Constraints += InOutConstraints;
+
+  // Clobbers
+  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+    llvm::StringRef Clobber = S.getClobber(i)->getString();
+
+    Clobber = Target.getNormalizedGCCRegisterName(Clobber);
+
+    if (i != 0 || NumConstraints != 0)
+      Constraints += ',';
+
+    Constraints += "~{";
+    Constraints += Clobber;
+    Constraints += '}';
+  }
+
+  // Add machine specific clobbers
+  std::string MachineClobbers = Target.getClobbers();
+  if (!MachineClobbers.empty()) {
+    if (!Constraints.empty())
+      Constraints += ',';
+    Constraints += MachineClobbers;
+  }
+
+  // The asm returns void, a single value, or a struct of all register
+  // results.
+  const llvm::Type *ResultType;
+  if (ResultRegTypes.empty())
+    ResultType = llvm::Type::getVoidTy(VMContext);
+  else if (ResultRegTypes.size() == 1)
+    ResultType = ResultRegTypes[0];
+  else
+    ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
+
+  const llvm::FunctionType *FTy =
+    llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+  // An asm with no outputs is treated as having side effects (volatile).
+  llvm::InlineAsm *IA =
+    llvm::InlineAsm::get(FTy, AsmString, Constraints,
+                         S.isVolatile() || S.getNumOutputs() == 0);
+  llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
+  // Mark the call nounwind; inline asm does not unwind.
+  Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+
+
+  // Extract all of the register value results from the asm.
+  std::vector<llvm::Value*> RegResults;
+  if (ResultRegTypes.size() == 1) {
+    RegResults.push_back(Result);
+  } else {
+    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+      llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+      RegResults.push_back(Tmp);
+    }
+  }
+
+  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+    llvm::Value *Tmp = RegResults[i];
+
+    // If the result type of the LLVM IR asm doesn't match the result type of
+    // the expression, do the conversion.
+    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+      const llvm::Type *TruncTy = ResultTruncRegTypes[i];
+      // Truncate the integer result to the right size, note that
+      // ResultTruncRegTypes can be a pointer.
+      uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+      Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext, (unsigned)ResSize));
+
+      if (Tmp->getType() != TruncTy) {
+        assert(isa<llvm::PointerType>(TruncTy));
+        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+      }
+    }
+
+    EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
+                           ResultRegQualTys[i]);
+  }
+}
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
new file mode 100644
index 0000000..bed8439
--- /dev/null
+++ b/lib/CodeGen/CGTemporaries.cpp
@@ -0,0 +1,163 @@
+//===--- CGTemporaries.cpp - Emit LLVM Code for C++ temporaries -----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of temporaries
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// PushCXXTemporary - Register a live C++ temporary (and the pointer to its
+/// storage) so that its destructor runs when the enclosing cleanup scope is
+/// popped, and, when exceptions are enabled, along the EH path as well.
+/// Inside a conditionally-evaluated expression the destructor call is
+/// guarded by a runtime flag recording whether the temporary was created.
+void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
+                                       llvm::Value *Ptr) {
+  assert((LiveTemporaries.empty() ||
+          LiveTemporaries.back().ThisPtr != Ptr ||
+          ConditionalBranchLevel) &&
+         "Pushed the same temporary twice; AST is likely wrong");
+  llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
+
+  llvm::Value *CondPtr = 0;
+
+  // Check if temporaries need to be conditional. If so, we'll create a
+  // condition boolean, initialize it to false, and set it to true only on
+  // the path where the temporary is actually constructed.
+  if (ConditionalBranchLevel != 0) {
+    CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond");
+
+    // Initialize it to false. This initialization takes place right after
+    // the alloca insert point.
+    llvm::StoreInst *SI =
+      new llvm::StoreInst(llvm::ConstantInt::getFalse(VMContext), CondPtr);
+    llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+    Block->getInstList().insertAfter((llvm::Instruction *)AllocaInsertPt, SI);
+
+    // Now set it to true.
+    Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
+  }
+
+  LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
+                                                 CondPtr));
+
+  PushCleanupBlock(DtorBlock);
+
+  if (Exceptions) {
+    const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
+    llvm::BasicBlock *CondEnd = 0;
+    
+    EHCleanupBlock Cleanup(*this);
+
+    // If this is a conditional temporary, we need to check the condition
+    // boolean and only call the destructor if it's true.
+    if (Info.CondPtr) {
+      llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
+      CondEnd = createBasicBlock("cond.dtor.end");
+
+      llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
+      Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+      EmitBlock(CondBlock);
+    }
+
+    EmitCXXDestructorCall(Info.Temporary->getDestructor(),
+                          Dtor_Complete, Info.ThisPtr);
+
+    if (CondEnd) {
+      // Reset the condition to false.
+      Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
+      EmitBlock(CondEnd);
+    }
+  }
+}
+
+/// PopCXXTemporary - Emit the destructor call for the most recently pushed
+/// temporary, retire its cleanup block, and remove it from the live list.
+void CodeGenFunction::PopCXXTemporary() {
+  const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
+
+  CleanupBlockInfo CleanupInfo = PopCleanupBlock();
+  assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
+         "Cleanup block mismatch!");
+  assert(!CleanupInfo.SwitchBlock &&
+         "Should not have a switch block for temporary cleanup!");
+  assert(!CleanupInfo.EndBlock &&
+         "Should not have an end block for temporary cleanup!");
+
+  // If the dtor block has no other users, fold its instructions into the
+  // current block instead of emitting a separate block.
+  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+  if (CurBB && !CurBB->getTerminator() &&
+      Info.DtorBlock->getNumUses() == 0) {
+    CurBB->getInstList().splice(CurBB->end(), Info.DtorBlock->getInstList());
+    delete Info.DtorBlock;
+  } else
+    EmitBlock(Info.DtorBlock);
+
+  llvm::BasicBlock *CondEnd = 0;
+
+  // If this is a conditional temporary, we need to check the condition
+  // boolean and only call the destructor if it's true.
+  if (Info.CondPtr) {
+    llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
+    CondEnd = createBasicBlock("cond.dtor.end");
+
+    llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
+    Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+    EmitBlock(CondBlock);
+  }
+
+  EmitCXXDestructorCall(Info.Temporary->getDestructor(),
+                        Dtor_Complete, Info.ThisPtr);
+
+  if (CondEnd) {
+    // Reset the condition to false.
+    Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
+    EmitBlock(CondEnd);
+  }
+
+  LiveTemporaries.pop_back();
+}
+
+/// EmitCXXExprWithTemporaries - Emit the subexpression of an expression that
+/// creates temporaries, then destroy every temporary it created, restoring
+/// the live-temporary and cleanup stacks to their prior depth.
+RValue
+CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+                                            llvm::Value *AggLoc,
+                                            bool IsAggLocVolatile,
+                                            bool IsInitializer) {
+  // Keep track of the current cleanup stack depth.
+  size_t CleanupStackDepth = CleanupEntries.size();
+  (void) CleanupStackDepth;
+
+  unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+
+  RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+                          /*IgnoreResult=*/false, IsInitializer);
+
+  // Pop temporaries.
+  while (LiveTemporaries.size() > OldNumLiveTemporaries)
+    PopCXXTemporary();
+
+  assert(CleanupEntries.size() == CleanupStackDepth &&
+         "Cleanup size mismatch!");
+
+  return RV;
+}
+
+/// EmitCXXExprWithTemporariesLValue - LValue analogue of
+/// EmitCXXExprWithTemporaries: emit the subexpression as an lvalue, then
+/// destroy every temporary it created.
+LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
+                                              const CXXExprWithTemporaries *E) {
+  // Keep track of the current cleanup stack depth.
+  size_t CleanupStackDepth = CleanupEntries.size();
+  (void) CleanupStackDepth;
+
+  unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+
+  LValue LV = EmitLValue(E->getSubExpr());
+
+  // Pop temporaries.
+  while (LiveTemporaries.size() > OldNumLiveTemporaries)
+    PopCXXTemporary();
+
+  assert(CleanupEntries.size() == CleanupStackDepth &&
+         "Cleanup size mismatch!");
+
+  return LV;
+}
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
new file mode 100644
index 0000000..9714bd9
--- /dev/null
+++ b/lib/CodeGen/CGVTT.cpp
@@ -0,0 +1,398 @@
+//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
// This contains code dealing with C++ code generation of VTTs (virtual table
// tables).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "clang/AST/RecordLayout.h"
+using namespace clang;
+using namespace CodeGen;
+
+#define D1(x)
+
namespace {
/// VTTBuilder - Builds the initializer for a VTT, the table of vtable
/// pointers used when constructing complete objects of classes with virtual
/// bases (Itanium C++ ABI 2.6.2).  All of the work happens in the
/// constructor, which appends entries to the Inits vector it is given.
class VTTBuilder {
  /// Inits - The list of values built for the VTT.
  std::vector<llvm::Constant *> &Inits;
  /// Class - The most derived class that this vtable is being built for.
  const CXXRecordDecl *Class;
  CodeGenModule &CGM;  // Per-module state.
  /// SeenVBase - Virtual bases already visited by VirtualVTTs.
  llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase;
  /// BLayout - Layout for the most derived class that this vtable is being
  /// built for.
  const ASTRecordLayout &BLayout;
  CGVtableInfo::AddrMap_t &AddressPoints;
  // vtbl - A pointer to the vtable for Class.
  llvm::Constant *ClassVtbl;
  llvm::LLVMContext &VMContext;

  /// SeenVBasesInSecondary - The seen virtual bases when building the
  /// secondary virtual pointers.
  llvm::SmallPtrSet<const CXXRecordDecl *, 32> SeenVBasesInSecondary;

  /// SubVTTIndicies - Maps each base class with its own sub-VTT to that
  /// sub-VTT's index within this VTT.
  llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies;

  /// GenerateDefinition - When false, only indices are computed and all
  /// emitted constants are null; used to answer layout queries without
  /// generating code.
  bool GenerateDefinition;

  /// CtorVtables - Cache of construction vtables, keyed by base subobject.
  llvm::DenseMap<BaseSubobject, llvm::Constant *> CtorVtables;
  /// CtorVtableAddressPoints - Address points of the cached construction
  /// vtables, keyed by (base class, base subobject).
  llvm::DenseMap<std::pair<const CXXRecordDecl *, BaseSubobject>, uint64_t>
    CtorVtableAddressPoints;

  /// getCtorVtable - Return the construction vtable for the given base
  /// subobject, building and caching it (and its address points) on first
  /// use.  Returns null when not generating a definition.
  llvm::Constant *getCtorVtable(const BaseSubobject &Base) {
    if (!GenerateDefinition)
      return 0;

    llvm::Constant *&CtorVtable = CtorVtables[Base];
    if (!CtorVtable) {
      // Build the vtable.
      CGVtableInfo::CtorVtableInfo Info
        = CGM.getVtableInfo().getCtorVtable(Class, Base);

      CtorVtable = Info.Vtable;

      // Add the address points for this base.
      for (CGVtableInfo::AddressPointsMapTy::const_iterator I =
           Info.AddressPoints.begin(), E = Info.AddressPoints.end();
           I != E; ++I) {
        uint64_t &AddressPoint =
          CtorVtableAddressPoints[std::make_pair(Base.getBase(), I->first)];

        // Check if we already have the address points for this base.
        if (AddressPoint)
          break;

        // Otherwise, insert it.
        AddressPoint = I->second;
      }
    }

    return CtorVtable;
  }


  /// BuildVtablePtr - Build up a reference to the given secondary vtable:
  /// a pointer into Vtable at its address point, bitcast to i8*.
  llvm::Constant *BuildVtablePtr(llvm::Constant *Vtable,
                                 const CXXRecordDecl *VtableClass,
                                 const CXXRecordDecl *RD,
                                 uint64_t Offset) {
    if (!GenerateDefinition)
      return 0;

    uint64_t AddressPoint;

    if (VtableClass != Class) {
      // We have a ctor vtable, look for the address point in the ctor vtable
      // address points.
      AddressPoint =
        CtorVtableAddressPoints[std::make_pair(VtableClass,
                                               BaseSubobject(RD, Offset))];
    } else {
      AddressPoint =
        (*AddressPoints[VtableClass])[std::make_pair(RD, Offset)];
    }

    // FIXME: We can never have 0 address point.  Do this for now so gepping
    // retains the same structure.  Later we'll just assert.
    if (AddressPoint == 0)
      AddressPoint = 1;
    // NOTE(review): this debug trace names 'VtblClass' but the parameter is
    // 'VtableClass'; it only compiles because D1() expands to nothing.
    D1(printf("XXX address point for %s in %s layout %s at offset %d was %d\n",
              RD->getNameAsCString(), VtblClass->getNameAsCString(),
              Class->getNameAsCString(), (int)Offset, (int)AddressPoint));

    llvm::Value *Idxs[] = {
      llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), 0),
      llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), AddressPoint)
    };

    llvm::Constant *Init =
      llvm::ConstantExpr::getInBoundsGetElementPtr(Vtable, Idxs, 2);

    const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
    return llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
  }

  /// Secondary - Add the secondary vtable pointers to Inits.  Offset is the
  /// current offset in bits to the object we're working on.
  void Secondary(const CXXRecordDecl *RD, llvm::Constant *vtbl,
                 const CXXRecordDecl *VtblClass, uint64_t Offset=0,
                 bool MorallyVirtual=false) {
    if (RD->getNumVBases() == 0 && ! MorallyVirtual)
      return;

    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
           e = RD->bases_end(); i != e; ++i) {
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());

      // We only want to visit each virtual base once.
      if (i->isVirtual() && SeenVBasesInSecondary.count(Base))
        continue;

      // Itanium C++ ABI 2.6.2:
      //   Secondary virtual pointers are present for all bases with either
      //   virtual bases or virtual function declarations overridden along a
      //   virtual path.
      //
      // If the base class is not dynamic, we don't want to add it, nor any
      // of its base classes.
      if (!Base->isDynamicClass())
        continue;

      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
      const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
      bool NonVirtualPrimaryBase;
      NonVirtualPrimaryBase = !PrimaryBaseWasVirtual && Base == PrimaryBase;
      bool BaseMorallyVirtual = MorallyVirtual | i->isVirtual();
      // Virtual bases are located from the layout of the most derived class;
      // non-virtual bases at their offset within RD.
      uint64_t BaseOffset;
      if (!i->isVirtual()) {
        const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
        BaseOffset = Offset + Layout.getBaseClassOffset(Base);
      } else
        BaseOffset = BLayout.getVBaseClassOffset(Base);
      llvm::Constant *subvtbl = vtbl;
      const CXXRecordDecl *subVtblClass = VtblClass;
      // A non-virtual primary base shares RD's virtual pointer, so it gets
      // no secondary virtual pointer of its own.
      if ((Base->getNumVBases() || BaseMorallyVirtual)
          && !NonVirtualPrimaryBase) {
        llvm::Constant *init;
        if (BaseMorallyVirtual || VtblClass == Class)
          init = BuildVtablePtr(vtbl, VtblClass, Base, BaseOffset);
        else {
          init = getCtorVtable(BaseSubobject(Base, BaseOffset));

          subvtbl = init;
          subVtblClass = Base;

          init = BuildVtablePtr(init, Class, Base, BaseOffset);
        }

        Inits.push_back(init);
      }

      if (i->isVirtual())
        SeenVBasesInSecondary.insert(Base);

      Secondary(Base, subvtbl, subVtblClass, BaseOffset, BaseMorallyVirtual);
    }
  }

  /// BuildVTT - Add the VTT to Inits.  Offset is the offset in bits to the
  /// current object we're working on.
  void BuildVTT(const CXXRecordDecl *RD, uint64_t Offset, bool MorallyVirtual) {
    // Itanium C++ ABI 2.6.2:
    //   An array of virtual table addresses, called the VTT, is declared for
    //   each class type that has indirect or direct virtual base classes.
    if (RD->getNumVBases() == 0)
      return;

    // Remember the sub-VTT index.
    SubVTTIndicies[RD] = Inits.size();

    llvm::Constant *Vtable;
    const CXXRecordDecl *VtableClass;

    // First comes the primary virtual table pointer...
    if (MorallyVirtual) {
      Vtable = ClassVtbl;
      VtableClass = Class;
    } else {
      Vtable = getCtorVtable(BaseSubobject(RD, Offset));
      VtableClass = RD;
    }

    llvm::Constant *Init = BuildVtablePtr(Vtable, VtableClass, RD, Offset);
    Inits.push_back(Init);

    // then the secondary VTTs....
    SecondaryVTTs(RD, Offset, MorallyVirtual);

    // Make sure to clear the set of seen virtual bases.
    SeenVBasesInSecondary.clear();

    // and last the secondary vtable pointers.
    Secondary(RD, Vtable, VtableClass, Offset, MorallyVirtual);
  }

  /// SecondaryVTTs - Add the secondary VTTs to Inits.  The secondary VTTs are
  /// built from each direct non-virtual proper base that requires a VTT in
  /// declaration order.
  void SecondaryVTTs(const CXXRecordDecl *RD, uint64_t Offset=0,
                     bool MorallyVirtual=false) {
    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
           e = RD->bases_end(); i != e; ++i) {
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
      // Virtual bases are handled by VirtualVTTs, not here.
      if (i->isVirtual())
        continue;
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base);

      BuildVTT(Base, BaseOffset, MorallyVirtual);
    }
  }

  /// VirtualVTTs - Add the VTT for each proper virtual base in inheritance
  /// graph preorder.
  void VirtualVTTs(const CXXRecordDecl *RD) {
    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
           e = RD->bases_end(); i != e; ++i) {
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
      if (i->isVirtual() && !SeenVBase.count(Base)) {
        SeenVBase.insert(Base);
        uint64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
        BuildVTT(Base, BaseOffset, false);
      }
      VirtualVTTs(Base);
    }
  }

public:
  /// VTTBuilder - Build the VTT entries for class 'c' directly into 'inits'.
  /// When GenerateDefinition is false, only the entry count and sub-VTT
  /// indices are computed (all constants are null).
  VTTBuilder(std::vector<llvm::Constant *> &inits, const CXXRecordDecl *c,
             CodeGenModule &cgm, bool GenerateDefinition)
    : Inits(inits), Class(c), CGM(cgm),
      BLayout(cgm.getContext().getASTRecordLayout(c)),
      AddressPoints(*cgm.getVtableInfo().AddressPoints[c]),
      VMContext(cgm.getModule().getContext()),
      GenerateDefinition(GenerateDefinition) {

    // First comes the primary virtual table pointer for the complete class...
    ClassVtbl = GenerateDefinition ? CGM.getVtableInfo().getVtable(Class) : 0;

    llvm::Constant *Init = BuildVtablePtr(ClassVtbl, Class, Class, 0);
    Inits.push_back(Init);

    // then the secondary VTTs...
    SecondaryVTTs(Class);

    // Make sure to clear the set of seen virtual bases.
    SeenVBasesInSecondary.clear();

    // then the secondary vtable pointers...
    Secondary(Class, ClassVtbl, Class);

    // and last, the virtual VTTs.
    VirtualVTTs(Class);
  }

  /// getSubVTTIndicies - Return the map from each base to the index of its
  /// sub-VTT within the VTT just built.
  llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getSubVTTIndicies() {
    return SubVTTIndicies;
  }
};
}
+
+llvm::GlobalVariable *
+CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+                          bool GenerateDefinition,
+                          const CXXRecordDecl *RD) {
+  // Only classes that have virtual bases need a VTT.
+  if (RD->getNumVBases() == 0)
+    return 0;
+
+  llvm::SmallString<256> OutName;
+  CGM.getMangleContext().mangleCXXVTT(RD, OutName);
+  llvm::StringRef Name = OutName.str();
+
+  D1(printf("vtt %s\n", RD->getNameAsCString()));
+
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+  if (GV == 0 || GV->isDeclaration()) {
+    const llvm::Type *Int8PtrTy = 
+      llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+    std::vector<llvm::Constant *> inits;
+    VTTBuilder b(inits, RD, CGM, GenerateDefinition);
+
+    const llvm::ArrayType *Type = llvm::ArrayType::get(Int8PtrTy, inits.size());
+    llvm::Constant *Init = 0;
+    if (GenerateDefinition)
+      Init = llvm::ConstantArray::get(Type, inits);
+
+    llvm::GlobalVariable *OldGV = GV;
+    GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true, 
+                                  Linkage, Init, Name);
+    CGM.setGlobalVisibility(GV, RD);
+    
+    if (OldGV) {
+      GV->takeName(OldGV);
+      llvm::Constant *NewPtr = 
+        llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+      OldGV->replaceAllUsesWith(NewPtr);
+      OldGV->eraseFromParent();
+    }
+  }
+  
+  return GV;
+}
+
+CGVtableInfo::CtorVtableInfo 
+CGVtableInfo::getCtorVtable(const CXXRecordDecl *RD, 
+                            const BaseSubobject &Base) {
+  CtorVtableInfo Info;
+  
+  Info.Vtable = GenerateVtable(llvm::GlobalValue::InternalLinkage,
+                               /*GenerateDefinition=*/true,
+                               RD, Base.getBase(), Base.getBaseOffset(),
+                               Info.AddressPoints);
+  return Info;
+}
+
+llvm::GlobalVariable *CGVtableInfo::getVTT(const CXXRecordDecl *RD) {
+  return GenerateVTT(llvm::GlobalValue::ExternalLinkage, 
+                     /*GenerateDefinition=*/false, RD);
+  
+}
+
+
+bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+  
+  // We don't have any virtual bases, just return early.
+  if (!MD->getParent()->getNumVBases())
+    return false;
+  
+  // Check if we have a base constructor.
+  if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+    return true;
+
+  // Check if we have a base destructor.
+  if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+    return true;
+  
+  return false;
+}
+
+uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD, 
+                                      const CXXRecordDecl *Base) {
+  ClassPairTy ClassPair(RD, Base);
+
+  SubVTTIndiciesTy::iterator I = 
+    SubVTTIndicies.find(ClassPair);
+  if (I != SubVTTIndicies.end())
+    return I->second;
+  
+  std::vector<llvm::Constant *> inits;
+  VTTBuilder Builder(inits, RD, CGM, /*GenerateDefinition=*/false);
+
+  for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+       Builder.getSubVTTIndicies().begin(), 
+       E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+    // Insert all indices.
+    ClassPairTy ClassPair(RD, I->first);
+    
+    SubVTTIndicies.insert(std::make_pair(ClassPair, I->second));
+  }
+    
+  I = SubVTTIndicies.find(ClassPair);
+  assert(I != SubVTTIndicies.end() && "Did not find index!");
+  
+  return I->second;
+}
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..fa77471
--- /dev/null
+++ b/lib/CodeGen/CGValue.h
@@ -0,0 +1,313 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVALUE_H
+#define CLANG_CODEGEN_CGVALUE_H
+
+#include "clang/AST/Type.h"
+
+namespace llvm {
+  class Constant;
+  class Value;
+}
+
+namespace clang {
+  class ObjCPropertyRefExpr;
+  class ObjCImplicitSetterGetterRefExpr;
+
+namespace CodeGen {
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated.  It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+  llvm::Value *V1, *V2;
+  // TODO: Encode this into the low bit of pointer for more efficient
+  // return-by-value.
+  enum { Scalar, Complex, Aggregate } Flavor;
+
+  bool Volatile:1;
+public:
+
+  bool isScalar() const { return Flavor == Scalar; }
+  bool isComplex() const { return Flavor == Complex; }
+  bool isAggregate() const { return Flavor == Aggregate; }
+
+  bool isVolatileQualified() const { return Volatile; }
+
+  /// getScalarVal() - Return the Value* of this scalar value.
+  llvm::Value *getScalarVal() const {
+    assert(isScalar() && "Not a scalar!");
+    return V1;
+  }
+
+  /// getComplexVal - Return the real/imag components of this complex value.
+  ///
+  std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+    return std::pair<llvm::Value *, llvm::Value *>(V1, V2);
+  }
+
+  /// getAggregateAddr() - Return the Value* of the address of the aggregate.
+  llvm::Value *getAggregateAddr() const {
+    assert(isAggregate() && "Not an aggregate!");
+    return V1;
+  }
+
+  static RValue get(llvm::Value *V) {
+    RValue ER;
+    ER.V1 = V;
+    ER.Flavor = Scalar;
+    ER.Volatile = false;
+    return ER;
+  }
+  static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+    RValue ER;
+    ER.V1 = V1;
+    ER.V2 = V2;
+    ER.Flavor = Complex;
+    ER.Volatile = false;
+    return ER;
+  }
+  static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+    RValue ER;
+    ER.V1 = C.first;
+    ER.V2 = C.second;
+    ER.Flavor = Complex;
+    ER.Volatile = false;
+    return ER;
+  }
+  // FIXME: Aggregate rvalues need to retain information about whether they are
+  // volatile or not.  Remove default to find all places that probably get this
+  // wrong.
+  static RValue getAggregate(llvm::Value *V, bool Vol = false) {
+    RValue ER;
+    ER.V1 = V;
+    ER.Flavor = Aggregate;
+    ER.Volatile = Vol;
+    return ER;
+  }
+};
+
+
/// LValue - This represents an lvalue reference.  Because C/C++ allow
/// bitfields, this is not a simple LLVM pointer, it may be a pointer plus a
/// bitrange.
class LValue {
  // FIXME: alignment?

  // LVType - Discriminator selecting which union member (and which accessor
  // family) is valid for this lvalue.
  enum {
    Simple,       // This is a normal l-value, use getAddress().
    VectorElt,    // This is a vector element l-value (V[i]), use getVector*
    BitField,     // This is a bitfield l-value, use getBitfield*.
    ExtVectorElt, // This is an extended vector subset, use getExtVectorComp
    PropertyRef,  // This is an Objective-C property reference, use
                  // getPropertyRefExpr
    KVCRef        // This is an objective-c 'implicit' property ref,
                  // use getKVCRefExpr
  } LVType;

  // V - The address of the lvalue (or the vector/bitfield base address);
  // not set by the property-reference factories below.
  llvm::Value *V;

  union {
    // Index into a vector subscript: V[i]
    llvm::Value *VectorIdx;

    // ExtVector element subset: V.xyx
    llvm::Constant *VectorElts;

    // BitField start bit and size
    struct {
      unsigned short StartBit;
      unsigned short Size;
      bool IsSigned;
    } BitfieldData;

    // Obj-C property reference expression
    const ObjCPropertyRefExpr *PropertyRefExpr;
    // ObjC 'implicit' property reference expression
    const ObjCImplicitSetterGetterRefExpr *KVCRefExpr;
  };

  // 'const' is unused here
  Qualifiers Quals;

  // objective-c's ivar
  bool Ivar:1;

  // objective-c's ivar is an array
  bool ObjIsArray:1;

  // LValue is non-gc'able for any reason, including being a parameter or local
  // variable.
  bool NonGC: 1;

  // Lvalue is a global reference of an objective-c object
  bool GlobalObjCRef : 1;

  // BaseIvarExp - Base expression of an Objective-C ivar reference; null
  // until set via setBaseIvarExp.
  Expr *BaseIvarExp;
private:
  /// SetQualifiers - Record the qualifiers for this lvalue and reset the
  /// Objective-C flags to their defaults.  Called by every factory below.
  void SetQualifiers(Qualifiers Quals) {
    this->Quals = Quals;

    // FIXME: Convenient place to set objc flags to 0. This should really be
    // done in a user-defined constructor instead.
    this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
    this->BaseIvarExp = 0;
  }

public:
  bool isSimple() const { return LVType == Simple; }
  bool isVectorElt() const { return LVType == VectorElt; }
  bool isBitfield() const { return LVType == BitField; }
  bool isExtVectorElt() const { return LVType == ExtVectorElt; }
  bool isPropertyRef() const { return LVType == PropertyRef; }
  bool isKVCRef() const { return LVType == KVCRef; }

  bool isVolatileQualified() const { return Quals.hasVolatile(); }
  bool isRestrictQualified() const { return Quals.hasRestrict(); }
  // getVRQualifiers - CVR qualifiers with 'const' masked out, since const
  // does not affect load/store emission.
  unsigned getVRQualifiers() const {
    return Quals.getCVRQualifiers() & ~Qualifiers::Const;
  }

  bool isObjCIvar() const { return Ivar; }
  bool isObjCArray() const { return ObjIsArray; }
  bool isNonGC () const { return NonGC; }
  bool isGlobalObjCRef() const { return GlobalObjCRef; }
  bool isObjCWeak() const { return Quals.getObjCGCAttr() == Qualifiers::Weak; }
  bool isObjCStrong() const { return Quals.getObjCGCAttr() == Qualifiers::Strong; }

  Expr *getBaseIvarExp() const { return BaseIvarExp; }
  void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }

  unsigned getAddressSpace() const { return Quals.getAddressSpace(); }

  static void SetObjCIvar(LValue& R, bool iValue) {
    R.Ivar = iValue;
  }
  static void SetObjCArray(LValue& R, bool iValue) {
    R.ObjIsArray = iValue;
  }
  static void SetGlobalObjCRef(LValue& R, bool iValue) {
    R.GlobalObjCRef = iValue;
  }

  static void SetObjCNonGC(LValue& R, bool iValue) {
    R.NonGC = iValue;
  }

  // simple lvalue
  llvm::Value *getAddress() const { assert(isSimple()); return V; }
  // vector elt lvalue
  llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
  llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
  // extended vector elements.
  llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
  llvm::Constant *getExtVectorElts() const {
    assert(isExtVectorElt());
    return VectorElts;
  }
  // bitfield lvalue
  llvm::Value *getBitfieldAddr() const { assert(isBitfield()); return V; }
  unsigned short getBitfieldStartBit() const {
    assert(isBitfield());
    return BitfieldData.StartBit;
  }
  unsigned short getBitfieldSize() const {
    assert(isBitfield());
    return BitfieldData.Size;
  }
  bool isBitfieldSigned() const {
    assert(isBitfield());
    return BitfieldData.IsSigned;
  }
  // property ref lvalue
  const ObjCPropertyRefExpr *getPropertyRefExpr() const {
    assert(isPropertyRef());
    return PropertyRefExpr;
  }

  // 'implicit' property ref lvalue
  const ObjCImplicitSetterGetterRefExpr *getKVCRefExpr() const {
    assert(isKVCRef());
    return KVCRefExpr;
  }

  /// MakeAddr - Create a simple lvalue for the given address and qualifiers.
  static LValue MakeAddr(llvm::Value *V, Qualifiers Quals) {
    LValue R;
    R.LVType = Simple;
    R.V = V;
    R.SetQualifiers(Quals);
    return R;
  }

  /// MakeVectorElt - Create an lvalue for element Idx of the vector at Vec.
  static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
                              unsigned CVR) {
    LValue R;
    R.LVType = VectorElt;
    R.V = Vec;
    R.VectorIdx = Idx;
    R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
    return R;
  }

  /// MakeExtVectorElt - Create an lvalue for the element subset Elts of the
  /// ext-vector at Vec.
  static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
                                 unsigned CVR) {
    LValue R;
    R.LVType = ExtVectorElt;
    R.V = Vec;
    R.VectorElts = Elts;
    R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
    return R;
  }

  /// MakeBitfield - Create a bitfield lvalue: Size bits starting at bit
  /// StartBit of the storage addressed by V.
  static LValue MakeBitfield(llvm::Value *V, unsigned short StartBit,
                             unsigned short Size, bool IsSigned,
                             unsigned CVR) {
    LValue R;
    R.LVType = BitField;
    R.V = V;
    R.BitfieldData.StartBit = StartBit;
    R.BitfieldData.Size = Size;
    R.BitfieldData.IsSigned = IsSigned;
    R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
    return R;
  }

  // FIXME: It is probably bad that we aren't emitting the target when we build
  // the lvalue. However, this complicates the code a bit, and I haven't figured
  // out how to make it go wrong yet.
  static LValue MakePropertyRef(const ObjCPropertyRefExpr *E,
                                unsigned CVR) {
    LValue R;
    R.LVType = PropertyRef;
    R.PropertyRefExpr = E;
    R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
    return R;
  }

  /// MakeKVCRef - Create an lvalue for an Objective-C 'implicit' property.
  static LValue MakeKVCRef(const ObjCImplicitSetterGetterRefExpr *E,
                           unsigned CVR) {
    LValue R;
    R.LVType = KVCRef;
    R.KVCRefExpr = E;
    R.SetQualifiers(Qualifiers::fromCVRMask(CVR));
    return R;
  }
};
+
+}  // end namespace CodeGen
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp
new file mode 100644
index 0000000..8b90f28
--- /dev/null
+++ b/lib/CodeGen/CGVtable.cpp
@@ -0,0 +1,1503 @@
+//===--- CGVtable.cpp - Emit LLVM Code for C++ vtables --------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/ADT/DenseSet.h"
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class VtableBuilder {
+public:
+  /// Index_t - Vtable index type.
+  typedef uint64_t Index_t;
+  typedef std::vector<std::pair<GlobalDecl,
+                                std::pair<GlobalDecl, ThunkAdjustment> > >
+      SavedAdjustmentsVectorTy;
+private:
+  
+  // VtableComponents - The components of the vtable being built.
+  typedef llvm::SmallVector<llvm::Constant *, 64> VtableComponentsVectorTy;
+  VtableComponentsVectorTy VtableComponents;
+  
+  const bool BuildVtable;
+
+  llvm::Type *Ptr8Ty;
+  
+  /// MostDerivedClass - The most derived class that this vtable is being 
+  /// built for.
+  const CXXRecordDecl *MostDerivedClass;
+  
+  /// LayoutClass - The most derived class used for virtual base layout
+  /// information.
+  const CXXRecordDecl *LayoutClass;
+  /// LayoutOffset - The offset for Class in LayoutClass.
+  uint64_t LayoutOffset;
+  /// BLayout - Layout for the most derived class that this vtable is being
+  /// built for.
+  const ASTRecordLayout &BLayout;
+  llvm::SmallSet<const CXXRecordDecl *, 32> IndirectPrimary;
+  llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase;
+  llvm::Constant *rtti;
+  llvm::LLVMContext &VMContext;
+  CodeGenModule &CGM;  // Per-module state.
+  
+  llvm::DenseMap<const CXXMethodDecl *, Index_t> VCall;
+  llvm::DenseMap<GlobalDecl, Index_t> VCallOffset;
+  llvm::DenseMap<GlobalDecl, Index_t> VCallOffsetForVCall;
+  // This is the offset to the nearest virtual base
+  llvm::DenseMap<const CXXMethodDecl *, Index_t> NonVirtualOffset;
+  llvm::DenseMap<const CXXRecordDecl *, Index_t> VBIndex;
+
+  /// PureVirtualFunction - Points to __cxa_pure_virtual.
+  llvm::Constant *PureVirtualFn;
+  
+  /// VtableMethods - A data structure for keeping track of methods in a vtable.
+  /// Can add methods, override methods and iterate in vtable order.
+  class VtableMethods {
+    // MethodToIndexMap - Maps from a global decl to the index it has in the
+    // Methods vector.
+    llvm::DenseMap<GlobalDecl, uint64_t> MethodToIndexMap;
+
+    /// Methods - The methods, in vtable order.
+    typedef llvm::SmallVector<GlobalDecl, 16> MethodsVectorTy;
+    MethodsVectorTy Methods;
+    MethodsVectorTy OrigMethods;
+
+  public:
+    /// AddMethod - Add a method to the vtable methods.
+    void AddMethod(GlobalDecl GD) {
+      assert(!MethodToIndexMap.count(GD) && 
+             "Method has already been added!");
+      
+      MethodToIndexMap[GD] = Methods.size();
+      Methods.push_back(GD);
+      OrigMethods.push_back(GD);
+    }
+    
+    /// OverrideMethod - Replace a method with another.
+    void OverrideMethod(GlobalDecl OverriddenGD, GlobalDecl GD) {
+      llvm::DenseMap<GlobalDecl, uint64_t>::iterator i 
+        = MethodToIndexMap.find(OverriddenGD);
+      assert(i != MethodToIndexMap.end() && "Did not find entry!");
+
+      // Get the index of the old decl.
+      uint64_t Index = i->second;
+      
+      // Replace the old decl with the new decl.
+      Methods[Index] = GD;
+
+      // And add the new.
+      MethodToIndexMap[GD] = Index;
+    }
+
+    /// getIndex - Gives the index of a passed in GlobalDecl. Returns false if
+    /// the index couldn't be found.
+    bool getIndex(GlobalDecl GD, uint64_t &Index) const {
+      llvm::DenseMap<GlobalDecl, uint64_t>::const_iterator i 
+        = MethodToIndexMap.find(GD);
+
+      if (i == MethodToIndexMap.end())
+        return false;
+      
+      Index = i->second;
+      return true;
+    }
+
+    GlobalDecl getOrigMethod(uint64_t Index) const {
+      return OrigMethods[Index];
+    }
+
+    MethodsVectorTy::size_type size() const {
+      return Methods.size();
+    }
+
+    void clear() {
+      MethodToIndexMap.clear();
+      Methods.clear();
+      OrigMethods.clear();
+    }
+    
+    GlobalDecl operator[](uint64_t Index) const {
+      return Methods[Index];
+    }
+  };
+  
+  /// Methods - The vtable methods we're currently building.
+  VtableMethods Methods;
+  
+  /// ThisAdjustments - For a given index in the vtable, contains the 'this'
+  /// pointer adjustment needed for a method.
+  typedef llvm::DenseMap<uint64_t, ThunkAdjustment> ThisAdjustmentsMapTy;
+  ThisAdjustmentsMapTy ThisAdjustments;
+
+  SavedAdjustmentsVectorTy SavedAdjustments;
+
+  /// BaseReturnTypes - Contains the base return types of methods who have been
+  /// overridden with methods whose return types require adjustment. Used for
+  /// generating covariant thunk information.
+  typedef llvm::DenseMap<uint64_t, CanQualType> BaseReturnTypesMapTy;
+  BaseReturnTypesMapTy BaseReturnTypes;
+  
+  std::vector<Index_t> VCalls;
+
+  typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t;
+  // subAddressPoints - Used to hold the AddressPoints (offsets) into the built
+  // vtable for use in computing the initializers for the VTT.
+  llvm::DenseMap<CtorVtable_t, int64_t> &subAddressPoints;
+
+  /// AddressPoints - Address points for this vtable.
+  CGVtableInfo::AddressPointsMapTy& AddressPoints;
+  
+  typedef CXXRecordDecl::method_iterator method_iter;
+  const uint32_t LLVMPointerWidth;
+  Index_t extra;
+  typedef std::vector<std::pair<const CXXRecordDecl *, int64_t> > Path_t;
+  static llvm::DenseMap<CtorVtable_t, int64_t>&
+  AllocAddressPoint(CodeGenModule &cgm, const CXXRecordDecl *l,
+                    const CXXRecordDecl *c) {
+    CGVtableInfo::AddrMap_t *&oref = cgm.getVtableInfo().AddressPoints[l];
+    if (oref == 0)
+      oref = new CGVtableInfo::AddrMap_t;
+
+    llvm::DenseMap<CtorVtable_t, int64_t> *&ref = (*oref)[c];
+    if (ref == 0)
+      ref = new llvm::DenseMap<CtorVtable_t, int64_t>;
+    return *ref;
+  }
+  
  /// DclIsSame - Return true if New and Old declare the same function
  /// signature (i.e. New would redeclare, not overload, Old).  This mirrors
  /// Sema's overload detection; the standard citations are kept inline.
  bool DclIsSame(const FunctionDecl *New, const FunctionDecl *Old) {
    FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate();
    FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate();

    // C++ [temp.fct]p2:
    //   A function template can be overloaded with other function templates
    //   and with normal (non-template) functions.
    if ((OldTemplate == 0) != (NewTemplate == 0))
      return false;

    // Is the function New an overload of the function Old?
    QualType OldQType = CGM.getContext().getCanonicalType(Old->getType());
    QualType NewQType = CGM.getContext().getCanonicalType(New->getType());

    // Compare the signatures (C++ 1.3.10) of the two functions to
    // determine whether they are overloads. If we find any mismatch
    // in the signature, they are overloads.

    // If either of these functions is a K&R-style function (no
    // prototype), then we consider them to have matching signatures.
    if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) ||
        isa<FunctionNoProtoType>(NewQType.getTypePtr()))
      return true;

    FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType);
    FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType);

    // The signature of a function includes the types of its
    // parameters (C++ 1.3.10), which includes the presence or absence
    // of the ellipsis; see C++ DR 357).
    if (OldQType != NewQType &&
        (OldType->getNumArgs() != NewType->getNumArgs() ||
         OldType->isVariadic() != NewType->isVariadic() ||
         !std::equal(OldType->arg_type_begin(), OldType->arg_type_end(),
                     NewType->arg_type_begin())))
      return false;

#if 0
    // C++ [temp.over.link]p4:
    //   The signature of a function template consists of its function
    //   signature, its return type and its template parameter list. The names
    //   of the template parameters are significant only for establishing the
    //   relationship between the template parameters and the rest of the
    //   signature.
    //
    // We check the return type and template parameter lists for function
    // templates first; the remaining checks follow.
    if (NewTemplate &&
        (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
                                         OldTemplate->getTemplateParameters(),
                                         TPL_TemplateMatch) ||
         OldType->getResultType() != NewType->getResultType()))
      return false;
#endif

    // If the function is a class member, its signature includes the
    // cv-qualifiers (if any) on the function itself.
    //
    // As part of this, also check whether one of the member functions
    // is static, in which case they are not overloads (C++
    // 13.1p2). While not part of the definition of the signature,
    // this check is important to determine whether these functions
    // can be overloaded.
    const CXXMethodDecl* OldMethod = dyn_cast<CXXMethodDecl>(Old);
    const CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New);
    if (OldMethod && NewMethod &&
        !OldMethod->isStatic() && !NewMethod->isStatic() &&
        OldMethod->getTypeQualifiers() != NewMethod->getTypeQualifiers())
      return false;
  
    // The signatures match; this is not an overload.
    return true;
  }
+
  // ForwardUnique maps a unique-overrider tree root onto another root that
  // MergeForwarding determined represents the same function; chains are
  // followed by getUnique.  A self-mapping marks a root.
  typedef llvm::DenseMap<const CXXMethodDecl *, const CXXMethodDecl*>
    ForwardUnique_t;
  ForwardUnique_t ForwardUnique;
  // UniqueOverrider maps each virtual method to the single final overrider
  // chosen for it by BuildUniqueOverrider.
  llvm::DenseMap<const CXXMethodDecl*, const CXXMethodDecl*> UniqueOverrider;
+
  /// BuildUniqueOverrider - Record U as the unique overrider for MD and,
  /// recursively, for everything MD overrides.  If MD already has a different
  /// unique overrider, the two overrider trees are linked via ForwardUnique
  /// instead (MergeForwarding/getUnique later collapse such links).
  void BuildUniqueOverrider(const CXXMethodDecl *U, const CXXMethodDecl *MD) {
    const CXXMethodDecl *PrevU = UniqueOverrider[MD];
    assert(U && "no unique overrider");
    if (PrevU == U)
      return;
    if (PrevU != U && PrevU != 0) {
      // If already set, note the two sets as the same
      if (0)
        printf("%s::%s same as %s::%s\n",
               PrevU->getParent()->getNameAsCString(),
               PrevU->getNameAsCString(),
               U->getParent()->getNameAsCString(),
               U->getNameAsCString());
      ForwardUnique[PrevU] = U;
      return;
    }

    // Not set, set it now
    if (0)
      printf("marking %s::%s %p override as %s::%s\n",
             MD->getParent()->getNameAsCString(),
             MD->getNameAsCString(),
             (void*)MD,
             U->getParent()->getNameAsCString(),
             U->getNameAsCString());
    UniqueOverrider[MD] = U;

    // Propagate U down the chain of methods that MD overrides.
    for (CXXMethodDecl::method_iterator mi = MD->begin_overridden_methods(),
           me = MD->end_overridden_methods(); mi != me; ++mi) {
      BuildUniqueOverrider(U, *mi);
    }
  }
+
  /// BuildUniqueOverriders - Walk RD and all of its bases (virtual and
  /// non-virtual) seeding UniqueOverrider/ForwardUnique for every virtual
  /// method encountered.
  /// NOTE(review): bases reachable through multiple paths are revisited on
  /// each path; the UniqueOverrider[MD] guard limits re-seeding but the walk
  /// itself can repeat work on diamond hierarchies.
  void BuildUniqueOverriders(const CXXRecordDecl *RD) {
    if (0) printf("walking %s\n", RD->getNameAsCString());
    for (CXXRecordDecl::method_iterator i = RD->method_begin(),
           e = RD->method_end(); i != e; ++i) {
      const CXXMethodDecl *MD = *i;
      if (!MD->isVirtual())
        continue;

      if (UniqueOverrider[MD] == 0) {
        // Only set this, if it hasn't been set yet.
        BuildUniqueOverrider(MD, MD);
        if (0)
          printf("top set is %s::%s %p\n",
                  MD->getParent()->getNameAsCString(),
                  MD->getNameAsCString(),
                  (void*)MD);
        // A method first seen here is the root of its own tree.
        ForwardUnique[MD] = MD;
      }
    }
    // Recurse into every direct base.
    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
           e = RD->bases_end(); i != e; ++i) {
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
      BuildUniqueOverriders(Base);
    }
  }
+
  /// DclCmp - qsort-style comparator that orders methods by the address of
  /// their identifier, so that same-named methods sort adjacently.
  /// NOTE(review): llvm::array_pod_sort passes pointers *to* the array
  /// elements (i.e. const CXXMethodDecl **), so the direct casts below look
  /// wrong for that call site (they reinterpret the element's slot address as
  /// a decl), while the direct call in MergeForwarding passes element values
  /// and does match these casts — confirm which convention is intended.
  /// NOTE(review): returning a pointer difference truncated to int can
  /// overflow/wrap on 64-bit hosts, breaking comparator transitivity.
  static int DclCmp(const void *p1, const void *p2) {
    const CXXMethodDecl *MD1 = (const CXXMethodDecl *)p1;
    const CXXMethodDecl *MD2 = (const CXXMethodDecl *)p2;
    return (MD1->getIdentifier() - MD2->getIdentifier());
  }
+  
+  void MergeForwarding() {
+    typedef llvm::SmallVector<const CXXMethodDecl *, 100>  A_t;
+    A_t A;
+    for (ForwardUnique_t::iterator I = ForwardUnique.begin(),
+           E = ForwardUnique.end(); I != E; ++I) {
+      if (I->first == I->second)
+        // Only add the roots of all trees
+        A.push_back(I->first);
+    }
+    llvm::array_pod_sort(A.begin(), A.end(), DclCmp);
+    for (A_t::iterator I = A.begin(),
+           E = A.end(); I != E; ++I) {
+      A_t::iterator J = I;
+      while (++J != E  && DclCmp(*I, *J) == 0)
+        if (DclIsSame(*I, *J)) {
+          printf("connecting %s\n", (*I)->getNameAsCString());
+          ForwardUnique[*J] = *I;
+        }
+    }
+  }
+
+  const CXXMethodDecl *getUnique(const CXXMethodDecl *MD) {
+    const CXXMethodDecl *U = UniqueOverrider[MD];
+    assert(U && "unique overrider not found");
+    while (ForwardUnique.count(U)) {
+      const CXXMethodDecl *NU = ForwardUnique[U];
+      if (NU == U) break;
+      U = NU;
+    }
+    return U;
+  }
+
+  GlobalDecl getUnique(GlobalDecl GD) {
+    const CXXMethodDecl *Unique = getUnique(cast<CXXMethodDecl>(GD.getDecl()));
+    
+    if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Unique))
+      return GlobalDecl(CD, GD.getCtorType());
+    
+    if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Unique))
+      return GlobalDecl(DD, GD.getDtorType());
+    
+    return Unique;
+  }
+
  /// getPureVirtualFn - Return the __cxa_pure_virtual function, creating the
  /// runtime-function declaration (void()) lazily on first use and caching
  /// it (already wrapped to i8*) in PureVirtualFn.
  llvm::Constant* getPureVirtualFn() {
    if (!PureVirtualFn) {
      const llvm::FunctionType *Ty = 
        llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), 
                                /*isVarArg=*/false);
      PureVirtualFn = wrap(CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual"));
    }
    
    return PureVirtualFn;
  }
+  
public:
  /// VtableBuilder - \p MostDerivedClass is the class whose vtable is being
  /// built; \p l is the class being laid out (differs from MostDerivedClass
  /// for construction vtables) with \p lo its offset; \p build selects
  /// whether real components are emitted or only layout is computed.
  /// Note: the member-initializer order matters — subAddressPoints is
  /// allocated via AllocAddressPoint before the body runs.
  VtableBuilder(const CXXRecordDecl *MostDerivedClass,
                const CXXRecordDecl *l, uint64_t lo, CodeGenModule &cgm,
                bool build, CGVtableInfo::AddressPointsMapTy& AddressPoints)
    : BuildVtable(build), MostDerivedClass(MostDerivedClass), LayoutClass(l),
      LayoutOffset(lo), BLayout(cgm.getContext().getASTRecordLayout(l)),
      rtti(0), VMContext(cgm.getModule().getContext()),CGM(cgm),
      PureVirtualFn(0),
      subAddressPoints(AllocAddressPoint(cgm, l, MostDerivedClass)),
      AddressPoints(AddressPoints),
      LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0))
      {
    Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
    // The RTTI descriptor is only needed when actually emitting components.
    if (BuildVtable) {
      QualType ClassType = CGM.getContext().getTagDeclType(MostDerivedClass);
      rtti = CGM.GetAddrOfRTTIDescriptor(ClassType);
    }
    // Precompute the unique-overrider relation used throughout the build.
    BuildUniqueOverriders(MostDerivedClass);
    MergeForwarding();
  }
+
  // getVtableComponents - Returns a reference to the vtable components.
  const VtableComponentsVectorTy &getVtableComponents() const {
    return VtableComponents;
  }
  
  // getVBIndex - Returns the map from virtual base to its vtable index.
  llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getVBIndex()
    { return VBIndex; }

  // getSavedAdjustments - Returns the this-adjustments recorded during the
  // build.
  SavedAdjustmentsVectorTy &getSavedAdjustments()
    { return SavedAdjustments; }
+
  /// wrap - Encode the raw integer vtable component i as an i8* constant
  /// via an inttoptr of the 64-bit value.
  llvm::Constant *wrap(Index_t i) {
    llvm::Constant *m;
    m = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), i);
    return llvm::ConstantExpr::getIntToPtr(m, Ptr8Ty);
  }

  /// wrap - Adapt an arbitrary constant (function address, RTTI descriptor)
  /// to the common i8* component type via bitcast.
  llvm::Constant *wrap(llvm::Constant *m) {
    return llvm::ConstantExpr::getBitCast(m, Ptr8Ty);
  }

//#define D1(x)
// D1 - Debug-print helper; X only runs when the DEBUG environment variable
// is set.
#define D1(X) do { if (getenv("DEBUG")) { X; } } while (0)
+
  /// GenerateVBaseOffsets - Push vbase-offset entries (into VCalls) for every
  /// virtual base of RD not yet seen, recording each base's negative vtable
  /// index in VBIndex when updateVBIndex is set.  Offset is the bit offset of
  /// RD in the layout class; current_vbindex is the index of the nearest
  /// enclosing virtual base on this path.
  void GenerateVBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
                            bool updateVBIndex, Index_t current_vbindex) {
    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
           e = RD->bases_end(); i != e; ++i) {
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
      Index_t next_vbindex = current_vbindex;
      if (i->isVirtual() && !SeenVBase.count(Base)) {
        SeenVBase.insert(Base);
        if (updateVBIndex) {
          // Indices grow downwards from the address point: slots -3, -4, ...
          // in units of bytes (pointer-width entries).
          next_vbindex = (ssize_t)(-(VCalls.size()*LLVMPointerWidth/8)
                                   - 3*LLVMPointerWidth/8);
          VBIndex[Base] = next_vbindex;
        }
        // Delta (in bytes) from this subobject to the virtual base.
        int64_t BaseOffset = -(Offset/8) + BLayout.getVBaseClassOffset(Base)/8;
        VCalls.push_back((0?700:0) + BaseOffset);
        D1(printf("  vbase for %s at %d delta %d most derived %s\n",
                  Base->getNameAsCString(),
                  (int)-VCalls.size()-3, (int)BaseOffset,
                  MostDerivedClass->getNameAsCString()));
      }
      // We also record offsets for non-virtual bases to closest enclosing
      // virtual base.  We do this so that we don't have to search
      // for the nearest virtual base class when generating thunks.
      if (updateVBIndex && VBIndex.count(Base) == 0)
        VBIndex[Base] = next_vbindex;
      GenerateVBaseOffsets(Base, Offset, updateVBIndex, next_vbindex);
    }
  }
+
  /// StartNewTable - Reset the set of virtual bases already emitted, so the
  /// next (sub-)vtable records its own vbase offsets.
  void StartNewTable() {
    SeenVBase.clear();
  }
+
  /// getNVOffset_1 - Depth-first search for base B inside D, accumulating
  /// non-virtual base-class offsets (in bits).  Returns the offset of B, or
  /// -1 if B was not found on any path.
  /// NOTE(review): virtual bases are still recursed into, but with a
  /// BaseOffset of 0 — an offset found beneath one would be relative to the
  /// vbase, not to D; confirm this is the intended behavior.
  Index_t getNVOffset_1(const CXXRecordDecl *D, const CXXRecordDecl *B,
    Index_t Offset = 0) {

    if (B == D)
      return Offset;

    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D);
    for (CXXRecordDecl::base_class_const_iterator i = D->bases_begin(),
           e = D->bases_end(); i != e; ++i) {
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
      int64_t BaseOffset = 0;
      if (!i->isVirtual())
        BaseOffset = Offset + Layout.getBaseClassOffset(Base);
      int64_t o = getNVOffset_1(Base, B, BaseOffset);
      if (o >= 0)
        return o;
    }

    // Not found along this branch.
    return -1;
  }
+
+  /// getNVOffset - Returns the non-virtual offset for the given (B) base of the
+  /// derived class D.
+  Index_t getNVOffset(QualType qB, QualType qD) {
+    qD = qD->getPointeeType();
+    qB = qB->getPointeeType();
+    CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
+    CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
+    int64_t o = getNVOffset_1(D, B);
+    if (o >= 0)
+      return o;
+
+    assert(false && "FIXME: non-virtual base not found");
+    return 0;
+  }
+
+  /// getVbaseOffset - Returns the index into the vtable for the virtual base
+  /// offset for the given (B) virtual base of the derived class D.
+  Index_t getVbaseOffset(QualType qB, QualType qD) {
+    qD = qD->getPointeeType();
+    qB = qB->getPointeeType();
+    CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
+    CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
+    if (D != MostDerivedClass)
+      return CGM.getVtableInfo().getVirtualBaseOffsetIndex(D, B);
+    llvm::DenseMap<const CXXRecordDecl *, Index_t>::iterator i;
+    i = VBIndex.find(B);
+    if (i != VBIndex.end())
+      return i->second;
+
+    assert(false && "FIXME: Base not found");
+    return 0;
+  }
+
  /// OverrideMethod - Try to reuse an existing vtable slot for GD by finding
  /// a method it overrides; returns true if a slot was reused.  Defined
  /// out-of-line below.
  bool OverrideMethod(GlobalDecl GD, bool MorallyVirtual,
                      Index_t OverrideOffset, Index_t Offset,
                      int64_t CurrentVBaseOffset);

  /// AppendMethods - Append the current methods to the vtable.
  void AppendMethodsToVtable();
+  
  /// WrapAddrOf - Return the address of GD's function, using the vtable
  /// function type and wrapped to the common i8* component type.
  llvm::Constant *WrapAddrOf(GlobalDecl GD) {
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());

    const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVtable(MD);

    return wrap(CGM.GetAddrOfFunction(GD, Ty));
  }
+
  /// OverrideMethods - Walk the path from the most derived class down to the
  /// current subobject (most derived first, hence rbegin) and apply each
  /// class's virtual methods as overrides of slots already in the vtable.
  void OverrideMethods(Path_t *Path, bool MorallyVirtual, int64_t Offset,
                       int64_t CurrentVBaseOffset) {
    for (Path_t::reverse_iterator i = Path->rbegin(),
           e = Path->rend(); i != e; ++i) {
      const CXXRecordDecl *RD = i->first;
      int64_t OverrideOffset = i->second;
      for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
           ++mi) {
        const CXXMethodDecl *MD = *mi;

        if (!MD->isVirtual())
          continue;

        if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
          // Override both the complete and the deleting destructor.
          GlobalDecl CompDtor(DD, Dtor_Complete);
          OverrideMethod(CompDtor, MorallyVirtual, OverrideOffset, Offset,
                         CurrentVBaseOffset);

          GlobalDecl DeletingDtor(DD, Dtor_Deleting);
          OverrideMethod(DeletingDtor, MorallyVirtual, OverrideOffset, Offset,
                         CurrentVBaseOffset);
        } else {
          OverrideMethod(MD, MorallyVirtual, OverrideOffset, Offset,
                         CurrentVBaseOffset);
        }
      }
    }
  }
+
  /// AddMethod - Add a single virtual method GD to the vtable at bit offset
  /// Offset, reusing an existing slot when GD overrides a method already
  /// present.  When MorallyVirtual, also allocate a vcall-offset slot for
  /// GD's unique overrider (first occurrence only).
  void AddMethod(const GlobalDecl GD, bool MorallyVirtual, Index_t Offset,
                 int64_t CurrentVBaseOffset) {
    // If we can find a previously allocated slot for this, reuse it.
    if (OverrideMethod(GD, MorallyVirtual, Offset, Offset,
                       CurrentVBaseOffset))
      return;

    D1(printf("  vfn for %s at %d\n",
              dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsCString(),
              (int)Methods.size()));

    // We didn't find an entry in the vtable that we could use, add a new
    // entry.
    Methods.AddMethod(GD);

    // Delta (bytes) from the enclosing virtual base to this subobject.
    VCallOffset[GD] = Offset/8 - CurrentVBaseOffset/8;

    if (MorallyVirtual) {
      GlobalDecl UGD = getUnique(GD);
      const CXXMethodDecl *UMD = cast<CXXMethodDecl>(UGD.getDecl());
  
      assert(UMD && "final overrider not found");

      // Slots are keyed off the unique overrider; idx is 1-based so that 0
      // means "not yet allocated".
      Index_t &idx = VCall[UMD];
      // Allocate the first one, after that, we reuse the previous one.
      if (idx == 0) {
        VCallOffsetForVCall[UGD] = Offset/8;
        NonVirtualOffset[UMD] = Offset/8 - CurrentVBaseOffset/8;
        idx = VCalls.size()+1;
        VCalls.push_back(Offset/8 - CurrentVBaseOffset/8);
        D1(printf("  vcall for %s at %d with delta %d\n",
                  dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsCString(),
                  (int)-VCalls.size()-3, (int)VCalls[idx-1]));
      }
    }
  }
+
+  void AddMethods(const CXXRecordDecl *RD, bool MorallyVirtual,
+                  Index_t Offset, int64_t CurrentVBaseOffset) {
+    for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
+         ++mi) {
+      const CXXMethodDecl *MD = *mi;
+      if (!MD->isVirtual())
+        continue;
+      
+      if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+        // For destructors, add both the complete and the deleting destructor
+        // to the vtable.
+        AddMethod(GlobalDecl(DD, Dtor_Complete), MorallyVirtual, Offset, 
+                  CurrentVBaseOffset);
+        AddMethod(GlobalDecl(DD, Dtor_Deleting), MorallyVirtual, Offset, 
+                  CurrentVBaseOffset);
+      } else
+        AddMethod(MD, MorallyVirtual, Offset, CurrentVBaseOffset);
+    }
+  }
+
  /// NonVirtualBases - Generate the vtable parts for every non-virtual base
  /// of RD, pushing RD onto the override path for the duration of the walk.
  /// The primary base (when non-virtual) is flagged so its part is merged
  /// with RD's rather than emitted separately.
  void NonVirtualBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout,
                       const CXXRecordDecl *PrimaryBase,
                       bool PrimaryBaseWasVirtual, bool MorallyVirtual,
                       int64_t Offset, int64_t CurrentVBaseOffset,
                       Path_t *Path) {
    Path->push_back(std::make_pair(RD, Offset));
    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
           e = RD->bases_end(); i != e; ++i) {
      if (i->isVirtual())
        continue;
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
      uint64_t o = Offset + Layout.getBaseClassOffset(Base);
      StartNewTable();
      GenerateVtableForBase(Base, o, MorallyVirtual, false,
                            true, Base == PrimaryBase && !PrimaryBaseWasVirtual,
                            CurrentVBaseOffset, Path);
    }
    Path->pop_back();
  }
+
// #define D(X) do { X; } while (0)
// D - Secondary debug helper; compiled out by default.
#define D(X)

  /// insertVCalls - Splice the accumulated vcall/vbase-offset slots into the
  /// component vector at InsertionPoint (reversed, since the slots grow
  /// downwards from the address point), then reset all per-vtable vcall
  /// bookkeeping.
  void insertVCalls(int InsertionPoint) {
    D1(printf("============= combining vbase/vcall\n"));
    D(VCalls.insert(VCalls.begin(), 673));
    D(VCalls.push_back(672));

    // Reserve the slots; they are only filled in when actually emitting.
    VtableComponents.insert(VtableComponents.begin() + InsertionPoint, 
                            VCalls.size(), 0);
    if (BuildVtable) {
      // The vcalls come first...
      for (std::vector<Index_t>::reverse_iterator i = VCalls.rbegin(),
             e = VCalls.rend();
           i != e; ++i)
        VtableComponents[InsertionPoint++] = wrap((0?600:0) + *i);
    }
    VCalls.clear();
    VCall.clear();
    VCallOffsetForVCall.clear();
    VCallOffset.clear();
    NonVirtualOffset.clear();
  }
+
  /// AddAddressPoints - Record AddressPoint as the address point for the
  /// subobject (RD, Offset), both in the per-construction map and the
  /// caller-supplied map, then do the same for RD's chain of primary bases
  /// (which share this vtable part).
  void AddAddressPoints(const CXXRecordDecl *RD, uint64_t Offset,
                       Index_t AddressPoint) {
    D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n",
              RD->getNameAsCString(), MostDerivedClass->getNameAsCString(),
              LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
    subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
    AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;

    // Now also add the address point for all our primary bases.
    while (1) {
      const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
      RD = Layout.getPrimaryBase();
      const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
      // FIXME: Double check this.
      if (RD == 0)
        break;
      // A virtual primary base only shares this vtable part when it lives at
      // the same offset.
      if (PrimaryBaseWasVirtual &&
          BLayout.getVBaseClassOffset(RD) != Offset)
        break;
      D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n",
                RD->getNameAsCString(), MostDerivedClass->getNameAsCString(),
                LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
      subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
      AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;
    }
  }
+
+
  /// FinishGenerateVtable - Complete the vtable part for subobject RD:
  /// emit (or defer) the vcall slots, add offset-to-top and RTTI, append the
  /// collected methods, recurse into the non-virtual bases, and record the
  /// resulting address point.  Owns a temporary Path when the caller passed
  /// none.
  void FinishGenerateVtable(const CXXRecordDecl *RD,
                            const ASTRecordLayout &Layout,
                            const CXXRecordDecl *PrimaryBase,
                            bool ForNPNVBases, bool WasPrimaryBase,
                            bool PrimaryBaseWasVirtual,
                            bool MorallyVirtual, int64_t Offset,
                            bool ForVirtualBase, int64_t CurrentVBaseOffset,
                            Path_t *Path) {
    bool alloc = false;
    if (Path == 0) {
      alloc = true;
      Path = new Path_t;
    }

    StartNewTable();
    extra = 0;
    Index_t AddressPoint = 0;
    int VCallInsertionPoint = 0;
    if (!ForNPNVBases || !WasPrimaryBase) {
      // Virtual-base parts keep collecting vcalls until the whole part is
      // done; non-virtual parts can emit them immediately.
      bool DeferVCalls = MorallyVirtual || ForVirtualBase;
      VCallInsertionPoint = VtableComponents.size();
      if (!DeferVCalls) {
        insertVCalls(VCallInsertionPoint);
      } else
        // FIXME: just for extra, or for all uses of VCalls.size post this?
        extra = -VCalls.size();

      // Add the offset to top.
      VtableComponents.push_back(BuildVtable ? wrap(-((Offset-LayoutOffset)/8)) : 0);
    
      // Add the RTTI information.
      VtableComponents.push_back(rtti);
    
      // The address point is the slot right after offset-to-top and RTTI.
      AddressPoint = VtableComponents.size();

      AppendMethodsToVtable();
    }

    // and then the non-virtual bases.
    NonVirtualBases(RD, Layout, PrimaryBase, PrimaryBaseWasVirtual,
                    MorallyVirtual, Offset, CurrentVBaseOffset, Path);

    if (ForVirtualBase) {
      // FIXME: We're adding to VCalls in callers, we need to do the overrides
      // in the inner part, so that we know the complete set of vcalls during
      // the build and don't have to insert into methods.  Saving out the
      // AddressPoint here, would need to be fixed, if we didn't do that.  Also
      // retroactively adding vcalls for overrides later wind up in the wrong
      // place, the vcall slot has to be alloted during the walk of the base
      // when the function is first introduces.
      AddressPoint += VCalls.size();
      insertVCalls(VCallInsertionPoint);
    }
    
    if (!ForNPNVBases || !WasPrimaryBase)
      AddAddressPoints(RD, Offset, AddressPoint);

    if (alloc) {
      delete Path;
    }
  }
+
  /// Primaries - Add the virtual methods of RD and of its chain of
  /// non-virtual primary bases (innermost first) to the current vtable part.
  void Primaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset,
                 bool updateVBIndex, Index_t current_vbindex,
                 int64_t CurrentVBaseOffset) {
    if (!RD->isDynamicClass())
      return;

    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
    const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
    const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();

    // vtables are composed from the chain of primaries.
    if (PrimaryBase && !PrimaryBaseWasVirtual) {
      D1(printf(" doing primaries for %s most derived %s\n",
                RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
      Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
                updateVBIndex, current_vbindex, CurrentVBaseOffset);
    }

    D1(printf(" doing vcall entries for %s most derived %s\n",
              RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));

    // And add the virtuals for the class to the primary vtable.
    AddMethods(RD, MorallyVirtual, Offset, CurrentVBaseOffset);
  }
+
+  void VBPrimaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset,
+                   bool updateVBIndex, Index_t current_vbindex,
+                   bool RDisVirtualBase, int64_t CurrentVBaseOffset,
+                   bool bottom) {
+    if (!RD->isDynamicClass())
+      return;
+
+    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+    const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+    const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
+
+    // vtables are composed from the chain of primaries.
+    if (PrimaryBase) {
+      int BaseCurrentVBaseOffset = CurrentVBaseOffset;
+      if (PrimaryBaseWasVirtual) {
+        IndirectPrimary.insert(PrimaryBase);
+        BaseCurrentVBaseOffset = BLayout.getVBaseClassOffset(PrimaryBase);
+      }
+
+      D1(printf(" doing primaries for %s most derived %s\n",
+                RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
+      
+      VBPrimaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
+                  updateVBIndex, current_vbindex, PrimaryBaseWasVirtual,
+                  BaseCurrentVBaseOffset, false);
+    }
+
+    D1(printf(" doing vbase entries for %s most derived %s\n",
+              RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
+    GenerateVBaseOffsets(RD, Offset, updateVBIndex, current_vbindex);
+
+    if (RDisVirtualBase || bottom) {
+      Primaries(RD, MorallyVirtual, Offset, updateVBIndex, current_vbindex,
+                CurrentVBaseOffset);
+    }
+  }
+
+  void GenerateVtableForBase(const CXXRecordDecl *RD, int64_t Offset = 0,
+                             bool MorallyVirtual = false, 
+                             bool ForVirtualBase = false,
+                             bool ForNPNVBases = false,
+                             bool WasPrimaryBase = true,
+                             int CurrentVBaseOffset = 0,
+                             Path_t *Path = 0) {
+    if (!RD->isDynamicClass())
+      return;
+
+    // Construction vtable don't need parts that have no virtual bases and
+    // aren't morally virtual.
+    if ((LayoutClass != MostDerivedClass) && 
+        RD->getNumVBases() == 0 && !MorallyVirtual)
+      return;
+
+    const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+    const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+    const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
+
+    extra = 0;
+    D1(printf("building entries for base %s most derived %s\n",
+              RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
+
+    if (ForVirtualBase)
+      extra = VCalls.size();
+
+    if (!ForNPNVBases || !WasPrimaryBase) {
+      VBPrimaries(RD, MorallyVirtual, Offset, !ForVirtualBase, 0,
+                  ForVirtualBase, CurrentVBaseOffset, true);
+
+      if (Path)
+        OverrideMethods(Path, MorallyVirtual, Offset, CurrentVBaseOffset);
+    }
+
+    FinishGenerateVtable(RD, Layout, PrimaryBase, ForNPNVBases, WasPrimaryBase,
+                         PrimaryBaseWasVirtual, MorallyVirtual, Offset,
+                         ForVirtualBase, CurrentVBaseOffset, Path);
+  }
+
  /// GenerateVtableForVBases - Recursively emit the vtable parts for all
  /// virtual bases of RD that have not already been covered as indirect
  /// primaries, pushing RD onto the override path for the duration.  Owns a
  /// temporary Path when the caller passed none.
  void GenerateVtableForVBases(const CXXRecordDecl *RD,
                               int64_t Offset = 0,
                               Path_t *Path = 0) {
    bool alloc = false;
    if (Path == 0) {
      alloc = true;
      Path = new Path_t;
    }
    // FIXME: We also need to override using all paths to a virtual base,
    // right now, we just process the first path
    Path->push_back(std::make_pair(RD, Offset));
    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
           e = RD->bases_end(); i != e; ++i) {
      const CXXRecordDecl *Base =
        cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
      if (i->isVirtual() && !IndirectPrimary.count(Base)) {
        // Mark it so we don't output it twice.
        IndirectPrimary.insert(Base);
        StartNewTable();
        VCall.clear();
        int64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
        int64_t CurrentVBaseOffset = BaseOffset;
        D1(printf("vtable %s virtual base %s\n",
                  MostDerivedClass->getNameAsCString(), Base->getNameAsCString()));
        GenerateVtableForBase(Base, BaseOffset, true, true, false,
                              true, CurrentVBaseOffset, Path);
      }
      // Continue the search for virtual bases nested under this base, at the
      // base's own offset (virtual bases are located via the layout class).
      int64_t BaseOffset;
      if (i->isVirtual())
        BaseOffset = BLayout.getVBaseClassOffset(Base);
      else {
        const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
        BaseOffset = Offset + Layout.getBaseClassOffset(Base);
      }
        
      if (Base->getNumVBases()) {
        GenerateVtableForVBases(Base, BaseOffset, Path);
      }
    }
    Path->pop_back();
    if (alloc)
      delete Path;
  }
+};
+} // end anonymous namespace
+
+/// TypeConversionRequiresAdjustment - Returns whether conversion from a 
+/// derived type to a base type requires adjustment.
/// TypeConversionRequiresAdjustment - Returns whether conversion from a
/// derived type to a base type requires adjustment: true when the base is
/// reached through a virtual base, or when any step of the (first) derivation
/// path places the base at a non-zero offset.
static bool
TypeConversionRequiresAdjustment(ASTContext &Ctx,
                                 const CXXRecordDecl *DerivedDecl,
                                 const CXXRecordDecl *BaseDecl) {
  CXXBasePaths Paths(/*FindAmbiguities=*/false,
                     /*RecordPaths=*/true, /*DetectVirtual=*/true);
  // isDerivedFrom is non-const; the decls are not modified.
  if (!const_cast<CXXRecordDecl *>(DerivedDecl)->
      isDerivedFrom(const_cast<CXXRecordDecl *>(BaseDecl), Paths)) {
    assert(false && "Class must be derived from the passed in base class!");
    return false;
  }
  
  // If we found a virtual base we always want to require adjustment.
  if (Paths.getDetectedVirtual())
    return true;
  
  const CXXBasePath &Path = Paths.front();
  
  for (size_t Start = 0, End = Path.size(); Start != End; ++Start) {
    const CXXBasePathElement &Element = Path[Start];
    
    // Check the base class offset.
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(Element.Class);
    
    const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
    const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
    
    if (Layout.getBaseClassOffset(Base) != 0) {
      // This requires an adjustment.
      return true;
    }
  }
  
  return false;
}
+
+static bool 
+TypeConversionRequiresAdjustment(ASTContext &Ctx,
+                                 QualType DerivedType, QualType BaseType) {
+  // Canonicalize the types.
+  QualType CanDerivedType = Ctx.getCanonicalType(DerivedType);
+  QualType CanBaseType = Ctx.getCanonicalType(BaseType);
+  
+  assert(CanDerivedType->getTypeClass() == CanBaseType->getTypeClass() && 
+         "Types must have same type class!");
+  
+  if (CanDerivedType == CanBaseType) {
+    // No adjustment needed.
+    return false;
+  }
+  
+  if (const ReferenceType *RT = dyn_cast<ReferenceType>(CanDerivedType)) {
+    CanDerivedType = RT->getPointeeType();
+    CanBaseType = cast<ReferenceType>(CanBaseType)->getPointeeType();
+  } else if (const PointerType *PT = dyn_cast<PointerType>(CanDerivedType)) {
+    CanDerivedType = PT->getPointeeType();
+    CanBaseType = cast<PointerType>(CanBaseType)->getPointeeType();
+  } else {
+    assert(false && "Unexpected return type!");
+  }
+  
+  if (CanDerivedType == CanBaseType) {
+    // No adjustment needed.
+    return false;
+  }
+  
+  const CXXRecordDecl *DerivedDecl = 
+    cast<CXXRecordDecl>(cast<RecordType>(CanDerivedType)->getDecl());
+  
+  const CXXRecordDecl *BaseDecl = 
+    cast<CXXRecordDecl>(cast<RecordType>(CanBaseType)->getDecl());
+  
+  return TypeConversionRequiresAdjustment(Ctx, DerivedDecl, BaseDecl);
+}
+
+// OverrideMethod - Handle the case where GD overrides a method already laid
+// out in this section of the vtable.  If an overridden method is found, the
+// slot is updated in place and any required 'this'-pointer adjustment
+// (non-virtual displacement plus, for overrides across virtual bases, a
+// vcall-offset-based virtual component) is recorded.
+// Returns true iff an overridden method was found in this section; otherwise
+// the caller appends GD as a fresh vtable slot.
+// NOTE(review): offsets appear to be in bits (they are divided by 8 before
+// being stored) — confirm against the callers.
+bool VtableBuilder::OverrideMethod(GlobalDecl GD, bool MorallyVirtual,
+                                   Index_t OverrideOffset, Index_t Offset,
+                                   int64_t CurrentVBaseOffset) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+  // Pure virtual methods never get thunks recorded for them (see the
+  // !isPure checks below).
+  const bool isPure = MD->isPure();
+  
+  // FIXME: Should OverrideOffset's be Offset?
+
+  // Walk every method this one directly overrides, looking for one that is
+  // present in the current vtable section.
+  for (CXXMethodDecl::method_iterator mi = MD->begin_overridden_methods(),
+       e = MD->end_overridden_methods(); mi != e; ++mi) {
+    GlobalDecl OGD;
+    GlobalDecl OGD2;
+    
+    const CXXMethodDecl *OMD = *mi;
+    // For destructors, carry the dtor kind (complete/deleting) over from GD
+    // so we look up the matching entry.
+    if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(OMD))
+      OGD = GlobalDecl(DD, GD.getDtorType());
+    else
+      OGD = OMD;
+
+    // Check whether this is the method being overridden in this section of
+    // the vtable.
+    uint64_t Index;
+    if (!Methods.getIndex(OGD, Index))
+      continue;
+
+    // Remember the directly-overridden method; its VCallOffset is still
+    // needed below after OGD is replaced by the original method.
+    OGD2 = OGD;
+
+    // Get the original method, which we should be computing thunks, etc,
+    // against.
+    OGD = Methods.getOrigMethod(Index);
+    OMD = cast<CXXMethodDecl>(OGD.getDecl());
+
+    QualType ReturnType = 
+      MD->getType()->getAs<FunctionType>()->getResultType();
+    QualType OverriddenReturnType = 
+      OMD->getType()->getAs<FunctionType>()->getResultType();
+    
+    // Check if we need a return type adjustment (covariant return).
+    if (TypeConversionRequiresAdjustment(CGM.getContext(), ReturnType, 
+                                          OverriddenReturnType)) {
+      CanQualType &BaseReturnType = BaseReturnTypes[Index];
+
+      // Insert the base return type.  Only the first base return type seen
+      // for this slot is kept.
+      if (BaseReturnType.isNull())
+        BaseReturnType =
+          CGM.getContext().getCanonicalType(OverriddenReturnType);
+    }
+
+    // Replace the overridden method's slot with GD.
+    Methods.OverrideMethod(OGD, GD);
+
+    // The unique final overrider of this method in the most derived class.
+    GlobalDecl UGD = getUnique(GD);
+    const CXXMethodDecl *UMD = cast<CXXMethodDecl>(UGD.getDecl());
+    // NOTE(review): this assert only fires after UGD.getDecl() has already
+    // been used by the cast above.
+    assert(UGD.getDecl() && "unique overrider not found");
+    assert(UGD == getUnique(OGD) && "unique overrider not unique");
+
+    // Any adjustment previously recorded for this slot is now stale.
+    ThisAdjustments.erase(Index);
+    if (MorallyVirtual || VCall.count(UMD)) {
+      // The override crosses (or may cross) a virtual base, so the 'this'
+      // adjustment needs a virtual component read from a vcall slot.
+
+      Index_t &idx = VCall[UMD];
+      if (idx == 0) {
+        // First time we see this unique overrider: allocate a vcall slot.
+        VCallOffset[GD] = VCallOffset[OGD];
+        // NonVirtualOffset[UMD] = CurrentVBaseOffset/8 - OverrideOffset/8;
+        NonVirtualOffset[UMD] = VCallOffset[OGD];
+        VCallOffsetForVCall[UMD] = OverrideOffset/8;
+        idx = VCalls.size()+1;
+        VCalls.push_back(OverrideOffset/8 - CurrentVBaseOffset/8);
+        D1(printf("  vcall for %s at %d with delta %d most derived %s\n",
+                  MD->getNameAsString().c_str(), (int)-idx-3,
+                  (int)VCalls[idx-1], MostDerivedClass->getNameAsCString()));
+      } else {
+        // Slot already allocated: patch it with the delta for this override.
+        // NOTE(review): VCallOffsetForVCall is indexed with UGD here but with
+        // UMD above — confirm both key forms resolve to the same entry.
+        VCallOffset[GD] = NonVirtualOffset[UMD];
+        VCalls[idx-1] = -VCallOffsetForVCall[UGD] + OverrideOffset/8;
+        D1(printf("  vcall patch for %s at %d with delta %d most derived %s\n",
+                  MD->getNameAsString().c_str(), (int)-idx-3,
+                  (int)VCalls[idx-1], MostDerivedClass->getNameAsCString()));
+      }
+      int64_t NonVirtualAdjustment = -VCallOffset[OGD];
+      QualType DerivedType = MD->getThisType(CGM.getContext());
+      QualType BaseType = cast<const CXXMethodDecl>(OGD.getDecl())->getThisType(CGM.getContext());
+      // The adjustment recomputed from the 'this' types wins when the two
+      // disagree.  NOTE(review): presumably these should always agree;
+      // confirm why both are computed.
+      int64_t NonVirtualAdjustment2 = -(getNVOffset(BaseType, DerivedType)/8);
+      if (NonVirtualAdjustment2 != NonVirtualAdjustment) {
+        NonVirtualAdjustment = NonVirtualAdjustment2;
+      }
+      // Byte offset of the vcall slot relative to the address point
+      // (negative: vcall offsets precede the address point).
+      int64_t VirtualAdjustment = 
+        -((idx + extra + 2) * LLVMPointerWidth / 8);
+      
+      // Optimize out virtual adjustments of 0.
+      if (VCalls[idx-1] == 0)
+        VirtualAdjustment = 0;
+      
+      ThunkAdjustment ThisAdjustment(NonVirtualAdjustment,
+                                      VirtualAdjustment);
+
+      // Record the thunk for this slot and save it for later reuse.
+      if (!isPure && !ThisAdjustment.isEmpty()) {
+        ThisAdjustments[Index] = ThisAdjustment;
+        SavedAdjustments.push_back(
+            std::make_pair(GD, std::make_pair(OGD, ThisAdjustment)));
+      }
+      return true;
+    }
+
+    // Non-virtual override: only a constant 'this' displacement is needed.
+    VCallOffset[GD] = VCallOffset[OGD2] - OverrideOffset/8;
+
+    int64_t NonVirtualAdjustment = -VCallOffset[GD];
+    QualType DerivedType = MD->getThisType(CGM.getContext());
+    QualType BaseType = cast<const CXXMethodDecl>(OGD.getDecl())->getThisType(CGM.getContext());
+    // Same recompute-and-prefer pattern as in the virtual case above.
+    int64_t NonVirtualAdjustment2 = -(getNVOffset(BaseType, DerivedType)/8);
+    if (NonVirtualAdjustment2 != NonVirtualAdjustment) {
+      NonVirtualAdjustment = NonVirtualAdjustment2;
+    }
+      
+    if (NonVirtualAdjustment) {
+      ThunkAdjustment ThisAdjustment(NonVirtualAdjustment, 0);
+      
+      if (!isPure) {
+        ThisAdjustments[Index] = ThisAdjustment;
+        SavedAdjustments.push_back(
+            std::make_pair(GD, std::make_pair(OGD, ThisAdjustment)));
+      }
+    }
+    return true;
+  }
+
+  // No overridden method found in this section.
+  return false;
+}
+
+// AppendMethodsToVtable - Flush the accumulated method layout into
+// VtableComponents, materializing for each slot either a covariant thunk, a
+// plain 'this'-adjusting thunk, the pure-virtual stub, or the method's own
+// address.  Clears the per-pass state (ThisAdjustments, BaseReturnTypes,
+// Methods) when done.
+void VtableBuilder::AppendMethodsToVtable() {
+  if (!BuildVtable) {
+    // Layout-only pass: emit null placeholders so component indices stay
+    // correct, and drop the adjustment bookkeeping.
+    VtableComponents.insert(VtableComponents.end(), Methods.size(), 
+                            (llvm::Constant *)0);
+    ThisAdjustments.clear();
+    BaseReturnTypes.clear();
+    Methods.clear();
+    return;
+  }
+
+  // Reserve room in the vtable for our new methods.
+  VtableComponents.reserve(VtableComponents.size() + Methods.size());
+
+  for (unsigned i = 0, e = Methods.size(); i != e; ++i) {
+    GlobalDecl GD = Methods[i];
+    const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+  
+    // Get the 'this' pointer adjustment.
+    ThunkAdjustment ThisAdjustment = ThisAdjustments.lookup(i);
+  
+    // Construct the return type adjustment.
+    ThunkAdjustment ReturnAdjustment;
+
+    QualType BaseReturnType = BaseReturnTypes.lookup(i);
+    if (!BaseReturnType.isNull() && !MD->isPure()) {
+      QualType DerivedType = 
+        MD->getType()->getAs<FunctionType>()->getResultType();
+      
+      int64_t NonVirtualAdjustment = 
+      getNVOffset(BaseReturnType, DerivedType) / 8;
+      
+      int64_t VirtualAdjustment = 
+      getVbaseOffset(BaseReturnType, DerivedType);
+      
+      ReturnAdjustment = ThunkAdjustment(NonVirtualAdjustment, 
+                                         VirtualAdjustment);
+    }
+
+    llvm::Constant *Method = 0;
+    if (!ReturnAdjustment.isEmpty()) {
+      // Build a covariant thunk.
+      CovariantThunkAdjustment Adjustment(ThisAdjustment, ReturnAdjustment);
+      Method = wrap(CGM.GetAddrOfCovariantThunk(GD, Adjustment));
+    } else if (!ThisAdjustment.isEmpty()) {
+      // Build a "regular" thunk.
+      Method = wrap(CGM.GetAddrOfThunk(GD, ThisAdjustment));
+    } else if (MD->isPure()) {
+      // We have a pure virtual method.
+      Method = getPureVirtualFn();
+    } else {
+      // We have a good old regular method.
+      Method = WrapAddrOf(GD);
+    }
+
+    // Add the method to the vtable.
+    VtableComponents.push_back(Method);
+  }
+  
+  // Reset the per-pass state for the next vtable section.
+  ThisAdjustments.clear();
+  BaseReturnTypes.clear();
+  
+  Methods.clear();
+}
+
+// ComputeMethodVtableIndices - Assign a vtable slot index (relative to the
+// address point) to every virtual method of RD, sharing slots with the
+// primary base where the ABI allows, and record the total slot count in
+// NumVirtualFunctionPointers[RD].
+void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
+  
+  // Itanium C++ ABI 2.5.2:
+  //   The order of the virtual function pointers in a virtual table is the 
+  //   order of declaration of the corresponding member functions in the class.
+  //
+  //   There is an entry for any virtual function declared in a class, 
+  //   whether it is a new function or overrides a base class function, 
+  //   unless it overrides a function from the primary base, and conversion
+  //   between their return types does not require an adjustment. 
+
+  int64_t CurrentIndex = 0;
+  
+  const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+  const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+  
+  if (PrimaryBase) {
+    assert(PrimaryBase->isDefinition() && 
+           "Should have the definition decl of the primary base!");
+
+    // Since the record decl shares its vtable pointer with the primary base
+    // we need to start counting at the end of the primary base's vtable.
+    CurrentIndex = getNumVirtualFunctionPointers(PrimaryBase);
+  }
+
+  // Collect all the primary bases, so we can check whether methods override
+  // a method from the base.
+  llvm::SmallPtrSet<const CXXRecordDecl *, 5> PrimaryBases;
+  for (ASTRecordLayout::primary_base_info_iterator
+       I = Layout.primary_base_begin(), E = Layout.primary_base_end();
+       I != E; ++I)
+    PrimaryBases.insert((*I).getBase());
+
+  // An implicit virtual destructor is laid out last (see below), so remember
+  // it instead of assigning it an index immediately.
+  const CXXDestructorDecl *ImplicitVirtualDtor = 0;
+  
+  for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+       e = RD->method_end(); i != e; ++i) {
+    const CXXMethodDecl *MD = *i;
+
+    // We only want virtual methods.
+    if (!MD->isVirtual())
+      continue;
+
+    bool ShouldAddEntryForMethod = true;
+    
+    // Check if this method overrides a method in the primary base.
+    // NOTE(review): this inner loop shadows the outer i/e iterators.
+    for (CXXMethodDecl::method_iterator i = MD->begin_overridden_methods(),
+         e = MD->end_overridden_methods(); i != e; ++i) {
+      const CXXMethodDecl *OverriddenMD = *i;
+      const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent();
+      assert(OverriddenMD->isCanonicalDecl() &&
+             "Should have the canonical decl of the overridden RD!");
+      
+      if (PrimaryBases.count(OverriddenRD)) {
+        // Check if converting from the return type of the method to the 
+        // return type of the overridden method requires conversion.
+        QualType ReturnType = 
+          MD->getType()->getAs<FunctionType>()->getResultType();
+        QualType OverriddenReturnType =
+          OverriddenMD->getType()->getAs<FunctionType>()->getResultType();
+        
+        if (!TypeConversionRequiresAdjustment(CGM.getContext(), 
+                                            ReturnType, OverriddenReturnType)) {
+          // This index is shared between the index in the vtable of the primary
+          // base class.
+          if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+            const CXXDestructorDecl *OverriddenDD = 
+              cast<CXXDestructorDecl>(OverriddenMD);
+            
+            // Add both the complete and deleting entries.
+            MethodVtableIndices[GlobalDecl(DD, Dtor_Complete)] = 
+              getMethodVtableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
+            MethodVtableIndices[GlobalDecl(DD, Dtor_Deleting)] = 
+              getMethodVtableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
+          } else {
+            MethodVtableIndices[MD] = getMethodVtableIndex(OverriddenMD);
+          }
+          
+          // We don't need to add an entry for this method.
+          ShouldAddEntryForMethod = false;
+          break;
+        }        
+      }
+    }
+    
+    if (!ShouldAddEntryForMethod)
+      continue;
+    
+    if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+      if (MD->isImplicit()) {
+        assert(!ImplicitVirtualDtor && 
+               "Did already see an implicit virtual dtor!");
+        ImplicitVirtualDtor = DD;
+        continue;
+      } 
+
+      // Add the complete dtor.
+      MethodVtableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
+      
+      // Add the deleting dtor.
+      MethodVtableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
+    } else {
+      // Add the entry.
+      MethodVtableIndices[MD] = CurrentIndex++;
+    }
+  }
+
+  if (ImplicitVirtualDtor) {
+    // Itanium C++ ABI 2.5.2:
+    // If a class has an implicitly-defined virtual destructor, 
+    // its entries come after the declared virtual function pointers.
+
+    // Add the complete dtor.
+    MethodVtableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] = 
+      CurrentIndex++;
+    
+    // Add the deleting dtor.
+    MethodVtableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] = 
+      CurrentIndex++;
+  }
+  
+  // Cache the total number of virtual function pointers for RD.
+  NumVirtualFunctionPointers[RD] = CurrentIndex;
+}
+
+// getNumVirtualFunctionPointers - Memoized count of virtual function
+// pointers in RD's vtable; computes the indices on a cache miss.
+uint64_t CGVtableInfo::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
+  llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I = 
+    NumVirtualFunctionPointers.find(RD);
+  if (I != NumVirtualFunctionPointers.end())
+    return I->second;
+
+  // Cache miss: compute (and cache) all indices for RD, then re-look-up.
+  ComputeMethodVtableIndices(RD);
+
+  I = NumVirtualFunctionPointers.find(RD);
+  assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!");
+  return I->second;
+}
+      
+// getMethodVtableIndex - Memoized lookup of the vtable slot index (relative
+// to the address point) for the given virtual function or destructor.
+uint64_t CGVtableInfo::getMethodVtableIndex(GlobalDecl GD) {
+  MethodVtableIndicesTy::iterator I = MethodVtableIndices.find(GD);
+  if (I != MethodVtableIndices.end())
+    return I->second;
+  
+  // Cache miss: compute indices for the method's whole class, then retry.
+  const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+  ComputeMethodVtableIndices(RD);
+
+  I = MethodVtableIndices.find(GD);
+  assert(I != MethodVtableIndices.end() && "Did not find index!");
+  return I->second;
+}
+
+// getAdjustments - Return the saved 'this'-pointer thunk adjustments for GD,
+// running a layout-only vtable build for GD's class on first request.
+// Returns null if GD needs no adjustments (or the class was already built).
+CGVtableInfo::AdjustmentVectorTy*
+CGVtableInfo::getAdjustments(GlobalDecl GD) {
+  SavedAdjustmentsTy::iterator I = SavedAdjustments.find(GD);
+  if (I != SavedAdjustments.end())
+    return &I->second;
+
+  // Only build each record's adjustments once; a failed insert means the
+  // class was already processed and GD simply has none.
+  const CXXRecordDecl *RD = cast<CXXRecordDecl>(GD.getDecl()->getDeclContext());
+  if (!SavedAdjustmentRecords.insert(RD).second)
+    return 0;
+
+  // Layout-only build (GenerateDefinition == false) to collect adjustments.
+  AddressPointsMapTy AddressPoints;
+  VtableBuilder b(RD, RD, 0, CGM, false, AddressPoints);
+  D1(printf("vtable %s\n", RD->getNameAsCString()));
+  b.GenerateVtableForBase(RD);
+  b.GenerateVtableForVBases(RD);
+
+  // Re-key the builder's flat adjustment list by overriding method.
+  for (VtableBuilder::SavedAdjustmentsVectorTy::iterator
+       i = b.getSavedAdjustments().begin(),
+       e = b.getSavedAdjustments().end(); i != e; i++)
+    SavedAdjustments[i->first].push_back(i->second);
+
+  I = SavedAdjustments.find(GD);
+  if (I != SavedAdjustments.end())
+    return &I->second;
+
+  return 0;
+}
+
+// getVirtualBaseOffsetIndex - Return the (cached) index into RD's vtable
+// where the offset of virtual base VBase is stored.  On a cache miss a
+// layout-only vtable build is run and all of RD's vbase indices are cached.
+int64_t CGVtableInfo::getVirtualBaseOffsetIndex(const CXXRecordDecl *RD, 
+                                                const CXXRecordDecl *VBase) {
+  ClassPairTy ClassPair(RD, VBase);
+  
+  VirtualBaseClassIndiciesTy::iterator I = 
+    VirtualBaseClassIndicies.find(ClassPair);
+  if (I != VirtualBaseClassIndicies.end())
+    return I->second;
+  
+  // FIXME: This seems expensive.  Can we do a partial job to get
+  // just this data.
+  AddressPointsMapTy AddressPoints;
+  VtableBuilder b(RD, RD, 0, CGM, false, AddressPoints);
+  D1(printf("vtable %s\n", RD->getNameAsCString()));
+  b.GenerateVtableForBase(RD);
+  b.GenerateVtableForVBases(RD);
+  
+  // Cache every vbase index the builder produced, not just the one asked
+  // for.  NOTE(review): the loop's I and ClassPair shadow the outer ones.
+  for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+       b.getVBIndex().begin(), E = b.getVBIndex().end(); I != E; ++I) {
+    // Insert all types.
+    ClassPairTy ClassPair(RD, I->first);
+    
+    VirtualBaseClassIndicies.insert(std::make_pair(ClassPair, I->second));
+  }
+  
+  I = VirtualBaseClassIndicies.find(ClassPair);
+  assert(I != VirtualBaseClassIndicies.end() && "Did not find index!");
+  
+  return I->second;
+}
+
+// getVtableAddressPoint - Look up RD's own address point (offset 0) in the
+// nested AddressPoints maps.
+// NOTE(review): both map pointers are dereferenced unchecked — this assumes
+// RD's vtable has already been generated; confirm callers guarantee that.
+uint64_t CGVtableInfo::getVtableAddressPoint(const CXXRecordDecl *RD) {
+  uint64_t AddressPoint = 
+    (*(*(CGM.getVtableInfo().AddressPoints[RD]))[RD])[std::make_pair(RD, 0)];
+  
+  return AddressPoint;
+}
+
+// GenerateVtable - Create (or complete) the LLVM global for a vtable.
+// When LayoutClass != RD this is a construction vtable for base RD at the
+// given Offset (in bits) within LayoutClass; otherwise it is RD's own
+// vtable.  With GenerateDefinition false only a declaration is produced.
+// AddressPoints is filled in by the builder for the caller's use.
+llvm::GlobalVariable *
+CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
+                             bool GenerateDefinition,
+                             const CXXRecordDecl *LayoutClass,
+                             const CXXRecordDecl *RD, uint64_t Offset,
+                             AddressPointsMapTy& AddressPoints) {
+  // Compute the mangled name: construction vtables get the offset (in
+  // bytes) and both classes encoded.
+  llvm::SmallString<256> OutName;
+  if (LayoutClass != RD)
+    CGM.getMangleContext().mangleCXXCtorVtable(LayoutClass, Offset / 8, 
+                                               RD, OutName);
+  else
+    CGM.getMangleContext().mangleCXXVtable(RD, OutName);
+  llvm::StringRef Name = OutName.str();
+
+  // (Re)build the vtable unless an existing definition with recorded
+  // address points can be reused.
+  llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+  if (GV == 0 || CGM.getVtableInfo().AddressPoints[LayoutClass] == 0 || 
+      GV->isDeclaration()) {
+    VtableBuilder b(RD, LayoutClass, Offset, CGM, GenerateDefinition,
+                    AddressPoints);
+
+    D1(printf("vtable %s\n", RD->getNameAsCString()));
+    // First comes the vtables for all the non-virtual bases...
+    b.GenerateVtableForBase(RD, Offset);
+
+    // then the vtables for all the virtual bases.
+    b.GenerateVtableForVBases(RD, Offset);
+
+    // The vtable is emitted as an array of i8*, one per component.
+    llvm::Constant *Init = 0;
+    const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+    llvm::ArrayType *ArrayType = 
+      llvm::ArrayType::get(Int8PtrTy, b.getVtableComponents().size());
+
+    if (GenerateDefinition)
+      Init = llvm::ConstantArray::get(ArrayType, &b.getVtableComponents()[0], 
+                                      b.getVtableComponents().size());
+
+    // A fresh global is always created; if one already existed (e.g. an
+    // earlier declaration), transfer its name and uses to the new global
+    // and delete the old one.
+    llvm::GlobalVariable *OGV = GV;
+    
+    GV = new llvm::GlobalVariable(CGM.getModule(), ArrayType, 
+                                  /*isConstant=*/true, Linkage, Init, Name);
+    CGM.setGlobalVisibility(GV, RD);
+  
+    if (OGV) {
+      GV->takeName(OGV);
+      llvm::Constant *NewPtr = 
+        llvm::ConstantExpr::getBitCast(GV, OGV->getType());
+      OGV->replaceAllUsesWith(NewPtr);
+      OGV->eraseFromParent();
+    }
+  }
+  
+  return GV;
+}
+
+// GenerateClassData - Emit the class data (vtable definition and VTT) for RD
+// with the given linkage.  No-op if RD's vtable was already defined.
+void CGVtableInfo::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+                                     const CXXRecordDecl *RD) {
+  llvm::GlobalVariable *&Vtable = Vtables[RD];
+  if (Vtable) {
+    // The cache only ever holds defined vtables.
+    assert(Vtable->getInitializer() && "Vtable doesn't have a definition!");
+    return;
+  }
+  
+  AddressPointsMapTy AddressPoints;
+  Vtable = GenerateVtable(Linkage, /*GenerateDefinition=*/true, RD, RD, 0,
+                          AddressPoints);
+  GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);  
+}
+
+// getVtable - Return RD's vtable global, emitting an external declaration
+// (no definition) if it has not been generated yet.  Note the declaration
+// is intentionally NOT cached in Vtables, which holds definitions only.
+llvm::GlobalVariable *CGVtableInfo::getVtable(const CXXRecordDecl *RD) {
+  llvm::GlobalVariable *Vtable = Vtables.lookup(RD);
+  
+  if (!Vtable) {
+    AddressPointsMapTy AddressPoints;
+    Vtable = GenerateVtable(llvm::GlobalValue::ExternalLinkage, 
+                            /*GenerateDefinition=*/false, RD, RD, 0,
+                            AddressPoints);
+  }
+
+  return Vtable;
+}
+
+// MaybeEmitVtable - Emit the vtable/VTT for GD's class if GD is the class's
+// key function (or the class has none), then emit thunks for its inline or
+// implicit virtual members.
+void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
+  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+  const CXXRecordDecl *RD = MD->getParent();
+
+  // If the class doesn't have a vtable we don't need to emit one.
+  if (!RD->isDynamicClass())
+    return;
+  
+  // Get the key function.
+  const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD);
+  
+  if (KeyFunction) {
+    // We don't have the right key function.
+    if (KeyFunction->getCanonicalDecl() != MD->getCanonicalDecl())
+      return;
+  }
+
+  // Emit the data.
+  GenerateClassData(CGM.getVtableLinkage(RD), RD);
+
+  // Thunks for virtual methods defined inline (or implicitly) would
+  // otherwise never be emitted, so build them alongside the vtable.
+  for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+       e = RD->method_end(); i != e; ++i) {
+    if ((*i)->isVirtual() && ((*i)->hasInlineBody() || (*i)->isImplicit())) {
+      if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(*i)) {
+        // Destructors need thunks for both complete and deleting variants.
+        CGM.BuildThunksForVirtual(GlobalDecl(DD, Dtor_Complete));
+        CGM.BuildThunksForVirtual(GlobalDecl(DD, Dtor_Deleting));
+      } else {
+        CGM.BuildThunksForVirtual(GlobalDecl(*i));
+      }
+    }
+  }
+}
+
diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVtable.h
new file mode 100644
index 0000000..471d638
--- /dev/null
+++ b/lib/CodeGen/CGVtable.h
@@ -0,0 +1,251 @@
+//===--- CGVtable.h - Emit LLVM Code for C++ vtables ----------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVTABLE_H
+#define CLANG_CODEGEN_CGVTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/GlobalVariable.h"
+#include "GlobalDecl.h"
+
+namespace clang {
+  class CXXRecordDecl;
+
+namespace CodeGen {
+  class CodeGenModule;
+
+/// ThunkAdjustment - Virtual and non-virtual adjustment for thunks.
+/// The non-virtual part is a constant byte displacement of 'this'; the
+/// virtual part locates an offset to load through the vtable.
+class ThunkAdjustment {
+public:
+  ThunkAdjustment(int64_t NonVirtual, int64_t Virtual)
+  : NonVirtual(NonVirtual),
+    Virtual(Virtual) { }
+
+  /// Default construction yields the identity (empty) adjustment.
+  ThunkAdjustment()
+    : NonVirtual(0), Virtual(0) { }
+
+  /// isEmpty - Return whether this thunk adjustment is empty (both parts
+  /// zero, i.e. no adjustment is required).
+  bool isEmpty() const {
+    return NonVirtual == 0 && Virtual == 0;
+  }
+
+  /// NonVirtual - The non-virtual adjustment.
+  int64_t NonVirtual;
+
+  /// Virtual - The virtual adjustment.
+  int64_t Virtual;
+};
+
+/// CovariantThunkAdjustment - Adjustment of the 'this' pointer and the
+/// return pointer for covariant thunks.
+class CovariantThunkAdjustment {
+public:
+  CovariantThunkAdjustment(const ThunkAdjustment &ThisAdjustment,
+                           const ThunkAdjustment &ReturnAdjustment)
+  : ThisAdjustment(ThisAdjustment), ReturnAdjustment(ReturnAdjustment) { }
+
+  /// Default construction yields empty 'this' and return adjustments.
+  CovariantThunkAdjustment() { }
+
+  /// ThisAdjustment - Applied to 'this' on entry to the thunk.
+  ThunkAdjustment ThisAdjustment;
+  /// ReturnAdjustment - Applied to the returned pointer before returning.
+  ThunkAdjustment ReturnAdjustment;
+};
+
+// BaseSubobject - Uniquely identifies a direct or indirect base class. 
+// Stores both the base class decl and the offset from the most derived class to
+// the base class.
+class BaseSubobject {
+  /// Base - The base class declaration.
+  const CXXRecordDecl *Base;
+  
+  /// BaseOffset - The offset from the most derived class to the base class.
+  uint64_t BaseOffset;
+  
+public:
+  BaseSubobject(const CXXRecordDecl *Base, uint64_t BaseOffset)
+    : Base(Base), BaseOffset(BaseOffset) { }
+  
+  /// getBase - Returns the base class declaration.
+  const CXXRecordDecl *getBase() const { return Base; }
+
+  /// getBaseOffset - Returns the base class offset.
+  uint64_t getBaseOffset() const { return BaseOffset; }
+
+  /// Two subobjects are equal only if both the class and the offset match,
+  /// since the same base class can occur at several offsets.
+  friend bool operator==(const BaseSubobject &LHS, const BaseSubobject &RHS) {
+    return LHS.Base == RHS.Base && LHS.BaseOffset == RHS.BaseOffset;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+
+/// Allow BaseSubobject to be used as a DenseMap key: the empty/tombstone
+/// keys combine the member types' sentinels, and the hash XORs the two
+/// members' hashes.
+template<> struct DenseMapInfo<clang::CodeGen::BaseSubobject> {
+  static clang::CodeGen::BaseSubobject getEmptyKey() {
+    return clang::CodeGen::BaseSubobject(
+      DenseMapInfo<const clang::CXXRecordDecl *>::getEmptyKey(),
+      DenseMapInfo<uint64_t>::getEmptyKey());
+  }
+
+  static clang::CodeGen::BaseSubobject getTombstoneKey() {
+    return clang::CodeGen::BaseSubobject(
+      DenseMapInfo<const clang::CXXRecordDecl *>::getTombstoneKey(),
+      DenseMapInfo<uint64_t>::getTombstoneKey());
+  }
+
+  static unsigned getHashValue(const clang::CodeGen::BaseSubobject &Base) {
+    return 
+      DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
+      DenseMapInfo<uint64_t>::getHashValue(Base.getBaseOffset());
+  }
+
+  static bool isEqual(const clang::CodeGen::BaseSubobject &LHS, 
+                      const clang::CodeGen::BaseSubobject &RHS) {
+    return LHS == RHS;
+  }
+};
+
+// It's OK to treat BaseSubobject as a POD type: it holds only a pointer and
+// an integer, so containers may relocate it with a raw memory copy.
+template <> struct isPodLike<clang::CodeGen::BaseSubobject> {
+  static const bool value = true;
+};
+
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// CGVtableInfo - Module-level bookkeeping for C++ vtables: method slot
+/// indices, virtual-base offset indices, generated vtable/VTT globals,
+/// address points, and saved thunk adjustments.
+class CGVtableInfo {
+public:
+  /// AdjustmentVectorTy - A list of (overridden method, adjustment) pairs.
+  typedef std::vector<std::pair<GlobalDecl, ThunkAdjustment> >
+      AdjustmentVectorTy;
+
+  /// CtorVtable_t - A (class, offset) pair identifying a base subobject's
+  /// vtable section.
+  typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t;
+  typedef llvm::DenseMap<CtorVtable_t, int64_t> AddrSubMap_t;
+  typedef llvm::DenseMap<const CXXRecordDecl *, AddrSubMap_t *> AddrMap_t;
+  /// AddressPoints - Per layout class, the recorded vtable address points.
+  llvm::DenseMap<const CXXRecordDecl *, AddrMap_t*> AddressPoints;
+
+  /// AddressPointsMapTy - Address point per base subobject.
+  typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
+
+private:
+  CodeGenModule &CGM;
+
+  /// MethodVtableIndices - Contains the index (relative to the vtable address
+  /// point) where the function pointer for a virtual function is stored.
+  typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVtableIndicesTy;
+  MethodVtableIndicesTy MethodVtableIndices;
+
+  typedef std::pair<const CXXRecordDecl *,
+                    const CXXRecordDecl *> ClassPairTy;
+
+  /// VirtualBaseClassIndicies - Contains the index into the vtable where the
+  /// offsets for virtual bases of a class are stored.
+  typedef llvm::DenseMap<ClassPairTy, int64_t> VirtualBaseClassIndiciesTy;
+  VirtualBaseClassIndiciesTy VirtualBaseClassIndicies;
+
+  /// Vtables - All the vtables which have been defined.
+  llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> Vtables;
+  
+  /// NumVirtualFunctionPointers - Contains the number of virtual function 
+  /// pointers in the vtable for a given record decl.
+  llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers;
+
+  /// SavedAdjustments - 'this'-adjustments collected while building a
+  /// class's vtable, keyed by the overriding method.
+  typedef llvm::DenseMap<GlobalDecl, AdjustmentVectorTy> SavedAdjustmentsTy;
+  SavedAdjustmentsTy SavedAdjustments;
+  /// SavedAdjustmentRecords - Classes whose adjustments have already been
+  /// computed, so the work is done at most once per record.
+  llvm::DenseSet<const CXXRecordDecl*> SavedAdjustmentRecords;
+
+  /// SubVTTIndicies - The index of the sub-VTT within a class's VTT for each
+  /// (class, base) pair.
+  typedef llvm::DenseMap<ClassPairTy, uint64_t> SubVTTIndiciesTy;
+  SubVTTIndiciesTy SubVTTIndicies;
+
+  /// getNumVirtualFunctionPointers - Return the number of virtual function
+  /// pointers in the vtable for a given record decl.
+  uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
+  
+  void ComputeMethodVtableIndices(const CXXRecordDecl *RD);
+  
+  /// GenerateClassData - Generate all the class data required to be generated
+  /// upon definition of a KeyFunction.  This includes the vtable, the
+  /// rtti data structure and the VTT.
+  /// 
+  /// \param Linkage - The desired linkage of the vtable, the RTTI and the VTT.
+  void GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+                         const CXXRecordDecl *RD);
+ 
+  llvm::GlobalVariable *
+  GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
+                 bool GenerateDefinition, const CXXRecordDecl *LayoutClass, 
+                 const CXXRecordDecl *RD, uint64_t Offset,
+                 AddressPointsMapTy& AddressPoints);
+
+  llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+                                    bool GenerateDefinition,
+                                    const CXXRecordDecl *RD);
+
+public:
+  CGVtableInfo(CodeGenModule &CGM)
+    : CGM(CGM) { }
+
+  /// needsVTTParameter - Return whether the given global decl needs a VTT
+  /// parameter, which it does if it's a base constructor or destructor with
+  /// virtual bases.
+  static bool needsVTTParameter(GlobalDecl GD);
+
+  /// getSubVTTIndex - Return the index of the sub-VTT for the base class of the
+  /// given record decl.
+  uint64_t getSubVTTIndex(const CXXRecordDecl *RD, const CXXRecordDecl *Base);
+  
+  /// getMethodVtableIndex - Return the index (relative to the vtable address
+  /// point) where the function pointer for the given virtual function is
+  /// stored.
+  uint64_t getMethodVtableIndex(GlobalDecl GD);
+
+  /// getVirtualBaseOffsetIndex - Return the index (relative to the vtable
+  /// address point) where the offset of the virtual base that contains the
+  /// given Base is stored, otherwise, if no virtual base contains the given
+  /// class, return 0.  Base must be a virtual base class or an unambiguous
+  /// base.
+  int64_t getVirtualBaseOffsetIndex(const CXXRecordDecl *RD,
+                                    const CXXRecordDecl *VBase);
+
+  /// getAdjustments - Return the saved 'this' adjustments for GD, or null if
+  /// there are none.
+  AdjustmentVectorTy *getAdjustments(GlobalDecl GD);
+
+  /// getVtableAddressPoint - returns the address point of the vtable for the
+  /// given record decl.
+  /// FIXME: This should return a list of address points.
+  uint64_t getVtableAddressPoint(const CXXRecordDecl *RD);
+  
+  /// getVtable - Return the vtable global for RD, declaring it if needed.
+  llvm::GlobalVariable *getVtable(const CXXRecordDecl *RD);
+  
+  /// CtorVtableInfo - Information about a constructor vtable.
+  struct CtorVtableInfo {
+    /// Vtable - The vtable itself.
+    llvm::GlobalVariable *Vtable;
+  
+    /// AddressPoints - The address points in this constructor vtable.
+    AddressPointsMapTy AddressPoints;
+    
+    CtorVtableInfo() : Vtable(0) { }
+  };
+  
+  CtorVtableInfo getCtorVtable(const CXXRecordDecl *RD, 
+                               const BaseSubobject &Base);
+  
+  llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
+  
+  /// MaybeEmitVtable - Emit the vtable for GD's class if GD is its key
+  /// function (or the class has no key function).
+  void MaybeEmitVtable(GlobalDecl GD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+#endif
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
new file mode 100644
index 0000000..e72a1d9
--- /dev/null
+++ b/lib/CodeGen/CMakeLists.txt
@@ -0,0 +1,34 @@
+# Build without RTTI, matching the rest of LLVM/Clang.
+set(LLVM_NO_RTTI 1)
+
+# clangCodeGen - lowers Clang ASTs to LLVM IR.
+add_clang_library(clangCodeGen
+  CGBlocks.cpp
+  CGBuiltin.cpp
+  CGCall.cpp
+  CGClass.cpp
+  CGCXX.cpp
+  CGDebugInfo.cpp
+  CGDecl.cpp
+  CGDeclCXX.cpp
+  CGException.cpp
+  CGExpr.cpp
+  CGExprAgg.cpp
+  CGExprComplex.cpp
+  CGExprConstant.cpp
+  CGExprCXX.cpp
+  CGExprScalar.cpp
+  CGObjC.cpp
+  CGObjCGNU.cpp
+  CGObjCMac.cpp
+  CGRecordLayoutBuilder.cpp
+  CGRTTI.cpp
+  CGStmt.cpp
+  CGTemporaries.cpp
+  CGVtable.cpp
+  CGVTT.cpp
+  CodeGenFunction.cpp
+  CodeGenModule.cpp
+  CodeGenTypes.cpp
+  Mangle.cpp
+  ModuleBuilder.cpp
+  TargetInfo.cpp
+  )
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..5a4f94e
--- /dev/null
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,844 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+// CodeGenFunction - Initialize per-function IRgen state.  Most members start
+// out null/zero and are filled in later by StartFunction(); here we only
+// cache values that are fixed for the whole module: the LLVM type for 'int',
+// the target pointer width, and the exception-related language options.
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+  : BlockFunction(cgm, *this, Builder), CGM(cgm),
+    Target(CGM.getContext().Target),
+    Builder(cgm.getModule().getContext()),
+    DebugInfo(0), IndirectBranch(0),
+    SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+    CXXThisDecl(0), CXXVTTDecl(0),
+    ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0),
+    UniqueAggrDestructorCount(0) {
+  LLVMIntTy = ConvertType(getContext().IntTy);
+  LLVMPointerWidth = Target.getPointerWidth(0);
+  Exceptions = getContext().getLangOptions().Exceptions;
+  CatchUndefined = getContext().getLangOptions().CatchUndefined;
+}
+
+/// getContext - Return the ASTContext of the module this function is
+/// being emitted into.
+ASTContext &CodeGenFunction::getContext() const {
+  return CGM.getContext();
+}
+
+
+/// getBasicBlockForLabel - Return the LLVM basic block for the given
+/// source-level label, creating it lazily (but not inserting it into the
+/// function) so forward gotos have something to branch to.
+llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
+  llvm::BasicBlock *&BB = LabelMap[S];
+  if (BB) return BB;
+
+  // Create, but don't insert, the new block.
+  return BB = createBasicBlock(S->getName());
+}
+
+/// GetAddrOfLocalVar - Return the address of a local variable that has
+/// already been emitted into LocalDeclMap; asking for a declaration with
+/// no entry is an error.
+llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
+  llvm::Value *Res = LocalDeclMap[VD];
+  assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+  return Res;
+}
+
+/// GetAddrOfStaticLocalVar - Return the address of a function-local static
+/// variable; its storage is a global, so the address is always a Constant.
+llvm::Constant *
+CodeGenFunction::GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+  return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+}
+
+/// ConvertTypeForMem - Convert a Clang type to the LLVM type used for its
+/// in-memory representation (which may differ from the type used when the
+/// value is loaded into a scalar).  Forwards to CodeGenTypes.
+const llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+  return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+/// ConvertType - Convert a Clang type to the corresponding LLVM type.
+/// Forwards to CodeGenTypes.
+const llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+  return CGM.getTypes().ConvertType(T);
+}
+
+/// hasAggregateLLVMType - Return true if codegen treats the given Clang
+/// type as an aggregate: records, arrays, complex numbers, and member
+/// function pointers.
+bool CodeGenFunction::hasAggregateLLVMType(QualType T) {
+  if (T->isRecordType() || T->isArrayType())
+    return true;
+  return T->isAnyComplexType() || T->isMemberFunctionPointerType();
+}
+
+/// EmitReturnBlock - Emit the unified return block, folding it into the
+/// current insertion point or its single predecessor when that yields
+/// simpler IR.
+void CodeGenFunction::EmitReturnBlock() {
+  // For cleanliness, we try to avoid emitting the return block for
+  // simple cases.
+  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+  if (CurBB) {
+    assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+    // We have a valid insert point, reuse it if it is empty or there are no
+    // explicit jumps to the return block.
+    if (CurBB->empty() || ReturnBlock->use_empty()) {
+      ReturnBlock->replaceAllUsesWith(CurBB);
+      // ReturnBlock was never emitted into the function, so delete it
+      // directly rather than erasing it from a parent.
+      delete ReturnBlock;
+    } else
+      EmitBlock(ReturnBlock);
+    return;
+  }
+
+  // Otherwise, if the return block is the target of a single direct
+  // branch then we can just put the code in that block instead. This
+  // cleans up functions which started with a unified return block.
+  if (ReturnBlock->hasOneUse()) {
+    llvm::BranchInst *BI =
+      dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
+    if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
+      // Reset insertion point and delete the branch.
+      Builder.SetInsertPoint(BI->getParent());
+      BI->eraseFromParent();
+      delete ReturnBlock;
+      return;
+    }
+  }
+
+  // FIXME: We are at an unreachable point, there is no reason to emit the block
+  // unless it has uses. However, we still need a place to put the debug
+  // region.end for now.
+
+  EmitBlock(ReturnBlock);
+}
+
+/// FinishFunction - Complete emission of the current function: emit the
+/// return block and function epilog, close the EH spec and debug region,
+/// and tear down the alloca insertion-point marker and indirect-goto
+/// machinery.
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+  assert(BreakContinueStack.empty() &&
+         "mismatched push/pop in break/continue stack!");
+  assert(BlockScopes.empty() &&
+         "did not remove all blocks from block scope map!");
+  assert(CleanupEntries.empty() &&
+         "mismatched push/pop in cleanup stack!");
+
+  // Emit function epilog (to return).
+  EmitReturnBlock();
+
+  // Emit debug descriptor for function end.
+  if (CGDebugInfo *DI = getDebugInfo()) {
+    DI->setLocation(EndLoc);
+    DI->EmitRegionEnd(CurFn, Builder);
+  }
+
+  EmitFunctionEpilog(*CurFnInfo, ReturnValue);
+  EmitEndEHSpec(CurCodeDecl);
+
+  // If someone did an indirect goto, emit the indirect goto block at the end of
+  // the function.
+  if (IndirectBranch) {
+    EmitBlock(IndirectBranch->getParent());
+    Builder.ClearInsertionPoint();
+  }
+  
+  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+  llvm::Instruction *Ptr = AllocaInsertPt;
+  AllocaInsertPt = 0;
+  Ptr->eraseFromParent();
+  
+  // If someone took the address of a label but never did an indirect goto, we
+  // made a zero entry PHI node, which is illegal, zap it now.
+  if (IndirectBranch) {
+    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
+    if (PN->getNumIncomingValues() == 0) {
+      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
+      PN->eraseFromParent();
+    }
+  }
+}
+
+/// StartFunction - Begin emission of Fn: create the entry block and the
+/// alloca insertion-point marker, set up the return-value slot, and emit
+/// the function prolog, the start of the EH spec and (if enabled) the
+/// debug-info subprogram descriptor.
+void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
+                                    llvm::Function *Fn,
+                                    const FunctionArgList &Args,
+                                    SourceLocation StartLoc) {
+  const Decl *D = GD.getDecl();
+  
+  DidCallStackSave = false;
+  CurCodeDecl = CurFuncDecl = D;
+  FnRetTy = RetTy;
+  CurFn = Fn;
+  assert(CurFn->isDeclaration() && "Function already has body?");
+
+  // Pass inline keyword to optimizer if it appears explicitly on any
+  // declaration.
+  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+    for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
+           RE = FD->redecls_end(); RI != RE; ++RI)
+      if (RI->isInlineSpecified()) {
+        Fn->addFnAttr(llvm::Attribute::InlineHint);
+        break;
+      }
+
+  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+  // Create a marker to make it easy to insert allocas into the entryblock
+  // later.  Don't create this with the builder, because we don't want it
+  // folded.
+  llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext));
+  AllocaInsertPt = new llvm::BitCastInst(Undef,
+                                         llvm::Type::getInt32Ty(VMContext), "",
+                                         EntryBB);
+  if (Builder.isNamePreserving())
+    AllocaInsertPt->setName("allocapt");
+
+  ReturnBlock = createBasicBlock("return");
+
+  Builder.SetInsertPoint(EntryBB);
+
+  // Build a parameter-less function type; it is only consumed by the debug
+  // info below.
+  QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0);
+
+  // Emit subprogram debug descriptor.
+  if (CGDebugInfo *DI = getDebugInfo()) {
+    DI->setLocation(StartLoc);
+    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
+  }
+
+  // FIXME: Leaked.
+  // CC info is ignored, hopefully?
+  CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
+                                              CC_Default, false);
+
+  if (RetTy->isVoidType()) {
+    // Void type; nothing to return.
+    ReturnValue = 0;
+  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+             hasAggregateLLVMType(CurFnInfo->getReturnType())) {
+    // Indirect aggregate return; emit returned value directly into sret slot.
+    // This reduces code size, and is also affects correctness in C++.
+    ReturnValue = CurFn->arg_begin();
+  } else {
+    ReturnValue = CreateTempAlloca(ConvertType(RetTy), "retval");
+  }
+
+  EmitStartEHSpec(CurCodeDecl);
+  EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+  // If any of the arguments have a variably modified type, make sure to
+  // emit the type size.
+  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+       i != e; ++i) {
+    QualType Ty = i->second;
+
+    if (Ty->isVariablyModifiedType())
+      EmitVLASize(Ty);
+  }
+}
+
+/// GenerateCode - Emit the body of the function or method GD into Fn.
+/// Builds the argument list (including the implicit 'this' and VTT
+/// parameters for C++ instance methods), then dispatches on the kind of
+/// body: a compound statement, an implicitly-defined special member to
+/// synthesize, or a function-try-block.
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
+  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+  
+  // Check if we should generate debug info for this function.
+  if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
+    DebugInfo = CGM.getDebugInfo();
+
+  FunctionArgList Args;
+
+  CurGD = GD;
+  OuterTryBlock = 0;
+  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+    if (MD->isInstance()) {
+      // Create the implicit 'this' decl.
+      // FIXME: I'm not entirely sure I like using a fake decl just for code
+      // generation. Maybe we can come up with a better way?
+      CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
+                                              &getContext().Idents.get("this"),
+                                              MD->getThisType(getContext()));
+      Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
+      
+      // Check if we need a VTT parameter as well.
+      if (CGVtableInfo::needsVTTParameter(GD)) {
+        // FIXME: The comment about using a fake decl above applies here too.
+        QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+        CXXVTTDecl = 
+          ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
+                                    &getContext().Idents.get("vtt"), T);
+        Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
+      }
+    }
+  }
+
+  if (FD->getNumParams()) {
+    const FunctionProtoType* FProto = FD->getType()->getAs<FunctionProtoType>();
+    assert(FProto && "Function def must have prototype!");
+
+    for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+      Args.push_back(std::make_pair(FD->getParamDecl(i),
+                                    FProto->getArgType(i)));
+  }
+
+  if (const CompoundStmt *S = FD->getCompoundBody()) {
+    StartFunction(GD, FD->getResultType(), Fn, Args, S->getLBracLoc());
+
+    if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+      EmitCtorPrologue(CD, GD.getCtorType());
+      EmitStmt(S);
+      
+      // If any of the member initializers are temporaries bound to references
+      // make sure to emit their destructors.
+      EmitCleanupBlocks(0);
+      
+    } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
+      // Destructor bodies run the epilogue (member/base destruction) as a
+      // cleanup scope pushed around the user-written body.
+      llvm::BasicBlock *DtorEpilogue  = createBasicBlock("dtor.epilogue");
+      PushCleanupBlock(DtorEpilogue);
+
+      InitializeVtablePtrs(DD->getParent());
+
+      EmitStmt(S);
+      
+      CleanupBlockInfo Info = PopCleanupBlock();
+
+      assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
+      EmitBlock(DtorEpilogue);
+      EmitDtorEpilogue(DD, GD.getDtorType());
+      
+      if (Info.SwitchBlock)
+        EmitBlock(Info.SwitchBlock);
+      if (Info.EndBlock)
+        EmitBlock(Info.EndBlock);
+    } else {
+      // Just a regular function, emit its body.
+      EmitStmt(S);
+    }
+    
+    FinishFunction(S->getRBracLoc());
+  } else if (FD->isImplicit()) {
+    // No body was written; synthesize the implicitly-defined special member.
+    const CXXRecordDecl *ClassDecl =
+      cast<CXXRecordDecl>(FD->getDeclContext());
+    (void) ClassDecl;
+    if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+      // FIXME: For C++0x, we want to look for implicit *definitions* of
+      // these special member functions, rather than implicit *declarations*.
+      if (CD->isCopyConstructor()) {
+        assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
+               "Cannot synthesize a non-implicit copy constructor");
+        SynthesizeCXXCopyConstructor(CD, GD.getCtorType(), Fn, Args);
+      } else if (CD->isDefaultConstructor()) {
+        assert(!ClassDecl->hasUserDeclaredConstructor() &&
+               "Cannot synthesize a non-implicit default constructor.");
+        SynthesizeDefaultConstructor(CD, GD.getCtorType(), Fn, Args);
+      } else {
+        assert(false && "Implicit constructor cannot be synthesized");
+      }
+    } else if (const CXXDestructorDecl *CD = dyn_cast<CXXDestructorDecl>(FD)) {
+      assert(!ClassDecl->hasUserDeclaredDestructor() &&
+             "Cannot synthesize a non-implicit destructor");
+      SynthesizeDefaultDestructor(CD, GD.getDtorType(), Fn, Args);
+    } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+      assert(MD->isCopyAssignment() && 
+             !ClassDecl->hasUserDeclaredCopyAssignment() &&
+             "Cannot synthesize a method that is not an implicit-defined "
+             "copy constructor");
+      SynthesizeCXXCopyAssignment(MD, Fn, Args);
+    } else {
+      assert(false && "Cannot synthesize unknown implicit function");
+    }
+  } else if (const Stmt *S = FD->getBody()) {
+    // A function-try-block: the try statement itself is the entire body.
+    if (const CXXTryStmt *TS = dyn_cast<CXXTryStmt>(S)) {
+      OuterTryBlock = TS;
+      StartFunction(GD, FD->getResultType(), Fn, Args, TS->getTryLoc());
+      EmitStmt(TS);
+      FinishFunction(TS->getEndLoc());
+    }
+  }
+
+  // Destroy the 'this' declaration.
+  if (CXXThisDecl)
+    CXXThisDecl->Destroy(getContext());
+  
+  // Destroy the VTT declaration.
+  if (CXXVTTDecl)
+    CXXVTTDecl->Destroy(getContext());
+}
+
+/// ContainsLabel - Return true if the statement contains a label in it.  If
+/// this statement is not executed normally, then not containing a label
+/// means that we can just remove the code.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+  // Null statement, not a label!
+  if (S == 0) return false;
+
+  // If this is a label, we have to emit the code, consider something like:
+  // if (0) {  ...  foo:  bar(); }  goto foo;
+  if (isa<LabelStmt>(S))
+    return true;
+
+  // If this is a case/default statement, and we haven't seen a switch, we have
+  // to emit the code.
+  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+    return true;
+
+  // If this is a switch statement, we want to ignore cases below it.
+  if (isa<SwitchStmt>(S))
+    IgnoreCaseStmts = true;
+
+  // Scan subexpressions for verboten labels.
+  for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+       I != E; ++I)
+    if (ContainsLabel(*I, IgnoreCaseStmts))
+      return true;
+
+  return false;
+}
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold to
+/// a constant, or if it does but contains a label, return 0.  If it constant
+/// folds to 'true' and does not contain a label, return 1, if it constant folds
+/// to 'false' and does not contain a label, return -1.
+int CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond) {
+  // FIXME: Rename and handle conversion of other evaluatable things
+  // to bool.
+  Expr::EvalResult Result;
+  // Side effects must still be emitted, so a folded-but-effectful condition
+  // is treated as non-foldable.
+  if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
+      Result.HasSideEffects)
+    return 0;  // Not foldable, not integer or not fully evaluatable.
+
+  if (CodeGenFunction::ContainsLabel(Cond))
+    return 0;  // Contains a label.
+
+  return Result.Val.getInt().getBoolValue() ? 1 : -1;
+}
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks.  Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+                                           llvm::BasicBlock *TrueBlock,
+                                           llvm::BasicBlock *FalseBlock) {
+  // Parens are transparent for branching purposes.
+  if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond))
+    return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
+
+  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+    // Handle X && Y in a condition.
+    if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
+      // If we have "1 && X", simplify the code.  "0 && X" would have constant
+      // folded if the case was simple enough.
+      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == 1) {
+        // br(1 && X) -> br(X).
+        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+      }
+
+      // If we have "X && 1", simplify the code to use an uncond branch.
+      // "X && 0" would have been constant folded to 0.
+      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
+        // br(X && 1) -> br(X).
+        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+      }
+
+      // Emit the LHS as a conditional.  If the LHS conditional is false, we
+      // want to jump to the FalseBlock.
+      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+      EmitBlock(LHSTrue);
+
+      // Any temporaries created here are conditional.
+      BeginConditionalBranch();
+      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+      EndConditionalBranch();
+
+      return;
+    } else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
+      // If we have "0 || X", simplify the code.  "1 || X" would have constant
+      // folded if the case was simple enough.
+      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS()) == -1) {
+        // br(0 || X) -> br(X).
+        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+      }
+
+      // If we have "X || 0", simplify the code to use an uncond branch.
+      // "X || 1" would have been constant folded to 1.
+      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
+        // br(X || 0) -> br(X).
+        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+      }
+
+      // Emit the LHS as a conditional.  If the LHS conditional is true, we
+      // want to jump to the TrueBlock.
+      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+      EmitBlock(LHSFalse);
+
+      // Any temporaries created here are conditional.
+      BeginConditionalBranch();
+      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+      EndConditionalBranch();
+
+      return;
+    }
+  }
+
+  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+    // br(!x, t, f) -> br(x, f, t)
+    if (CondUOp->getOpcode() == UnaryOperator::LNot)
+      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+  }
+
+  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+    // Handle ?: operator.
+
+    // Just ignore GNU ?: extension.
+    if (CondOp->getLHS()) {
+      // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+      // Each arm branches directly to the final destinations.
+      llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+      llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+      EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+      EmitBlock(LHSBlock);
+      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+      EmitBlock(RHSBlock);
+      EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+      return;
+    }
+  }
+
+  // Emit the code with the fully general case.
+  llvm::Value *CondV = EvaluateExprAsBool(Cond);
+  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
+                                       bool OmitOnError) {
+  // Delegate to CodeGenModule, which owns the diagnostics machinery.
+  CGM.ErrorUnsupported(S, Type, OmitOnError);
+}
+
+/// EmitMemSetToZero - Zero-initialize the object of type Ty at DestPtr by
+/// emitting a call to the memset intrinsic.
+void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
+  // The memset function operates on i8*, so cast the destination if needed.
+  const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
+  if (DestPtr->getType() != BP)
+    DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
+
+  // Get size and alignment info for this aggregate.
+  std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
+
+  // Don't bother emitting a zero-byte memset.
+  if (TypeInfo.first == 0)
+    return;
+
+  // FIXME: Handle variable sized types.
+  const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext,
+                                                    LLVMPointerWidth);
+
+  // TypeInfo.first/.second describe size and alignment in bits, so divide
+  // by eight to get the byte quantities the call expects.
+  llvm::Value *Zero =
+    llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext));
+  llvm::Value *SizeVal = llvm::ConstantInt::get(IntPtr, TypeInfo.first / 8);
+  llvm::Value *AlignVal =
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                           TypeInfo.second / 8);
+  Builder.CreateCall4(CGM.getMemSetFn(), DestPtr, Zero, SizeVal, AlignVal);
+}
+
+/// GetAddrOfLabel - Return a BlockAddress for the given label (GNU '&&label'),
+/// registering its block as a destination of the shared indirect branch.
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelStmt *L) {
+  // Make sure that there is a block for the indirect goto.
+  if (IndirectBranch == 0)
+    GetIndirectGotoBlock();
+  
+  llvm::BasicBlock *BB = getBasicBlockForLabel(L);
+  
+  // Make sure the indirect branch includes all of the address-taken blocks.
+  IndirectBranch->addDestination(BB);
+  return llvm::BlockAddress::get(CurFn, BB);
+}
+
+/// GetIndirectGotoBlock - Lazily create the single shared indirect-goto
+/// dispatch block: a PHI of destination addresses feeding an indirectbr.
+llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
+  // If we already made the indirect branch for indirect goto, return its block.
+  if (IndirectBranch) return IndirectBranch->getParent();
+  
+  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+  
+  const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+
+  // Create the PHI node that indirect gotos will add entries to.
+  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, "indirect.goto.dest");
+  
+  // Create the indirect branch instruction.
+  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
+  return IndirectBranch->getParent();
+}
+
+/// GetVLASize - Return the previously-emitted runtime size for the given
+/// variable-length array type; EmitVLASize must already have been called
+/// for it (the size is memoized by the VLA's size expression).
+llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
+  llvm::Value *Size = VLASizeMap[VAT->getSizeExpr()];
+  assert(Size && "Did not emit size for type");
+  return Size;
+}
+
+/// EmitVLASize - Compute and memoize the runtime sizes needed for the
+/// variably modified type Ty.  For a variable array type, returns its total
+/// size (element size times number of elements); for other variably
+/// modified types (arrays of VLAs, pointers to VLAs) the contained VLA
+/// sizes are emitted recursively and 0 is returned.
+llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
+  assert(Ty->isVariablyModifiedType() &&
+         "Must pass variably modified type to EmitVLASizes!");
+
+  EnsureInsertPoint();
+
+  if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
+    // Memoize by size expression so the size is only computed once.
+    llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
+
+    if (!SizeEntry) {
+      const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+      // Get the element size.
+      QualType ElemTy = VAT->getElementType();
+      llvm::Value *ElemSize;
+      if (ElemTy->isVariableArrayType())
+        ElemSize = EmitVLASize(ElemTy);
+      else
+        ElemSize = llvm::ConstantInt::get(SizeTy,
+            getContext().getTypeSizeInChars(ElemTy).getQuantity());
+
+      llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
+      NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
+
+      SizeEntry = Builder.CreateMul(ElemSize, NumElements);
+    }
+
+    return SizeEntry;
+  }
+
+  if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
+    EmitVLASize(AT->getElementType());
+    return 0;
+  }
+
+  const PointerType *PT = Ty->getAs<PointerType>();
+  assert(PT && "unknown VM type!");
+  EmitVLASize(PT->getPointeeType());
+  return 0;
+}
+
+/// EmitVAListRef - Return the address of the va_list expression E.  When the
+/// builtin va_list is an array type the expression decays to a pointer, so
+/// evaluating it as a scalar already yields the address; otherwise the
+/// lvalue's address is used.
+llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+  QualType VaListTy = CGM.getContext().getBuiltinVaListType();
+  if (VaListTy->isArrayType())
+    return EmitScalarExpr(E);
+  return EmitLValue(E).getAddress();
+}
+
+/// PushCleanupBlock - Push a new cleanup scope onto the cleanup stack.  The
+/// blocks and flags are simply recorded here; PopCleanupBlock later does the
+/// work of wiring them up with any branch fixups.
+void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
+                                       llvm::BasicBlock *CleanupExitBlock,
+                                       llvm::BasicBlock *PreviousInvokeDest,
+                                       bool EHOnly) {
+  CleanupEntries.push_back(CleanupEntry(CleanupEntryBlock, CleanupExitBlock,
+                                        PreviousInvokeDest, EHOnly));
+}
+
+/// EmitCleanupBlocks - Pop and emit cleanup scopes until the cleanup stack
+/// is back down to OldCleanupStackSize entries.
+void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) {
+  assert(CleanupEntries.size() >= OldCleanupStackSize &&
+         "Cleanup stack mismatch!");
+
+  // Each EmitCleanupBlock call pops exactly one entry.
+  while (CleanupEntries.size() != OldCleanupStackSize)
+    EmitCleanupBlock();
+}
+
+/// PopCleanupBlock - Pop the innermost cleanup scope, rewiring any branch
+/// fixups that jump out through it.  Returns the blocks the caller must
+/// emit: the cleanup entry itself, an optional switch block that routes
+/// control onward to the eventual destinations, and an optional end block.
+CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
+  CleanupEntry &CE = CleanupEntries.back();
+
+  llvm::BasicBlock *CleanupEntryBlock = CE.CleanupEntryBlock;
+
+  std::vector<llvm::BasicBlock *> Blocks;
+  std::swap(Blocks, CE.Blocks);
+
+  std::vector<llvm::BranchInst *> BranchFixups;
+  std::swap(BranchFixups, CE.BranchFixups);
+
+  bool EHOnly = CE.EHOnly;
+
+  // Restore the invoke destination that was active before this scope.
+  setInvokeDest(CE.PreviousInvokeDest);
+
+  CleanupEntries.pop_back();
+
+  // Check if any branch fixups pointed to the scope we just popped. If so,
+  // we can remove them.
+  for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+    llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
+    BlockScopeMap::iterator I = BlockScopes.find(Dest);
+
+    if (I == BlockScopes.end())
+      continue;
+
+    assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
+
+    if (I->second == CleanupEntries.size()) {
+      // We don't need to do this branch fixup.
+      // Swap-erase: replace with the last fixup and shrink the range.
+      BranchFixups[i] = BranchFixups.back();
+      BranchFixups.pop_back();
+      i--;
+      e--;
+      continue;
+    }
+  }
+
+  llvm::BasicBlock *SwitchBlock = CE.CleanupExitBlock;
+  llvm::BasicBlock *EndBlock = 0;
+  if (!BranchFixups.empty()) {
+    if (!SwitchBlock)
+      SwitchBlock = createBasicBlock("cleanup.switch");
+    EndBlock = createBasicBlock("cleanup.end");
+
+    llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+    Builder.SetInsertPoint(SwitchBlock);
+
+    // The destination code is stored in a stack slot; each rewired branch
+    // stores its own case ID there before jumping into the cleanup.
+    llvm::Value *DestCodePtr
+      = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext),
+                         "cleanup.dst");
+    llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+
+    // Create a switch instruction to determine where to jump next.
+    llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
+                                                BranchFixups.size());
+
+    // Restore the current basic block (if any)
+    if (CurBB) {
+      Builder.SetInsertPoint(CurBB);
+
+      // If we had a current basic block, we also need to emit an instruction
+      // to initialize the cleanup destination.
+      Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)),
+                          DestCodePtr);
+    } else
+      Builder.ClearInsertionPoint();
+
+    for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
+      llvm::BranchInst *BI = BranchFixups[i];
+      llvm::BasicBlock *Dest = BI->getSuccessor(0);
+
+      // Fixup the branch instruction to point to the cleanup block.
+      BI->setSuccessor(0, CleanupEntryBlock);
+
+      if (CleanupEntries.empty()) {
+        llvm::ConstantInt *ID;
+
+        // Check if we already have a destination for this block.
+        if (Dest == SI->getDefaultDest())
+          ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+        else {
+          ID = SI->findCaseDest(Dest);
+          if (!ID) {
+            // No code found, get a new unique one by using the number of
+            // switch successors.
+            ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                        SI->getNumSuccessors());
+            SI->addCase(ID, Dest);
+          }
+        }
+
+        // Store the jump destination before the branch instruction.
+        new llvm::StoreInst(ID, DestCodePtr, BI);
+      } else {
+        // We need to jump through another cleanup block. Create a pad block
+        // with a branch instruction that jumps to the final destination and add
+        // it as a branch fixup to the current cleanup scope.
+
+        // Create the pad block.
+        llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
+
+        // Create a unique case ID.
+        llvm::ConstantInt *ID
+          = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                                   SI->getNumSuccessors());
+
+        // Store the jump destination before the branch instruction.
+        new llvm::StoreInst(ID, DestCodePtr, BI);
+
+        // Add it as the destination.
+        SI->addCase(ID, CleanupPad);
+
+        // Create the branch to the final destination.
+        llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
+        CleanupPad->getInstList().push_back(BI);
+
+        // And add it as a branch fixup.
+        CleanupEntries.back().BranchFixups.push_back(BI);
+      }
+    }
+  }
+
+  // Remove all blocks from the block scope map.
+  for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
+    assert(BlockScopes.count(Blocks[i]) &&
+           "Did not find block in scope map!");
+
+    BlockScopes.erase(Blocks[i]);
+  }
+
+  return CleanupBlockInfo(CleanupEntryBlock, SwitchBlock, EndBlock, EHOnly);
+}
+
+/// EmitCleanupBlock - Pop and emit the innermost cleanup scope, splicing the
+/// cleanup's instructions into the current block when the cleanup is only
+/// reachable by fallthrough (no terminator, no other uses).
+void CodeGenFunction::EmitCleanupBlock() {
+  CleanupBlockInfo Info = PopCleanupBlock();
+
+  if (Info.EHOnly) {
+    // FIXME: Add this to the exceptional edge
+    if (Info.CleanupBlock->getNumUses() == 0)
+      delete Info.CleanupBlock;
+    return;
+  }
+
+  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+  if (CurBB && !CurBB->getTerminator() &&
+      Info.CleanupBlock->getNumUses() == 0) {
+    // Fold the cleanup into the current block and discard the now-empty one.
+    CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
+    delete Info.CleanupBlock;
+  } else
+    EmitBlock(Info.CleanupBlock);
+
+  if (Info.SwitchBlock)
+    EmitBlock(Info.SwitchBlock);
+  if (Info.EndBlock)
+    EmitBlock(Info.EndBlock);
+}
+
+/// AddBranchFixup - Record a branch instruction so that the innermost
+/// cleanup scope can rewrite it (via PopCleanupBlock) to run the cleanup
+/// before reaching its real destination.
+void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) {
+  assert(!CleanupEntries.empty() &&
+         "Trying to add branch fixup without cleanup block!");
+
+  // FIXME: We could be more clever here and check if there's already a branch
+  // fixup for this destination and recycle it.
+  CleanupEntry &Innermost = CleanupEntries.back();
+  Innermost.BranchFixups.push_back(BI);
+}
+
+/// EmitBranchThroughCleanup - Emit a branch to Dest, registering it as a
+/// branch fixup if it may leave one or more cleanup scopes; the fixup is
+/// later rewritten by PopCleanupBlock to run the intervening cleanups.
+void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) {
+  if (!HaveInsertPoint())
+    return;
+
+  llvm::BranchInst* BI = Builder.CreateBr(Dest);
+
+  Builder.ClearInsertionPoint();
+
+  // The stack is empty, no need to do any cleanup.
+  if (CleanupEntries.empty())
+    return;
+
+  if (!Dest->getParent()) {
+    // We are trying to branch to a block that hasn't been inserted yet.
+    AddBranchFixup(BI);
+    return;
+  }
+
+  BlockScopeMap::iterator I = BlockScopes.find(Dest);
+  if (I == BlockScopes.end()) {
+    // We are trying to jump to a block that is outside of any cleanup scope.
+    AddBranchFixup(BI);
+    return;
+  }
+
+  assert(I->second < CleanupEntries.size() &&
+         "Trying to branch into cleanup region");
+
+  if (I->second == CleanupEntries.size() - 1) {
+    // We have a branch to a block in the same scope.
+    return;
+  }
+
+  AddBranchFixup(BI);
+}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..0e601e5
--- /dev/null
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,1327 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
+#define CLANG_CODEGEN_CODEGENFUNCTION_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ValueHandle.h"
+#include <map>
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGValue.h"
+
+namespace llvm {
+  class BasicBlock;
+  class LLVMContext;
+  class Module;
+  class SwitchInst;
+  class Twine;
+  class Value;
+}
+
+namespace clang {
+  class ASTContext;
+  class CXXDestructorDecl;
+  class CXXTryStmt;
+  class Decl;
+  class EnumConstantDecl;
+  class FunctionDecl;
+  class FunctionProtoType;
+  class LabelStmt;
+  class ObjCContainerDecl;
+  class ObjCInterfaceDecl;
+  class ObjCIvarDecl;
+  class ObjCMethodDecl;
+  class ObjCImplementationDecl;
+  class ObjCPropertyImplDecl;
+  class TargetInfo;
+  class VarDecl;
+  class ObjCForCollectionStmt;
+  class ObjCAtTryStmt;
+  class ObjCAtThrowStmt;
+  class ObjCAtSynchronizedStmt;
+
+namespace CodeGen {
+  class CodeGenModule;
+  class CodeGenTypes;
+  class CGDebugInfo;
+  class CGFunctionInfo;
+  class CGRecordLayout;
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public BlockFunction {
+  CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
+  void operator=(const CodeGenFunction&);  // DO NOT IMPLEMENT
+public:
+  CodeGenModule &CGM;  // Per-module state.
+  const TargetInfo &Target;
+
+  typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+  CGBuilderTy Builder;
+
+  /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
+  /// This excludes BlockDecls.
+  const Decl *CurFuncDecl;
+  /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+  const Decl *CurCodeDecl;
+  const CGFunctionInfo *CurFnInfo;
+  QualType FnRetTy;
+  llvm::Function *CurFn;
+
+  /// CurGD - The GlobalDecl for the current function being compiled.
+  GlobalDecl CurGD;
+  /// OuterTryBlock - This is the address of the outermost try block, 0
+  /// otherwise.
+  const Stmt *OuterTryBlock;
+
+  /// ReturnBlock - Unified return block.
+  llvm::BasicBlock *ReturnBlock;
+  /// ReturnValue - The temporary alloca to hold the return value. This is null
+  /// iff the function has no return value.
+  llvm::Value *ReturnValue;
+
+  /// AllocaInsertPt - This is an instruction in the entry block before which
+  /// we prefer to insert allocas.
+  llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+  const llvm::Type *LLVMIntTy;
+  uint32_t LLVMPointerWidth;
+
+  bool Exceptions;
+  bool CatchUndefined;
+public:
+  /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+  /// rethrows.
+  llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+  /// PushCleanupBlock - Push a new cleanup entry on the stack and set the
+  /// passed in block as the cleanup block.
+  void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
+                        llvm::BasicBlock *CleanupExitBlock,
+                        llvm::BasicBlock *PreviousInvokeDest,
+                        bool EHOnly = false);
+  /// Convenience overload: push \arg CleanupEntryBlock as a normal (not
+  /// EH-only) cleanup with no separate exit block, keeping the current
+  /// invoke destination.
+  void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock) {
+    PushCleanupBlock(CleanupEntryBlock, 0, getInvokeDest(), false);
+  }
+
+  /// CleanupBlockInfo - A struct representing a popped cleanup block.
+  struct CleanupBlockInfo {
+    /// CleanupBlock - the cleanup entry block.
+    llvm::BasicBlock *CleanupBlock;
+
+    /// SwitchBlock - the block (if any) containing the switch instruction used
+    /// for jumping to the final destination.
+    llvm::BasicBlock *SwitchBlock;
+
+    /// EndBlock - the default destination for the switch instruction.
+    llvm::BasicBlock *EndBlock;
+
+    /// EHOnly - True iff this cleanup should only be performed on the
+    /// exceptional edge.
+    bool EHOnly;
+
+    /// Construct from the popped blocks; any of the block pointers may be
+    /// null when the corresponding block was not needed.
+    CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb,
+                     llvm::BasicBlock *eb, bool ehonly = false)
+      : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb), EHOnly(ehonly) {}
+  };
+
+  /// EHCleanupBlock - RAII object that will create a cleanup block for the
+  /// exceptional edge and set the insert point to that block.  When destroyed,
+  /// it creates the cleanup edge and sets the insert point to the previous
+  /// block.
+  class EHCleanupBlock {
+    CodeGenFunction& CGF;
+    /// Cont - Block where normal (non-exceptional) control flow resumes.
+    llvm::BasicBlock *Cont;
+    /// CleanupHandler - Block named "ehcleanup"; presumably the EH entry
+    /// point for this cleanup — wired up in the out-of-line destructor.
+    llvm::BasicBlock *CleanupHandler;
+    /// CleanupEntryBB - Block that receives the cleanup code emitted while
+    /// this object is alive.
+    llvm::BasicBlock *CleanupEntryBB;
+    /// PreviousInvokeDest - The invoke destination in effect before this
+    /// object was created; captured for the destructor (presumably to
+    /// restore it — confirm against the out-of-line definition).
+    llvm::BasicBlock *PreviousInvokeDest;
+  public:
+    EHCleanupBlock(CodeGenFunction &cgf) 
+      : CGF(cgf), Cont(CGF.createBasicBlock("cont")),
+        CleanupHandler(CGF.createBasicBlock("ehcleanup")),
+        CleanupEntryBB(CGF.createBasicBlock("ehcleanup.rest")),
+        PreviousInvokeDest(CGF.getInvokeDest()) {
+      // Branch past the cleanup on the normal edge, then start emitting the
+      // cleanup body; calls made inside it unwind to the terminate handler.
+      CGF.EmitBranch(Cont);
+      llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
+      CGF.Builder.SetInsertPoint(CleanupEntryBB);
+      CGF.setInvokeDest(TerminateHandler);
+    }
+    ~EHCleanupBlock();
+  };
+
+  /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all
+  /// branch fixups and return a block info struct with the switch block and end
+  /// block.  This will also reset the invoke handler to the previous value
+  /// from when the cleanup block was created.
+  CleanupBlockInfo PopCleanupBlock();
+
+  /// DelayedCleanupBlock - RAII object that will create a cleanup block and set
+  /// the insert point to that block. When destructed, it sets the insert point
+  /// to the previous block and pushes a new cleanup entry on the stack.
+  class DelayedCleanupBlock {
+    CodeGenFunction& CGF;
+    /// CurBB - The insert block active at construction, restored by the
+    /// destructor (may be null if there was no insertion point).
+    llvm::BasicBlock *CurBB;
+    /// CleanupEntryBB - Block that collects the cleanup code.
+    llvm::BasicBlock *CleanupEntryBB;
+    /// CleanupExitBB - Lazily created exit block; stays null unless
+    /// getCleanupExitBlock() is called.
+    llvm::BasicBlock *CleanupExitBB;
+    /// CurInvokeDest - Invoke destination at construction time, recorded in
+    /// the cleanup entry when it is pushed.
+    llvm::BasicBlock *CurInvokeDest;
+    /// EHOnly - Whether the cleanup runs only on the exceptional edge.
+    bool EHOnly;
+    
+  public:
+    DelayedCleanupBlock(CodeGenFunction &cgf, bool ehonly = false)
+      : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()),
+        CleanupEntryBB(CGF.createBasicBlock("cleanup")), CleanupExitBB(0),
+        CurInvokeDest(CGF.getInvokeDest()),
+        EHOnly(ehonly) {
+      // Redirect all subsequent emission into the cleanup block.
+      CGF.Builder.SetInsertPoint(CleanupEntryBB);
+    }
+
+    /// getCleanupExitBlock - Create the exit block on first use and return it.
+    llvm::BasicBlock *getCleanupExitBlock() {
+      if (!CleanupExitBB)
+        CleanupExitBB = CGF.createBasicBlock("cleanup.exit");
+      return CleanupExitBB;
+    }
+    
+    ~DelayedCleanupBlock() {
+      // Push the collected cleanup onto the stack, then restore the insert
+      // point that was active before this object was created.
+      CGF.PushCleanupBlock(CleanupEntryBB, CleanupExitBB, CurInvokeDest,
+                           EHOnly);
+      // FIXME: This is silly, move this into the builder.
+      if (CurBB)
+        CGF.Builder.SetInsertPoint(CurBB);
+      else
+        CGF.Builder.ClearInsertionPoint();
+    }
+  };
+
+  /// \brief Enters a new scope for capturing cleanups, all of which will be
+  /// executed once the scope is exited.
+  class CleanupScope {
+    CodeGenFunction& CGF;
+    /// CleanupStackDepth - Size of CGF.CleanupEntries when this scope was
+    /// entered; everything pushed above this depth belongs to the scope.
+    size_t CleanupStackDepth;
+    /// OldDidCallStackSave - Saved flag, restored when the scope exits.
+    bool OldDidCallStackSave;
+    /// PerformCleanup - Cleared by ForceCleanup() so cleanups are never
+    /// emitted twice.
+    bool PerformCleanup;
+
+    CleanupScope(const CleanupScope &); // DO NOT IMPLEMENT
+    CleanupScope &operator=(const CleanupScope &); // DO NOT IMPLEMENT
+
+  public:
+    /// \brief Enter a new cleanup scope.
+    explicit CleanupScope(CodeGenFunction &CGF) 
+      : CGF(CGF), PerformCleanup(true) 
+    {
+      CleanupStackDepth = CGF.CleanupEntries.size();
+      OldDidCallStackSave = CGF.DidCallStackSave;
+    }
+
+    /// \brief Exit this cleanup scope, emitting any accumulated
+    /// cleanups.  Delegates to ForceCleanup() so the restore-and-emit
+    /// logic lives in exactly one place.
+    ~CleanupScope() {
+      if (PerformCleanup)
+        ForceCleanup();
+    }
+
+    /// \brief Determine whether this scope requires any cleanups.
+    bool requiresCleanups() const {
+      return CGF.CleanupEntries.size() > CleanupStackDepth;
+    }
+
+    /// \brief Force the emission of cleanups now, instead of waiting
+    /// until this object is destroyed.
+    void ForceCleanup() {
+      assert(PerformCleanup && "Already forced cleanup");
+      CGF.DidCallStackSave = OldDidCallStackSave;
+      CGF.EmitCleanupBlocks(CleanupStackDepth);
+      PerformCleanup = false;
+    }
+  };
+
+  /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
+  /// blocks that have been added.
+  void EmitCleanupBlocks(size_t OldCleanupStackSize);
+
+  /// EmitBranchThroughCleanup - Emit a branch from the current insert block
+  /// through the cleanup handling code (if any) and then on to \arg Dest.
+  ///
+  /// FIXME: Maybe this should really be in EmitBranch? Don't we always want
+  /// this behavior for branches?
+  void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+
+  /// BeginConditionalBranch - Should be called before a conditional part of an
+  /// expression is emitted. For example, before the RHS of the expression below
+  /// is emitted:
+  ///
+  /// b && f(T());
+  ///
+  /// This is used to make sure that any temporaries created in the conditional
+  /// branch are only destroyed if the branch is taken.
+  void BeginConditionalBranch() {
+    // Nesting counter: while nonzero, the code being emitted is only
+    // conditionally evaluated, so temporaries must be destroyed conditionally.
+    ++ConditionalBranchLevel;
+  }
+
+  /// EndConditionalBranch - Should be called after a conditional part of an
+  /// expression has been emitted.
+  void EndConditionalBranch() {
+    // Must pair exactly with a preceding BeginConditionalBranch().
+    assert(ConditionalBranchLevel != 0 &&
+           "Conditional branch mismatch!");
+    
+    --ConditionalBranchLevel;
+  }
+
+private:
+  CGDebugInfo *DebugInfo;
+
+  /// IndirectBranch - The first time an indirect goto is seen we create a block
+  /// with an indirect branch.  Every time we see the address of a label taken,
+  /// we add the label to the indirect goto.  Every subsequent indirect goto is
+  /// codegen'd as a jump to the IndirectBranch's basic block.
+  llvm::IndirectBrInst *IndirectBranch;
+
+  /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+  /// decls.
+  llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+
+  /// LabelMap - This keeps track of the LLVM basic block for each C label.
+  llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap;
+
+  // BreakContinueStack - This keeps track of where break and continue
+  // statements should jump to.
+  struct BreakContinue {
+    BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
+      : BreakBlock(bb), ContinueBlock(cb) {}
+
+    /// BreakBlock - Target for a 'break' in the enclosing statement.
+    llvm::BasicBlock *BreakBlock;
+    /// ContinueBlock - Target for a 'continue' in the enclosing statement.
+    llvm::BasicBlock *ContinueBlock;
+  };
+  llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
+
+  /// SwitchInsn - This is the nearest enclosing switch instruction. It is
+  /// null if the current context is not in a switch.
+  llvm::SwitchInst *SwitchInsn;
+
+  /// CaseRangeBlock - This block holds if condition check for last case
+  /// statement range in current switch instruction.
+  llvm::BasicBlock *CaseRangeBlock;
+
+  /// InvokeDest - This is the nearest exception target for calls
+  /// which can unwind, when exceptions are being used.
+  llvm::BasicBlock *InvokeDest;
+
+  // VLASizeMap - This keeps track of the associated size for each VLA type.
+  // We track this by the size expression rather than the type itself because
+  // in certain situations, like a const qualifier applied to an VLA typedef,
+  // multiple VLA types can share the same size expression.
+  // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+  // enter/leave scopes.
+  llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
+
+  /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+  /// calling llvm.stacksave for multiple VLAs in the same scope.
+  bool DidCallStackSave;
+
+  struct CleanupEntry {
+    /// CleanupEntryBlock - The block of code that does the actual cleanup.
+    llvm::BasicBlock *CleanupEntryBlock;
+
+    /// CleanupExitBlock - The cleanup exit block; may be null when the
+    /// cleanup has no separate exit.
+    llvm::BasicBlock *CleanupExitBlock;
+    
+    /// Blocks - Basic blocks that were emitted in the current cleanup scope.
+    std::vector<llvm::BasicBlock *> Blocks;
+
+    /// BranchFixups - Branch instructions to basic blocks that haven't been
+    /// inserted into the current function yet.
+    std::vector<llvm::BranchInst *> BranchFixups;
+
+    /// PreviousInvokeDest - The invoke handler from the start of the cleanup
+    /// region.
+    llvm::BasicBlock *PreviousInvokeDest;
+
+    /// EHOnly - Perform this only on the exceptional edge, not the main edge.
+    bool EHOnly;
+
+    /// Construct an entry; Blocks and BranchFixups start out empty and are
+    /// populated as the scope is emitted.
+    explicit CleanupEntry(llvm::BasicBlock *CleanupEntryBlock,
+                          llvm::BasicBlock *CleanupExitBlock,
+                          llvm::BasicBlock *PreviousInvokeDest,
+                          bool ehonly)
+      : CleanupEntryBlock(CleanupEntryBlock),
+        CleanupExitBlock(CleanupExitBlock),
+        PreviousInvokeDest(PreviousInvokeDest),
+        EHOnly(ehonly) {}
+  };
+
+  /// CleanupEntries - Stack of cleanup entries.
+  llvm::SmallVector<CleanupEntry, 8> CleanupEntries;
+
+  typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap;
+
+  /// BlockScopes - Maps each emitted basic block to the index of the
+  /// cleanup scope it belongs to.
+  BlockScopeMap BlockScopes;
+
+  /// CXXThisDecl - When generating code for a C++ member function,
+  /// this will hold the implicit 'this' declaration.
+  ImplicitParamDecl *CXXThisDecl;
+
+  /// CXXVTTDecl - When generating code for a base object constructor or
+  /// base object destructor with virtual bases, this will hold the implicit
+  /// VTT parameter.
+  ImplicitParamDecl *CXXVTTDecl;
+  
+  /// CXXLiveTemporaryInfo - Holds information about a live C++ temporary.
+  struct CXXLiveTemporaryInfo {
+    /// Temporary - The live temporary.
+    const CXXTemporary *Temporary;
+
+    /// ThisPtr - The pointer to the temporary.
+    llvm::Value *ThisPtr;
+
+    /// DtorBlock - The destructor block.
+    llvm::BasicBlock *DtorBlock;
+
+    /// CondPtr - If this is a conditional temporary, this is the pointer to the
+    /// condition variable that states whether the destructor should be called
+    /// or not.  (Presumably null for unconditional temporaries — confirm at
+    /// the construction sites.)
+    llvm::Value *CondPtr;
+
+    /// Simple aggregate-style constructor; stores the fields verbatim.
+    CXXLiveTemporaryInfo(const CXXTemporary *temporary,
+                         llvm::Value *thisptr, llvm::BasicBlock *dtorblock,
+                         llvm::Value *condptr)
+      : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock),
+      CondPtr(condptr) { }
+  };
+
+  llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries;
+
+  /// ConditionalBranchLevel - Contains the nesting level of the current
+  /// conditional branch. This is used so that we know if a temporary should be
+  /// destroyed conditionally.
+  unsigned ConditionalBranchLevel;
+
+
+  /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
+  /// type as well as the field number that contains the actual data.
+  llvm::DenseMap<const ValueDecl *, std::pair<const llvm::Type *, 
+                                              unsigned> > ByRefValueInfo;
+  
+  /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
+  /// number that holds the value.
+  unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+
+  llvm::BasicBlock *TerminateHandler;
+  llvm::BasicBlock *TrapBB;
+
+  int UniqueAggrDestructorCount;
+public:
+  CodeGenFunction(CodeGenModule &cgm);
+
+  /// getContext - Return the ASTContext.
+  ASTContext &getContext() const;
+  /// getDebugInfo - Return the debug info emitter, if any.
+  CGDebugInfo *getDebugInfo() { return DebugInfo; }
+
+  /// getInvokeDest / setInvokeDest - Access the nearest exception target for
+  /// calls which can unwind (see the InvokeDest member).
+  llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
+  void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
+
+  /// getLLVMContext - Return the LLVMContext that IR is emitted into.
+  llvm::LLVMContext &getLLVMContext() { return VMContext; }
+
+  //===--------------------------------------------------------------------===//
+  //                                  Objective-C
+  //===--------------------------------------------------------------------===//
+
+  void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+  void StartObjCMethod(const ObjCMethodDecl *MD,
+                       const ObjCContainerDecl *CD);
+
+  /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+  void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+                          const ObjCPropertyImplDecl *PID);
+
+  /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+  /// for the given property.
+  void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+                          const ObjCPropertyImplDecl *PID);
+
+  //===--------------------------------------------------------------------===//
+  //                                  Block Bits
+  //===--------------------------------------------------------------------===//
+
+  llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
+  llvm::Constant *BuildDescriptorBlockDecl(bool BlockHasCopyDispose,
+                                           CharUnits Size,
+                                           const llvm::StructType *,
+                                           std::vector<HelperInfo> *);
+
+  llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr,
+                                        const BlockInfo& Info,
+                                        const Decl *OuterFuncDecl,
+                                  llvm::DenseMap<const Decl*, llvm::Value*> ldm,
+                                        CharUnits &Size, CharUnits &Align,
+                      llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
+                                        bool &subBlockHasCopyDispose);
+
+  void BlockForwardSelf();
+  llvm::Value *LoadBlockStruct();
+
+  CharUnits AllocateBlockDecl(const BlockDeclRefExpr *E);
+  llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E);
+  const llvm::Type *BuildByRefType(const ValueDecl *D);
+
+  void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
+  void StartFunction(GlobalDecl GD, QualType RetTy,
+                     llvm::Function *Fn,
+                     const FunctionArgList &Args,
+                     SourceLocation StartLoc);
+
+  /// EmitReturnBlock - Emit the unified return block, trying to avoid its
+  /// emission when possible.
+  void EmitReturnBlock();
+
+  /// FinishFunction - Complete IR generation of the current function. It is
+  /// legal to call this function even if there is no current insertion point.
+  void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+  /// DynamicTypeAdjust - Do the non-virtual and virtual adjustments on an
+  /// object pointer to alter the dynamic type of the pointer.  Used by
+  /// GenerateCovariantThunk for building thunks.
+  llvm::Value *DynamicTypeAdjust(llvm::Value *V, 
+                                 const ThunkAdjustment &Adjustment);
+
+  /// GenerateThunk - Generate a thunk for the given method
+  llvm::Constant *GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
+                                bool Extern, 
+                                const ThunkAdjustment &ThisAdjustment);
+  llvm::Constant *
+  GenerateCovariantThunk(llvm::Function *Fn, GlobalDecl GD,
+                         bool Extern,
+                         const CovariantThunkAdjustment &Adjustment);
+
+  void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type);
+
+  void InitializeVtablePtrs(const CXXRecordDecl *ClassDecl);
+
+  void InitializeVtablePtrsRecursive(const CXXRecordDecl *ClassDecl,
+                                     llvm::Constant *Vtable,
+                                     CGVtableInfo::AddrSubMap_t& AddressPoints,
+                                     llvm::Value *ThisPtr,
+                                     uint64_t Offset);
+
+  void SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
+                                    CXXCtorType Type,
+                                    llvm::Function *Fn,
+                                    const FunctionArgList &Args);
+
+  void SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
+                                   llvm::Function *Fn,
+                                   const FunctionArgList &Args);
+
+  void SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor,
+                                    CXXCtorType Type,
+                                    llvm::Function *Fn,
+                                    const FunctionArgList &Args);
+
+  void SynthesizeDefaultDestructor(const CXXDestructorDecl *Dtor,
+                                   CXXDtorType Type,
+                                   llvm::Function *Fn,
+                                   const FunctionArgList &Args);
+
+  /// EmitDtorEpilogue - Emit all code that comes at the end of class's
+  /// destructor. This is to call destructors on members and base classes in
+  /// reverse order of their construction.
+  void EmitDtorEpilogue(const CXXDestructorDecl *Dtor,
+                        CXXDtorType Type);
+
+  /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+  /// arguments for the given function. This is also responsible for naming the
+  /// LLVM function arguments.
+  void EmitFunctionProlog(const CGFunctionInfo &FI,
+                          llvm::Function *Fn,
+                          const FunctionArgList &Args);
+
+  /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+  /// given temporary.
+  void EmitFunctionEpilog(const CGFunctionInfo &FI, llvm::Value *ReturnValue);
+
+  /// EmitStartEHSpec - Emit the start of the exception spec.
+  void EmitStartEHSpec(const Decl *D);
+
+  /// EmitEndEHSpec - Emit the end of the exception spec.
+  void EmitEndEHSpec(const Decl *D);
+
+  /// getTerminateHandler - Return a handler that just calls terminate.
+  llvm::BasicBlock *getTerminateHandler();
+
+  const llvm::Type *ConvertTypeForMem(QualType T);
+  const llvm::Type *ConvertType(QualType T);
+
+  /// LoadObjCSelf - Load the value of self. This function is only valid while
+  /// generating code for an Objective-C method.
+  llvm::Value *LoadObjCSelf();
+
+  /// TypeOfSelfObject - Return type of object that this self represents.
+  QualType TypeOfSelfObject();
+
+  /// hasAggregateLLVMType - Return true if the specified AST type will map into
+  /// an aggregate LLVM type or is void.
+  static bool hasAggregateLLVMType(QualType T);
+
+  /// createBasicBlock - Create an LLVM basic block.
+  llvm::BasicBlock *createBasicBlock(const char *Name="",
+                                     llvm::Function *Parent=0,
+                                     llvm::BasicBlock *InsertBefore=0) {
+#ifdef NDEBUG
+    // Release builds drop the block name; names only aid reading the IR.
+    return llvm::BasicBlock::Create(VMContext, "", Parent, InsertBefore);
+#else
+    return llvm::BasicBlock::Create(VMContext, Name, Parent, InsertBefore);
+#endif
+  }
+
+  /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
+  /// label maps to.
+  llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+
+  /// SimplifyForwardingBlocks - If the given basic block is only a branch to
+  /// another basic block, simplify it. This assumes that no other code could
+  /// potentially reference the basic block.
+  void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+  /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+  /// adding a fall-through branch from the current insert block if
+  /// necessary. It is legal to call this function even if there is no current
+  /// insertion point.
+  ///
+  /// IsFinished - If true, indicates that the caller has finished emitting
+  /// branches to the given block and does not expect to emit code into it. This
+  /// means the block can be ignored if it is unreachable.
+  void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+  /// EmitBranch - Emit a branch to the specified basic block from the current
+  /// insert block, taking care to avoid creation of branches from dummy
+  /// blocks. It is legal to call this function even if there is no current
+  /// insertion point.
+  ///
+  /// This function clears the current insertion point. The caller should follow
+  /// calls to this function with calls to Emit*Block prior to generation new
+  /// code.
+  void EmitBranch(llvm::BasicBlock *Block);
+
+  /// HaveInsertPoint - True if an insertion point is defined. If not, this
+  /// indicates that the current code being emitted is unreachable.
+  bool HaveInsertPoint() const {
+    // A null insert block means the Builder was cleared (e.g. after an
+    // unconditional branch), so any code emitted now would be unreachable.
+    return Builder.GetInsertBlock() != 0;
+  }
+
+  /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+  /// emitted IR has a place to go. Note that by definition, if this function
+  /// creates a block then that block is unreachable; callers may do better to
+  /// detect when no insertion point is defined and simply skip IR generation.
+  /// Guarantee that the Builder has somewhere to emit into; when no
+  /// insertion point exists, start a fresh (by definition unreachable)
+  /// block and make it current.
+  void EnsureInsertPoint() {
+    if (HaveInsertPoint())
+      return;
+    EmitBlock(createBasicBlock());
+  }
+
+  /// ErrorUnsupported - Print out an error that codegen doesn't support the
+  /// specified stmt yet.
+  void ErrorUnsupported(const Stmt *S, const char *Type,
+                        bool OmitOnError=false);
+
+  //===--------------------------------------------------------------------===//
+  //                                  Helpers
+  //===--------------------------------------------------------------------===//
+
+  /// MakeQualifiers - Compute the qualifiers of \arg T's canonical type,
+  /// augmented with T's Objective-C GC attribute.
+  Qualifiers MakeQualifiers(QualType T) {
+    ASTContext &Ctx = getContext();
+    Qualifiers Result = Ctx.getCanonicalType(T).getQualifiers();
+    Result.setObjCGCAttr(Ctx.getObjCGCAttrKind(T));
+    return Result;
+  }
+
+  /// CreateTempAlloca - This creates a alloca and inserts it into the entry
+  /// block. The caller is responsible for setting an appropriate alignment on
+  /// the alloca.
+  llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
+                                     const llvm::Twine &Name = "tmp");
+
+  /// CreateMemTemp - Create a temporary memory object of the given type, with
+  /// appropriate alignment.
+  llvm::Value *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
+
+  /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+  /// expression and compare the result against zero, returning an Int1Ty value.
+  llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+  /// EmitAnyExpr - Emit code to compute the specified expression which can have
+  /// any type.  The result is returned as an RValue struct.  If this is an
+  /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
+  /// the result should be returned.
+  ///
+  /// \param IgnoreResult - True if the resulting value isn't used.
+  RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
+                     bool IsAggLocVolatile = false, bool IgnoreResult = false,
+                     bool IsInitializer = false);
+
+  // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+  // or the value of the expression, depending on how va_list is defined.
+  llvm::Value *EmitVAListRef(const Expr *E);
+
+  /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), but the result will
+  /// always be accessible even if no aggregate location is provided.
+  RValue EmitAnyExprToTemp(const Expr *E, bool IsAggLocVolatile = false,
+                           bool IsInitializer = false);
+
+  /// EmitAggregateCopy - Emit an aggregate copy.
+  ///
+  /// \param isVolatile - True iff either the source or the destination is
+  /// volatile.
+  void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+                         QualType EltTy, bool isVolatile=false);
+
+  void EmitAggregateClear(llvm::Value *DestPtr, QualType Ty);
+
+  /// StartBlock - Start new block named N. If insert block is a dummy block
+  /// then reuse it.
+  void StartBlock(const char *N);
+
+  /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+  llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD);
+
+  /// GetAddrOfLocalVar - Return the address of a local variable.
+  llvm::Value *GetAddrOfLocalVar(const VarDecl *VD);
+
+  /// getAccessedFieldNo - Given an encoded value and a result number, return
+  /// the input field number being accessed.
+  static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+  llvm::BlockAddress *GetAddrOfLabel(const LabelStmt *L);
+  llvm::BasicBlock *GetIndirectGotoBlock();
+
+  /// EmitMemSetToZero - Generate code to memset a value of the given type to 0.
+  void EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty);
+
+  // EmitVAArg - Generate code to get an argument from the passed in pointer
+  // and update it accordingly. The return value is a pointer to the argument.
+  // FIXME: We should be able to get rid of this method and use the va_arg
+  // instruction in LLVM instead once it works well enough.
+  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+
+  /// EmitVLASize - Generate code for any VLA size expressions that might occur
+  /// in a variably modified type. If Ty is a VLA, will return the value that
+  /// corresponds to the size in bytes of the VLA type. Will return 0 otherwise.
+  ///
+  /// This function can be called with a null (unreachable) insert point.
+  llvm::Value *EmitVLASize(QualType Ty);
+
+  // GetVLASize - Returns an LLVM value that corresponds to the size in bytes
+  // of a variable length array type.
+  llvm::Value *GetVLASize(const VariableArrayType *);
+
+  /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+  /// generating code for an C++ member function.
+  llvm::Value *LoadCXXThis();
+
+  /// LoadCXXVTT - Load the VTT parameter passed to base
+  /// constructors/destructors of classes with virtual bases.
+  llvm::Value *LoadCXXVTT();
+  
+  /// GetAddressOfBaseClass - This function will add the necessary delta to the
+  /// load of 'this' and returns address of the base class.
+  // FIXME. This currently only does a derived to non-virtual base conversion.
+  // Other kinds of conversions will come later.
+  llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+                                     const CXXRecordDecl *ClassDecl,
+                                     const CXXRecordDecl *BaseClassDecl,
+                                     bool NullCheckValue);
+  
+  llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
+                                        const CXXRecordDecl *ClassDecl,
+                                        const CXXRecordDecl *DerivedClassDecl,
+                                        bool NullCheckValue);
+
+  llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
+                                         const CXXRecordDecl *ClassDecl,
+                                         const CXXRecordDecl *BaseClassDecl);
+    
+  void EmitClassAggrMemberwiseCopy(llvm::Value *DestValue,
+                                   llvm::Value *SrcValue,
+                                   const ArrayType *Array,
+                                   const CXXRecordDecl *BaseClassDecl,
+                                   QualType Ty);
+
+  void EmitClassAggrCopyAssignment(llvm::Value *DestValue,
+                                   llvm::Value *SrcValue,
+                                   const ArrayType *Array,
+                                   const CXXRecordDecl *BaseClassDecl,
+                                   QualType Ty);
+
+  void EmitClassMemberwiseCopy(llvm::Value *DestValue, llvm::Value *SrcValue,
+                               const CXXRecordDecl *ClassDecl,
+                               const CXXRecordDecl *BaseClassDecl,
+                               QualType Ty);
+
+  void EmitClassCopyAssignment(llvm::Value *DestValue, llvm::Value *SrcValue,
+                               const CXXRecordDecl *ClassDecl,
+                               const CXXRecordDecl *BaseClassDecl,
+                               QualType Ty);
+
+  void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+                              llvm::Value *This,
+                              CallExpr::const_arg_iterator ArgBeg,
+                              CallExpr::const_arg_iterator ArgEnd);
+
+  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+                                  const ConstantArrayType *ArrayTy,
+                                  llvm::Value *ArrayPtr,
+                                  CallExpr::const_arg_iterator ArgBeg,
+                                  CallExpr::const_arg_iterator ArgEnd);
+  
+  void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+                                  llvm::Value *NumElements,
+                                  llvm::Value *ArrayPtr,
+                                  CallExpr::const_arg_iterator ArgBeg,
+                                  CallExpr::const_arg_iterator ArgEnd);
+
+  void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+                                 const ArrayType *Array,
+                                 llvm::Value *This);
+
+  void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
+                                 llvm::Value *NumElements,
+                                 llvm::Value *This);
+
+  llvm::Constant *GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
+                                                const ArrayType *Array,
+                                                llvm::Value *This);
+
+  void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+                             llvm::Value *This);
+
+  void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
+  void PopCXXTemporary();
+
+  llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+  void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
+
+  void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
+                      QualType DeleteTy);
+
+  llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+  llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+
+  void EmitCheck(llvm::Value *, unsigned Size);
+
+  llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+                                       bool isInc, bool isPre);
+  ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+                                         bool isInc, bool isPre);
+  //===--------------------------------------------------------------------===//
+  //                            Declaration Emission
+  //===--------------------------------------------------------------------===//
+
+  /// EmitDecl - Emit a declaration.
+  ///
+  /// This function can be called with a null (unreachable) insert point.
+  void EmitDecl(const Decl &D);
+
+  /// EmitBlockVarDecl - Emit a block variable declaration.
+  ///
+  /// This function can be called with a null (unreachable) insert point.
+  void EmitBlockVarDecl(const VarDecl &D);
+
+  /// EmitLocalBlockVarDecl - Emit a local block variable declaration.
+  ///
+  /// This function can be called with a null (unreachable) insert point.
+  void EmitLocalBlockVarDecl(const VarDecl &D);
+
+  void EmitStaticBlockVarDecl(const VarDecl &D,
+                              llvm::GlobalValue::LinkageTypes Linkage);
+
+  /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+  void EmitParmDecl(const VarDecl &D, llvm::Value *Arg);
+
+  //===--------------------------------------------------------------------===//
+  //                             Statement Emission
+  //===--------------------------------------------------------------------===//
+
+  /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+  void EmitStopPoint(const Stmt *S);
+
+  /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+  /// this function even if there is no current insertion point.
+  ///
+  /// This function may clear the current insertion point; callers should use
+  /// EnsureInsertPoint if they wish to subsequently generate code without first
+  /// calling EmitBlock, EmitBranch, or EmitStmt.
+  void EmitStmt(const Stmt *S);
+
+  /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+  /// necessarily require an insertion point or debug information; typically
+  /// because the statement amounts to a jump or a container of other
+  /// statements.
+  ///
+  /// \return True if the statement was handled.
+  bool EmitSimpleStmt(const Stmt *S);
+
+  RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+                          llvm::Value *AggLoc = 0, bool isAggVol = false);
+
+  /// EmitLabel - Emit the block for the given label. It is legal to call this
+  /// function even if there is no current insertion point.
+  void EmitLabel(const LabelStmt &S); // helper for EmitLabelStmt.
+
+  void EmitLabelStmt(const LabelStmt &S);
+  void EmitGotoStmt(const GotoStmt &S);
+  void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+  void EmitIfStmt(const IfStmt &S);
+  void EmitWhileStmt(const WhileStmt &S);
+  void EmitDoStmt(const DoStmt &S);
+  void EmitForStmt(const ForStmt &S);
+  void EmitReturnStmt(const ReturnStmt &S);
+  void EmitDeclStmt(const DeclStmt &S);
+  void EmitBreakStmt(const BreakStmt &S);
+  void EmitContinueStmt(const ContinueStmt &S);
+  void EmitSwitchStmt(const SwitchStmt &S);
+  void EmitDefaultStmt(const DefaultStmt &S);
+  void EmitCaseStmt(const CaseStmt &S);
+  void EmitCaseStmtRange(const CaseStmt &S);
+  void EmitAsmStmt(const AsmStmt &S);
+
+  void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+  void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+  void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+  void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+
+  void EmitCXXTryStmt(const CXXTryStmt &S);
+  
+  //===--------------------------------------------------------------------===//
+  //                         LValue Expression Emission
+  //===--------------------------------------------------------------------===//
+
+  /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+  RValue GetUndefRValue(QualType Ty);
+
+  /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+  /// and issue an ErrorUnsupported style diagnostic (using the
+  /// provided Name).
+  RValue EmitUnsupportedRValue(const Expr *E,
+                               const char *Name);
+
+  /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+  /// an ErrorUnsupported style diagnostic (using the provided Name).
+  LValue EmitUnsupportedLValue(const Expr *E,
+                               const char *Name);
+
+  /// EmitLValue - Emit code to compute a designator that specifies the location
+  /// of the expression.
+  ///
+  /// This can return one of two things: a simple address or a bitfield
+  /// reference.  In either case, the LLVM Value* in the LValue structure is
+  /// guaranteed to be an LLVM pointer type.
+  ///
+  /// If this returns a bitfield reference, nothing about the pointee type of
+  /// the LLVM value is known: For example, it may not be a pointer to an
+  /// integer.
+  ///
+  /// If this returns a normal address, and if the lvalue's C type is fixed
+  /// size, this method guarantees that the returned pointer type will point to
+  /// an LLVM type of the same size of the lvalue's type.  If the lvalue has a
+  /// variable length type, this is not possible.
+  ///
+  LValue EmitLValue(const Expr *E);
+
+  /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
+  /// checking code to guard against undefined behavior.  This is only
+  /// suitable when we know that the address will be used to access the
+  /// object.
+  LValue EmitCheckedLValue(const Expr *E);
+
+  /// EmitLoadOfScalar - Load a scalar value from an address, taking
+  /// care to appropriately convert from the memory representation to
+  /// the LLVM value representation.
+  llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+                                QualType Ty);
+
+  /// EmitStoreOfScalar - Store a scalar value to an address, taking
+  /// care to appropriately convert from the LLVM value representation
+  /// to the memory representation.
+  void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+                         bool Volatile, QualType Ty);
+
+  /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+  /// this method emits the address of the lvalue, then loads the result as an
+  /// rvalue, returning the rvalue.
+  RValue EmitLoadOfLValue(LValue V, QualType LVType);
+  RValue EmitLoadOfExtVectorElementLValue(LValue V, QualType LVType);
+  RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType);
+  RValue EmitLoadOfPropertyRefLValue(LValue LV, QualType ExprType);
+  RValue EmitLoadOfKVCRefLValue(LValue LV, QualType ExprType);
+
+
+  /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+  /// lvalue, where both are guaranteed to have the same type, and that type
+  /// is 'Ty'.
+  void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty);
+  void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst,
+                                                QualType Ty);
+  void EmitStoreThroughPropertyRefLValue(RValue Src, LValue Dst, QualType Ty);
+  void EmitStoreThroughKVCRefLValue(RValue Src, LValue Dst, QualType Ty);
+
+  /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
+  /// as EmitStoreThroughLValue.
+  ///
+  /// \param Result [out] - If non-null, this will be set to a Value* for the
+  /// bit-field contents after the store, appropriate for use as the result of
+  /// an assignment to the bit-field.
+  void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty,
+                                      llvm::Value **Result=0);
+
+  // Note: only available for agg return types
+  LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+  // Note: only available for agg return types
+  LValue EmitCallExprLValue(const CallExpr *E);
+  // Note: only available for agg return types
+  LValue EmitVAArgExprLValue(const VAArgExpr *E);
+  LValue EmitDeclRefLValue(const DeclRefExpr *E);
+  LValue EmitStringLiteralLValue(const StringLiteral *E);
+  LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+  LValue EmitPredefinedFunctionName(unsigned Type);
+  LValue EmitPredefinedLValue(const PredefinedExpr *E);
+  LValue EmitUnaryOpLValue(const UnaryOperator *E);
+  LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
+  LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+  LValue EmitMemberExpr(const MemberExpr *E);
+  LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
+  LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+  LValue EmitConditionalOperatorLValue(const ConditionalOperator *E);
+  LValue EmitCastLValue(const CastExpr *E);
+  LValue EmitNullInitializationLValue(const CXXZeroInitValueExpr *E);
+  
+  llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+                              const ObjCIvarDecl *Ivar);
+  LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
+                            unsigned CVRQualifiers);
+  
+  /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
+  /// if the Field is a reference, this will return the address of the reference
+  /// and not the address of the value stored in the reference.
+  LValue EmitLValueForFieldInitialization(llvm::Value* Base, 
+                                          const FieldDecl* Field,
+                                          unsigned CVRQualifiers);
+  
+  LValue EmitLValueForIvar(QualType ObjectTy,
+                           llvm::Value* Base, const ObjCIvarDecl *Ivar,
+                           unsigned CVRQualifiers);
+
+  LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
+                                unsigned CVRQualifiers);
+
+  LValue EmitBlockDeclRefLValue(const BlockDeclRefExpr *E);
+
+  LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+  LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+  LValue EmitCXXExprWithTemporariesLValue(const CXXExprWithTemporaries *E);
+  LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+  
+  LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+  LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+  LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
+  LValue EmitObjCKVCRefLValue(const ObjCImplicitSetterGetterRefExpr *E);
+  LValue EmitObjCSuperExprLValue(const ObjCSuperExpr *E);
+  LValue EmitStmtExprLValue(const StmtExpr *E);
+  LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+  
+  //===--------------------------------------------------------------------===//
+  //                         Scalar Expression Emission
+  //===--------------------------------------------------------------------===//
+
+  /// EmitCall - Generate a call of the given function, expecting the given
+  /// result type, and using the given argument list which specifies both the
+  /// LLVM arguments and the types they were derived from.
+  ///
+  /// \param TargetDecl - If given, the decl of the function in a direct call;
+  /// used to set attributes on the call (noreturn, etc.).
+  RValue EmitCall(const CGFunctionInfo &FnInfo,
+                  llvm::Value *Callee,
+                  ReturnValueSlot ReturnValue,
+                  const CallArgList &Args,
+                  const Decl *TargetDecl = 0);
+
+  RValue EmitCall(QualType FnType, llvm::Value *Callee,
+                  ReturnValueSlot ReturnValue,
+                  CallExpr::const_arg_iterator ArgBeg,
+                  CallExpr::const_arg_iterator ArgEnd,
+                  const Decl *TargetDecl = 0);
+  RValue EmitCallExpr(const CallExpr *E, 
+                      ReturnValueSlot ReturnValue = ReturnValueSlot());
+
+  llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+                                const llvm::Type *Ty);
+  llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type, 
+                                llvm::Value *&This, const llvm::Type *Ty);
+
+  RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+                           llvm::Value *Callee,
+                           ReturnValueSlot ReturnValue,
+                           llvm::Value *This,
+                           llvm::Value *VTT,
+                           CallExpr::const_arg_iterator ArgBeg,
+                           CallExpr::const_arg_iterator ArgEnd);
+  RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
+                               ReturnValueSlot ReturnValue);
+  RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+                                      ReturnValueSlot ReturnValue);
+
+  RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+                                       const CXXMethodDecl *MD,
+                                       ReturnValueSlot ReturnValue);
+
+  
+  RValue EmitBuiltinExpr(const FunctionDecl *FD,
+                         unsigned BuiltinID, const CallExpr *E);
+
+  RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
+
+  /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+  /// is unhandled by the current target.
+  llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+  llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+  llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+  llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+  llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+  llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+  RValue EmitObjCMessageExpr(const ObjCMessageExpr *E);
+  RValue EmitObjCPropertyGet(const Expr *E);
+  RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S);
+  void EmitObjCPropertySet(const Expr *E, RValue Src);
+  void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
+
+
+  /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
+  /// expression. Will emit a temporary variable if E is not an LValue.
+  RValue EmitReferenceBindingToExpr(const Expr* E, bool IsInitializer = false);
+
+  //===--------------------------------------------------------------------===//
+  //                           Expression Emission
+  //===--------------------------------------------------------------------===//
+
+  // Expressions are broken into three classes: scalar, complex, aggregate.
+
+  /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+  /// scalar type, returning the result.
+  llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
+
+  /// EmitScalarConversion - Emit a conversion from the specified type to the
+  /// specified destination type, both of which are LLVM scalar types.
+  llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+                                    QualType DstTy);
+
+  /// EmitComplexToScalarConversion - Emit a conversion from the specified
+  /// complex type to the specified destination type, where the destination type
+  /// is an LLVM scalar type.
+  llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+                                             QualType DstTy);
+
+
+  /// EmitAggExpr - Emit the computation of the specified expression of
+  /// aggregate type.  The result is computed into DestPtr.  Note that if
+  /// DestPtr is null, the value of the aggregate expression is not needed.
+  void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest,
+                   bool IgnoreResult = false, bool IsInitializer = false,
+                   bool RequiresGCollection = false);
+
+  /// EmitAggExprToLValue - Emit the computation of the specified expression of
+  /// aggregate type into a temporary LValue.
+  LValue EmitAggExprToLValue(const Expr *E);
+
+  /// EmitGCMemmoveCollectable - Emit special API for structs with object
+  /// pointers.
+  void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+                                QualType Ty);
+
+  /// EmitComplexExpr - Emit the computation of the specified expression of
+  /// complex type, returning the result.
+  ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal = false,
+                                bool IgnoreImag = false,
+                                bool IgnoreRealAssign = false,
+                                bool IgnoreImagAssign = false);
+
+  /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+  /// of complex type, storing into the specified Value*.
+  void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
+                               bool DestIsVolatile);
+
+  /// StoreComplexToAddr - Store a complex number into the specified address.
+  void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
+                          bool DestIsVolatile);
+  /// LoadComplexFromAddr - Load a complex number from the specified address.
+  ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
+
+  /// CreateStaticBlockVarDecl - Create a zero-initialized LLVM global for a
+  /// static block var decl.
+  llvm::GlobalVariable *CreateStaticBlockVarDecl(const VarDecl &D,
+                                                 const char *Separator,
+                                       llvm::GlobalValue::LinkageTypes Linkage);
+  
+  /// AddInitializerToGlobalBlockVarDecl - Add the initializer for 'D' to the
+  /// global variable that has already been created for it.  If the initializer
+  /// has a different type than GV does, this may free GV and return a different
+  /// one.  Otherwise it just returns GV.
+  llvm::GlobalVariable *
+  AddInitializerToGlobalBlockVarDecl(const VarDecl &D,
+                                     llvm::GlobalVariable *GV);
+  
+
+  /// EmitStaticCXXBlockVarDeclInit - Create the initializer for a C++ runtime
+  /// initialized static block var decl.
+  void EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+                                     llvm::GlobalVariable *GV);
+
+  /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
+  /// variable with global storage.
+  void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr);
+
+  /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
+  /// with the C++ runtime so that its destructor will be called at exit.
+  void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+                                     llvm::Constant *DeclPtr);
+
+  /// GenerateCXXGlobalInitFunc - Generates code for initializing global
+  /// variables.
+  void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+                                 llvm::Constant **Decls,
+                                 unsigned NumDecls);
+
+  void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn, const VarDecl *D);
+
+  void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
+
+  RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
+                                    llvm::Value *AggLoc = 0,
+                                    bool IsAggLocVolatile = false,
+                                    bool IsInitializer = false);
+
+  void EmitCXXThrowExpr(const CXXThrowExpr *E);
+
+  //===--------------------------------------------------------------------===//
+  //                             Internal Helpers
+  //===--------------------------------------------------------------------===//
+
+  /// ContainsLabel - Return true if the statement contains a label in it.  If
+  /// this statement is not executed normally, it not containing a label means
+  /// that we can just remove the code.
+  static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+  /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+  /// to a constant, or if it does but contains a label, return 0.  If it
+  /// constant folds to 'true' and does not contain a label, return 1; if it
+  /// constant folds to 'false' and does not contain a label, return -1.
+  int ConstantFoldsToSimpleInteger(const Expr *Cond);
+
+  /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+  /// if statement) to the specified blocks.  Based on the condition, this might
+  /// try to simplify the codegen of the conditional based on the branch.
+  void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+                            llvm::BasicBlock *FalseBlock);
+
+  /// getTrapBB - Create a basic block that will call the trap intrinsic.  We'll
+  /// generate a branch around the created basic block as necessary.
+  llvm::BasicBlock* getTrapBB();
+private:
+
+  void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+  /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+  /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+  ///
+  /// \param AI - The first function argument of the expansion.
+  /// \return The argument following the last expanded function
+  /// argument.
+  llvm::Function::arg_iterator
+  ExpandTypeFromArgs(QualType Ty, LValue Dst,
+                     llvm::Function::arg_iterator AI);
+
+  /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
+  /// Ty, into individual arguments on the provided vector \arg Args. See
+  /// ABIArgInfo::Expand.
+  void ExpandTypeToArgs(QualType Ty, RValue Src,
+                        llvm::SmallVector<llvm::Value*, 16> &Args);
+
+  llvm::Value* EmitAsmInput(const AsmStmt &S,
+                            const TargetInfo::ConstraintInfo &Info,
+                            const Expr *InputExpr, std::string &ConstraintStr);
+
+  /// EmitCleanupBlock - emits a single cleanup block.
+  void EmitCleanupBlock();
+
+  /// AddBranchFixup - adds a branch instruction to the list of fixups for the
+  /// current cleanup scope.
+  void AddBranchFixup(llvm::BranchInst *BI);
+
+  /// EmitCallArg - Emit a single call argument.
+  RValue EmitCallArg(const Expr *E, QualType ArgType);
+
+  /// EmitCallArgs - Emit call arguments for a function.
+  /// The CallArgTypeInfo parameter is used for iterating over the known
+  /// argument types of the function being called.
+  template<typename T>
+  void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+                    CallExpr::const_arg_iterator ArgBeg,
+                    CallExpr::const_arg_iterator ArgEnd) {
+    CallExpr::const_arg_iterator Arg = ArgBeg;
+
+    // First, use the argument types that the type info knows about.
+    if (CallArgTypeInfo) {
+      for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
+           E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
+        assert(Arg != ArgEnd && "Running over edge of argument list!");
+        QualType ArgType = *I;
+
+        assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+               getTypePtr() ==
+               getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+               "type mismatch in call argument!");
+
+        Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+                                      ArgType));
+      }
+
+      // Either we've emitted all the call args, or we have a call to a
+      // variadic function.
+      assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+             "Extra arguments in non-variadic function!");
+
+    }
+
+    // If we still have any arguments, emit them using the type of the argument.
+    for (; Arg != ArgEnd; ++Arg) {
+      QualType ArgType = Arg->getType();
+      Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+                                    ArgType));
+    }
+  }
+};
+
+
+}  // end namespace CodeGen
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..a6b546e
--- /dev/null
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,1804 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGCall.h"
+#include "CGObjCRuntime.h"
+#include "Mangle.h"
+#include "TargetInfo.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Module.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
+                             llvm::Module &M, const llvm::TargetData &TD,
+                             Diagnostic &diags)
+  : BlockModule(C, M, TD, Types, *this), Context(C),
+    Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
+    TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+    Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()),
+    MangleCtx(C), VtableInfo(*this), Runtime(0),
+    MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0),
+    VMContext(M.getContext()) {
+
+  // Runtime is initialized to null above; only build an Objective-C runtime
+  // when the language options enable Objective-C.  The runtime selection
+  // logic lives in createObjCRuntime() instead of being duplicated here.
+  if (Features.ObjC1)
+    createObjCRuntime();
+
+  // If debug info generation is enabled, create the CGDebugInfo object.
+  DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
+}
+
+CodeGenModule::~CodeGenModule() {
+  // Both pointers are owned here and may be null (Runtime when not
+  // compiling Objective-C, DebugInfo when debug info is disabled);
+  // delete on a null pointer is a no-op.
+  delete Runtime;
+  delete DebugInfo;
+}
+
+void CodeGenModule::createObjCRuntime() {
+  // Select the Objective-C runtime implementation: the GNU runtime when not
+  // targeting the NeXT runtime, otherwise one of the two Mac runtimes
+  // depending on the fragile/non-fragile ABI setting.
+  if (Features.NeXTRuntime) {
+    if (Features.ObjCNonFragileABI)
+      Runtime = CreateMacNonFragileABIObjCRuntime(*this);
+    else
+      Runtime = CreateMacObjCRuntime(*this);
+  } else {
+    Runtime = CreateGNUObjCRuntime(*this);
+  }
+}
+
+void CodeGenModule::Release() {
+  // Finalize the module.  The ordering here is deliberate: deferred decls
+  // are emitted first (emitting them can add more state), the ObjC module
+  // init function is registered as a global ctor before the ctor list is
+  // written out, and the ctor/dtor/annotation/used lists are emitted last
+  // so everything that can append to them has already run.
+  EmitDeferred();
+  EmitCXXGlobalInitFunc();
+  if (Runtime)
+    if (llvm::Function *ObjCInitFunction = Runtime->ModuleInitFunction())
+      AddGlobalCtor(ObjCInitFunction);
+  EmitCtorList(GlobalCtors, "llvm.global_ctors");
+  EmitCtorList(GlobalDtors, "llvm.global_dtors");
+  EmitAnnotations();
+  EmitLLVMUsed();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
+                                     bool OmitOnError) {
+  // Optionally stay quiet when an error has already been reported.
+  if (OmitOnError && getDiags().hasErrorOccurred())
+    return;
+  std::string Message = Type;
+  unsigned ID = getDiags().getCustomDiagID(Diagnostic::Error,
+                                           "cannot compile this %0 yet");
+  getDiags().Report(Context.getFullLoc(S->getLocStart()), ID)
+    << Message << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
+                                     bool OmitOnError) {
+  // Optionally stay quiet when an error has already been reported.
+  if (OmitOnError && getDiags().hasErrorOccurred())
+    return;
+  std::string Message = Type;
+  unsigned ID = getDiags().getCustomDiagID(Diagnostic::Error,
+                                           "cannot compile this %0 yet");
+  getDiags().Report(Context.getFullLoc(D->getLocation()), ID) << Message;
+}
+
+LangOptions::VisibilityMode
+CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
+  // An explicit __private_extern__ storage class forces hidden visibility.
+  if (const VarDecl *Var = dyn_cast<VarDecl>(D))
+    if (Var->getStorageClass() == VarDecl::PrivateExtern)
+      return LangOptions::Hidden;
+
+  // Honor an explicit visibility attribute on the declaration itself.
+  if (const VisibilityAttr *VA = D->getAttr<VisibilityAttr>()) {
+    switch (VA->getVisibility()) {
+    default: assert(0 && "Unknown visibility!");
+    case VisibilityAttr::DefaultVisibility:
+      return LangOptions::Default;
+    case VisibilityAttr::HiddenVisibility:
+      return LangOptions::Hidden;
+    case VisibilityAttr::ProtectedVisibility:
+      return LangOptions::Protected;
+    }
+  }
+
+  // Otherwise this decl should have the same visibility as its parent
+  // context; fall back to the global -fvisibility mode at the top level.
+  if (const DeclContext *DC = D->getDeclContext())
+    return getDeclVisibilityMode(cast<Decl>(DC));
+  return getLangOptions().getVisibilityMode();
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+                                        const Decl *D) const {
+  // Internal definitions always have default visibility.
+  if (GV->hasLocalLinkage()) {
+    GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+    return;
+  }
+
+  // Map the language-level visibility mode onto the LLVM IR notion.
+  llvm::GlobalValue::VisibilityTypes V;
+  switch (getDeclVisibilityMode(D)) {
+  default: assert(0 && "Unknown visibility!");
+  case LangOptions::Default:
+    V = llvm::GlobalValue::DefaultVisibility;
+    break;
+  case LangOptions::Hidden:
+    V = llvm::GlobalValue::HiddenVisibility;
+    break;
+  case LangOptions::Protected:
+    V = llvm::GlobalValue::ProtectedVisibility;
+    break;
+  }
+  GV->setVisibility(V);
+}
+
+const char *CodeGenModule::getMangledName(const GlobalDecl &GD) {
+  const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
+
+  // Constructors and destructors mangle differently per variant carried in
+  // the GlobalDecl, so dispatch them to the specialized helpers.
+  if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(ND))
+    return getMangledCXXCtorName(Ctor, GD.getCtorType());
+  if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(ND))
+    return getMangledCXXDtorName(Dtor, GD.getDtorType());
+
+  return getMangledName(ND);
+}
+
+/// \brief Retrieves the mangled name for the given declaration.
+///
+/// If the given declaration requires a mangled name, returns a uniqued
+/// const char* containing the mangled name.  Otherwise, returns the
+/// unmangled name of the declaration.
+///
+const char *CodeGenModule::getMangledName(const NamedDecl *ND) {
+  // Declarations that don't get mangled keep their plain identifier.
+  if (!getMangleContext().shouldMangleDeclName(ND)) {
+    assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
+    return ND->getNameAsCString();
+  }
+
+  llvm::SmallString<256> Buffer;
+  getMangleContext().mangleName(ND, Buffer);
+  Buffer += '\0';  // UniqueMangledName expects a null-terminated range.
+  return UniqueMangledName(Buffer.begin(), Buffer.end());
+}
+
+const char *CodeGenModule::UniqueMangledName(const char *NameStart,
+                                             const char *NameEnd) {
+  // Intern the name (including its trailing NUL) in the MangledNames string
+  // map so each mangled name has one stable const char* representation.
+  assert(*(NameEnd - 1) == '\0' && "Mangled name must be null terminated!");
+
+  return MangledNames.GetOrCreateValue(NameStart, NameEnd).getKeyData();
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.  The list is emitted into the llvm.global_ctors appending
+/// global by EmitCtorList() during Release().
+void CodeGenModule::AddGlobalCtor(llvm::Function * Ctor, int Priority) {
+  // FIXME: Type coercion of void()* types.
+  GlobalCtors.push_back(std::make_pair(Ctor, Priority));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.  The list is emitted into the
+/// llvm.global_dtors appending global by EmitCtorList() during Release().
+void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
+  // FIXME: Type coercion of void()* types.
+  GlobalDtors.push_back(std::make_pair(Dtor, Priority));
+}
+
+/// EmitCtorList - Emit the given list of (function, priority) pairs as the
+/// appending global named GlobalName (llvm.global_ctors/llvm.global_dtors).
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+  // Nothing to emit; don't bother building the types or the global.
+  if (Fns.empty())
+    return;
+
+  // Ctor function type is void()*.
+  llvm::FunctionType* CtorFTy =
+    llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+                            std::vector<const llvm::Type*>(),
+                            false);
+  // Build the pointer type once and reuse it below (previously it was
+  // recomputed for the struct type and for each bitcast).
+  const llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+  // Get the type of a ctor entry, { i32, void ()* }.
+  llvm::StructType* CtorStructTy =
+    llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext),
+                          CtorPFTy, NULL);
+
+  // Construct the constructor and destructor arrays.
+  std::vector<llvm::Constant*> Ctors;
+  for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+    std::vector<llvm::Constant*> S;
+    S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+                I->second, false));
+    S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
+    Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+  }
+
+  llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+  new llvm::GlobalVariable(TheModule, AT, false,
+                           llvm::GlobalValue::AppendingLinkage,
+                           llvm::ConstantArray::get(AT, Ctors),
+                           GlobalName);
+}
+
+void CodeGenModule::EmitAnnotations() {
+  if (Annotations.empty())
+    return;
+
+  // Wrap the collected annotation structs in a constant array and publish
+  // them through the llvm.global.annotations variable, which lives in the
+  // llvm.metadata section.
+  llvm::ArrayType *ArrTy =
+    llvm::ArrayType::get(Annotations[0]->getType(), Annotations.size());
+  llvm::Constant *Array = llvm::ConstantArray::get(ArrTy, Annotations);
+  llvm::GlobalValue *GV =
+    new llvm::GlobalVariable(TheModule, Array->getType(), false,
+                             llvm::GlobalValue::AppendingLinkage, Array,
+                             "llvm.global.annotations");
+  GV->setSection("llvm.metadata");
+}
+
+static CodeGenModule::GVALinkage
+GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
+                      const LangOptions &Features) {
+  // Linkage to report if the function turns out to be externally visible;
+  // refined below based on the template specialization kind.
+  CodeGenModule::GVALinkage External = CodeGenModule::GVA_StrongExternal;
+
+  Linkage L = FD->getLinkage();
+  // In C++, a nominally external function whose type has unique-external
+  // linkage cannot really be external; demote it.
+  if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
+      FD->getType()->getLinkage() == UniqueExternalLinkage)
+    L = UniqueExternalLinkage;
+  
+  switch (L) {
+  case NoLinkage:
+  case InternalLinkage:
+  case UniqueExternalLinkage:
+    return CodeGenModule::GVA_Internal;
+    
+  case ExternalLinkage:
+    switch (FD->getTemplateSpecializationKind()) {
+    case TSK_Undeclared:
+    case TSK_ExplicitSpecialization:
+      External = CodeGenModule::GVA_StrongExternal;
+      break;
+
+    case TSK_ExplicitInstantiationDefinition:
+      // FIXME: explicit instantiation definitions should use weak linkage
+      return CodeGenModule::GVA_StrongExternal;
+
+    case TSK_ExplicitInstantiationDeclaration:
+    case TSK_ImplicitInstantiation:
+      External = CodeGenModule::GVA_TemplateInstantiation;
+      break;
+    }
+  }
+
+  // Non-inline functions get the externally-visible linkage computed above.
+  if (!FD->isInlined())
+    return External;
+    
+  if (!Features.CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
+    // GNU or C99 inline semantics. Determine whether this symbol should be
+    // externally visible.
+    if (FD->isInlineDefinitionExternallyVisible())
+      return External;
+
+    // C99 inline semantics, where the symbol is not externally visible.
+    return CodeGenModule::GVA_C99Inline;
+  }
+
+  // C++0x [temp.explicit]p9:
+  //   [ Note: The intent is that an inline function that is the subject of 
+  //   an explicit instantiation declaration will still be implicitly 
+  //   instantiated when used so that the body can be considered for 
+  //   inlining, but that no out-of-line copy of the inline function would be
+  //   generated in the translation unit. -- end note ]
+  if (FD->getTemplateSpecializationKind() 
+                                       == TSK_ExplicitInstantiationDeclaration)
+    return CodeGenModule::GVA_C99Inline;
+  
+  // Ordinary C++ inline function.
+  return CodeGenModule::GVA_CXXInline;
+}
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+                                                    llvm::GlobalValue *GV) {
+  GVALinkage Linkage = GetLinkageForFunction(getContext(), D, Features);
+
+  // Note the precedence of this chain: internal linkage wins over
+  // everything, then dllexport/weak attributes, then the inline/template
+  // linkage categories, with strong external linkage as the fallback.
+  if (Linkage == GVA_Internal) {
+    GV->setLinkage(llvm::Function::InternalLinkage);
+  } else if (D->hasAttr<DLLExportAttr>()) {
+    GV->setLinkage(llvm::Function::DLLExportLinkage);
+  } else if (D->hasAttr<WeakAttr>()) {
+    GV->setLinkage(llvm::Function::WeakAnyLinkage);
+  } else if (Linkage == GVA_C99Inline) {
+    // In C99 mode, 'inline' functions are guaranteed to have a strong
+    // definition somewhere else, so we can use available_externally linkage.
+    GV->setLinkage(llvm::Function::AvailableExternallyLinkage);
+  } else if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) {
+    // In C++, the compiler has to emit a definition in every translation unit
+    // that references the function.  We should use linkonce_odr because
+    // a) if all references in this translation unit are optimized away, we
+    // don't need to codegen it.  b) if the function persists, it needs to be
+    // merged with other definitions. c) C++ has the ODR, so we know the
+    // definition is dependable.
+    GV->setLinkage(llvm::Function::LinkOnceODRLinkage);
+  } else {
+    assert(Linkage == GVA_StrongExternal);
+    // Otherwise, we have strong external linkage.
+    GV->setLinkage(llvm::Function::ExternalLinkage);
+  }
+
+  SetCommonAttributes(D, GV);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+                                              const CGFunctionInfo &Info,
+                                              llvm::Function *F) {
+  // Compute the attribute list and calling convention implied by the ABI
+  // for this signature, then apply both to the llvm::Function.
+  unsigned CC;
+  AttributeListType Attrs;
+  ConstructAttributeList(Info, D, Attrs, CC);
+  F->setAttributes(llvm::AttrListPtr::get(Attrs.begin(), Attrs.size()));
+  F->setCallingConv(static_cast<llvm::CallingConv::ID>(CC));
+}
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+                                                           llvm::Function *F) {
+  // With exceptions disabled (and not using the ObjC non-fragile ABI,
+  // which presumably relies on unwinding — NOTE(review): confirm), the
+  // function cannot unwind.
+  if (!Features.Exceptions && !Features.ObjCNonFragileABI)
+    F->addFnAttr(llvm::Attribute::NoUnwind);
+
+  if (D->hasAttr<AlwaysInlineAttr>())
+    F->addFnAttr(llvm::Attribute::AlwaysInline);
+
+  if (D->hasAttr<NoInlineAttr>())
+    F->addFnAttr(llvm::Attribute::NoInline);
+
+  if (Features.getStackProtectorMode() == LangOptions::SSPOn)
+    F->addFnAttr(llvm::Attribute::StackProtect);
+  else if (Features.getStackProtectorMode() == LangOptions::SSPReq)
+    F->addFnAttr(llvm::Attribute::StackProtectReq);
+  
+  // Apply aligned attributes: the first one overwrites any existing
+  // alignment, subsequent ones only ever raise it (max).  Attribute values
+  // are in bits; LLVM alignment is in char units.
+  if (const AlignedAttr *AA = D->getAttr<AlignedAttr>()) {
+    unsigned width = Context.Target.getCharWidth();
+    F->setAlignment(AA->getAlignment() / width);
+    while ((AA = AA->getNext<AlignedAttr>()))
+      F->setAlignment(std::max(F->getAlignment(), AA->getAlignment() / width));
+  }
+  // C++ ABI requires 2-byte alignment for member functions.
+  if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
+    F->setAlignment(2);
+}
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+                                        llvm::GlobalValue *GV) {
+  // Attributes shared by functions and variables: visibility,
+  // attribute((used)), an explicit section, and any target-specific
+  // decoration provided by the target codegen info.
+  setGlobalVisibility(GV, D);
+
+  if (D->hasAttr<UsedAttr>())
+    AddUsedGlobal(GV);
+
+  if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+    GV->setSection(SA->getName());
+
+  getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+                                                  llvm::Function *F,
+                                                  const CGFunctionInfo &FI) {
+  SetLLVMFunctionAttributes(D, FI, F);
+  SetLLVMFunctionAttributesForDefinition(D, F);
+
+  // Set the linkage before SetCommonAttributes: setGlobalVisibility forces
+  // default visibility for values that already have local linkage.
+  F->setLinkage(llvm::Function::InternalLinkage);
+
+  SetCommonAttributes(D, F);
+}
+
+void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
+                                          llvm::Function *F,
+                                          bool IsIncompleteFunction) {
+  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+  // Incomplete function types were given a placeholder LLVM type, so ABI
+  // attributes cannot be computed for them.
+  if (!IsIncompleteFunction)
+    SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(GD), F);
+
+  // Only a few attributes are set on declarations; these may later be
+  // overridden by a definition.
+  llvm::GlobalValue::LinkageTypes LT;
+  if (FD->hasAttr<DLLImportAttr>())
+    LT = llvm::Function::DLLImportLinkage;
+  else if (FD->hasAttr<WeakAttr>() || FD->hasAttr<WeakImportAttr>())
+    // "extern_weak" is overloaded in LLVM; we probably should have
+    // separate linkage types for this.
+    LT = llvm::Function::ExternalWeakLinkage;
+  else
+    LT = llvm::Function::ExternalLinkage;
+  F->setLinkage(LT);
+
+  if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+    F->setSection(SA->getName());
+}
+
+void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+  // Record the global for the llvm.used list written by EmitLLVMUsed().
+  assert(!GV->isDeclaration() &&
+         "Only globals with definition can force usage.");
+  LLVMUsed.push_back(GV);
+}
+
+/// EmitLLVMUsed - Emit the llvm.used appending global, which keeps every
+/// value registered via AddUsedGlobal alive through optimization.
+void CodeGenModule::EmitLLVMUsed() {
+  // Don't create llvm.used if there is no need.
+  if (LLVMUsed.empty())
+    return;
+
+  const llvm::Type *i8PTy = llvm::Type::getInt8PtrTy(VMContext);
+
+  // llvm.used holds i8* entries, so bitcast each recorded value.  LLVMUsed
+  // is known non-empty here, so UsedArray is too; the old second emptiness
+  // check after this loop was dead code and has been removed.
+  std::vector<llvm::Constant*> UsedArray;
+  UsedArray.reserve(LLVMUsed.size());
+  for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i)
+    UsedArray.push_back(
+      llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
+                                     i8PTy));
+
+  llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, UsedArray.size());
+
+  llvm::GlobalVariable *GV =
+    new llvm::GlobalVariable(getModule(), ATy, false,
+                             llvm::GlobalValue::AppendingLinkage,
+                             llvm::ConstantArray::get(ATy, UsedArray),
+                             "llvm.used");
+
+  GV->setSection("llvm.metadata");
+}
+
+void CodeGenModule::EmitDeferred() {
+  // Emit code for any potentially referenced deferred decls.  Emitting one
+  // deferred decl can cause additional decls to become used, so keep
+  // draining the worklist until it is empty.
+  while (!DeferredDeclsToEmit.empty()) {
+    GlobalDecl GD = DeferredDeclsToEmit.back();
+    DeferredDeclsToEmit.pop_back();
+
+    // The mangled name for the decl must already be in GlobalDeclMap — that
+    // reference is what queued it here.  If the global has since received a
+    // stronger definition (e.g. an extern inline function with a strong
+    // redefinition), there is nothing left to emit for it.
+    llvm::GlobalValue *GV = GlobalDeclMap[getMangledName(GD)];
+    assert(GV && "Deferred decl wasn't referenced?");
+
+    if (GV->isDeclaration())
+      EmitGlobalDefinition(GD);
+  }
+}
+
+/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+/// annotation information for a given GlobalValue.  The annotation struct is
+/// {i8 *, i8 *, i8 *, i32}: a pointer to the annotated global, the
+/// annotation string, the translation unit name, and the line number of the
+/// annotated value's declaration.
+///
+/// FIXME: this does not unique the annotation string constants, as llvm-gcc
+///        appears to.
+///
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+                                                const AnnotateAttr *AA,
+                                                unsigned LineNo) {
+  llvm::Module *M = &getModule();
+  const llvm::Type *SBP = llvm::Type::getInt8PtrTy(VMContext);
+
+  // Build [N x i8] constants for the annotation string and the module
+  // identifier (used as the translation unit name).
+  llvm::Constant *AnnoStr = llvm::ConstantArray::get(VMContext,
+                                                     AA->getAnnotation(),
+                                                     true);
+  llvm::Constant *UnitStr = llvm::ConstantArray::get(VMContext,
+                                                     M->getModuleIdentifier(),
+                                                     true);
+
+  // Materialize the string bytes as private globals so the annotation
+  // struct can point at them.
+  llvm::GlobalValue *AnnoGV =
+    new llvm::GlobalVariable(*M, AnnoStr->getType(), false,
+                             llvm::GlobalValue::PrivateLinkage, AnnoStr,
+                             GV->getName());
+  llvm::GlobalValue *UnitGV =
+    new llvm::GlobalVariable(*M, UnitStr->getType(), false,
+                             llvm::GlobalValue::PrivateLinkage, UnitStr,
+                             ".str");
+
+  // Assemble the {i8*, i8*, i8*, i32} annotation entry.
+  llvm::Constant *Fields[4] = {
+    llvm::ConstantExpr::getBitCast(GV, SBP),
+    llvm::ConstantExpr::getBitCast(AnnoGV, SBP),
+    llvm::ConstantExpr::getBitCast(UnitGV, SBP),
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LineNo)
+  };
+  return llvm::ConstantStruct::get(VMContext, Fields, 4, false);
+}
+
+/// MayDeferGeneration - Return true if emission of this global's definition
+/// can be delayed until it is actually referenced.
+bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+  // Never defer when EmitAllDecls is specified or the decl has
+  // attribute used.
+  if (Features.EmitAllDecls || Global->hasAttr<UsedAttr>())
+    return false;
+
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+    // Constructors and destructors should never be deferred.
+    if (FD->hasAttr<ConstructorAttr>() ||
+        FD->hasAttr<DestructorAttr>())
+      return false;
+
+    // The key function for a class must never be deferred.
+    if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Global)) {
+      const CXXRecordDecl *RD = MD->getParent();
+      if (MD->isOutOfLine() && RD->isDynamicClass()) {
+        const CXXMethodDecl *KeyFunction = getContext().getKeyFunction(RD);
+        if (KeyFunction && 
+            KeyFunction->getCanonicalDecl() == MD->getCanonicalDecl())
+          return false;
+      }
+    }
+
+    GVALinkage Linkage = GetLinkageForFunction(getContext(), FD, Features);
+
+    // static, static inline, always_inline, and extern inline functions can
+    // always be deferred.  Normal inline functions can be deferred in C99/C++.
+    if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
+        Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
+      return true;
+    return false;
+  }
+
+  const VarDecl *VD = cast<VarDecl>(Global);
+  assert(VD->isFileVarDecl() && "Invalid decl");
+
+  // We never want to defer structs that have non-trivial constructors or 
+  // destructors.
+  
+  // FIXME: Handle references.
+  if (const RecordType *RT = VD->getType()->getAs<RecordType>()) {
+    if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+      if (!RD->hasTrivialConstructor() || !RD->hasTrivialDestructor())
+        return false;
+    }
+  }
+      
+  // Static data may be deferred, but out-of-line static data members
+  // cannot be.
+  // Same demotion as GetLinkageForFunction: a nominally external variable
+  // whose type has unique-external linkage is treated as unique-external.
+  Linkage L = VD->getLinkage();
+  if (L == ExternalLinkage && getContext().getLangOptions().CPlusPlus &&
+      VD->getType()->getLinkage() == UniqueExternalLinkage)
+    L = UniqueExternalLinkage;
+
+  switch (L) {
+  case NoLinkage:
+  case InternalLinkage:
+  case UniqueExternalLinkage:
+    // Initializer has side effects?
+    if (VD->getInit() && VD->getInit()->HasSideEffects(Context))
+      return false;
+    return !(VD->isStaticDataMember() && VD->isOutOfLine());
+
+  case ExternalLinkage:
+    break;
+  }
+
+  return false;
+}
+
+/// EmitGlobal - Emit (or queue for deferred emission) the definition of the
+/// given global decl.
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+  const ValueDecl *Global = cast<ValueDecl>(GD.getDecl());
+
+  // If this is an alias definition (which otherwise looks like a declaration)
+  // emit it now.
+  if (Global->hasAttr<AliasAttr>())
+    return EmitAliasDefinition(Global);
+
+  // Ignore declarations, they will be emitted on their first use.
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+    // Forward declarations are emitted lazily on first use.
+    if (!FD->isThisDeclarationADefinition())
+      return;
+  } else {
+    const VarDecl *VD = cast<VarDecl>(Global);
+    assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+    // Anything that isn't a strong definition (e.g. a tentative definition
+    // or plain declaration) is likewise emitted lazily.
+    if (VD->isThisDeclarationADefinition() != VarDecl::Definition)
+      return;
+  }
+
+  // Defer code generation when possible if this is a static definition, inline
+  // function etc.  These we only want to emit if they are used.
+  if (MayDeferGeneration(Global)) {
+    // If the value has already been used, add it directly to the
+    // DeferredDeclsToEmit list.
+    const char *MangledName = getMangledName(GD);
+    if (GlobalDeclMap.count(MangledName))
+      DeferredDeclsToEmit.push_back(GD);
+    else {
+      // Otherwise, remember that we saw a deferred decl with this name.  The
+      // first use of the mangled name will cause it to move into
+      // DeferredDeclsToEmit.
+      DeferredDecls[MangledName] = GD;
+    }
+    return;
+  }
+
+  // Otherwise emit the definition.
+  EmitGlobalDefinition(GD);
+}
+
+/// EmitGlobalDefinition - Emit the definition (body/initializer) for the
+/// given global decl, dispatching on its kind.
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+  const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+
+  // PrettyStackTraceDecl takes a non-const Decl; make the const-stripping
+  // explicit with const_cast rather than hiding it in a C-style cast.
+  PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
+                                 Context.getSourceManager(),
+                                 "Generating code for declaration");
+
+  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+    getVtableInfo().MaybeEmitVtable(GD);
+    // Build thunks for out-of-line virtual methods, except for the
+    // base-object destructor variant.
+    if (MD->isVirtual() && MD->isOutOfLine() &&
+        (!isa<CXXDestructorDecl>(D) || GD.getDtorType() != Dtor_Base)) {
+      if (isa<CXXDestructorDecl>(D)) {
+        // Thunks are keyed on the canonical declaration plus dtor variant.
+        GlobalDecl CanonGD(cast<CXXDestructorDecl>(D->getCanonicalDecl()),
+                           GD.getDtorType());
+        BuildThunksForVirtual(CanonGD);
+      } else {
+        BuildThunksForVirtual(MD->getCanonicalDecl());
+      }
+    }
+  }
+
+  // Dispatch on the kind of definition being emitted.
+  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+    EmitCXXConstructor(CD, GD.getCtorType());
+  else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+    EmitCXXDestructor(DD, GD.getDtorType());
+  else if (isa<FunctionDecl>(D))
+    EmitGlobalFunctionDefinition(GD);
+  else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+    EmitGlobalVarDefinition(VD);
+  else {
+    assert(0 && "Invalid argument to EmitGlobalDefinition()");
+  }
+}
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that correspond to this.  This is used
+/// to set the attributes on the function when it is first created.
+llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName,
+                                                       const llvm::Type *Ty,
+                                                       GlobalDecl D) {
+  // Lookup the entry, lazily creating it if necessary.  Note: MangledName
+  // must be a uniqued pointer, since GlobalDeclMap keys on the pointer.
+  llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+  if (Entry) {
+    if (Entry->getType()->getElementType() == Ty)
+      return Entry;
+
+    // Make sure the result is of the correct type.
+    const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+    return llvm::ConstantExpr::getBitCast(Entry, PTy);
+  }
+
+  // This function doesn't have a complete type (for example, the return
+  // type is an incomplete struct). Use a fake type instead, and make
+  // sure not to try to set attributes.
+  bool IsIncompleteFunction = false;
+  if (!isa<llvm::FunctionType>(Ty)) {
+    Ty = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+                                 std::vector<const llvm::Type*>(), false);
+    IsIncompleteFunction = true;
+  }
+  llvm::Function *F = llvm::Function::Create(cast<llvm::FunctionType>(Ty),
+                                             llvm::Function::ExternalLinkage,
+                                             "", &getModule());
+  F->setName(MangledName);
+  if (D.getDecl())
+    SetFunctionAttributes(D, F, IsIncompleteFunction);
+  Entry = F;
+
+  // This is the first use or definition of a mangled name.  If there is a
+  // deferred decl with this name, remember that we need to emit it at the end
+  // of the file.
+  llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
+    DeferredDecls.find(MangledName);
+  if (DDI != DeferredDecls.end()) {
+    // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+    // list, and remove it from DeferredDecls (since we don't need it anymore).
+    DeferredDeclsToEmit.push_back(DDI->second);
+    DeferredDecls.erase(DDI);
+  } else if (const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl())) {
+    // If this the first reference to a C++ inline function in a class, queue up
+    // the deferred function body for emission.  These are not seen as
+    // top-level declarations.
+    if (FD->isThisDeclarationADefinition() && MayDeferGeneration(FD))
+      DeferredDeclsToEmit.push_back(D);
+    // A called constructor which has no definition or declaration need be
+    // synthesized.
+    else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
+      if (CD->isImplicit())
+        DeferredDeclsToEmit.push_back(D);
+    } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
+      if (DD->isImplicit())
+        DeferredDeclsToEmit.push_back(D);
+    } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+      // Implicit copy-assignment operators are likewise synthesized lazily.
+      if (MD->isCopyAssignment() && MD->isImplicit())
+        DeferredDeclsToEmit.push_back(D);
+    }
+  }
+
+  return F;
+}
+
+/// GetAddrOfFunction - Return the address of the given function.  If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+                                                 const llvm::Type *Ty) {
+  // When the caller did not request a specific type, derive it from the
+  // declaration's source-level type.
+  if (Ty == 0)
+    Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
+  return GetOrCreateLLVMFunction(getMangledName(GD), Ty, GD);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(const llvm::FunctionType *FTy,
+                                     const char *Name) {
+  // Convert Name to be a uniqued string from the IdentifierInfo table.
+  // GetOrCreateLLVMFunction keys GlobalDeclMap on the const char* itself,
+  // so the name must be pointer-unique for lookups to work.
+  Name = getContext().Idents.get(Name).getNameStart();
+  return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl());
+}
+
+/// DeclIsConstantGlobal - Return true if the given variable may be emitted
+/// as a 'constant' LLVM global.
+static bool DeclIsConstantGlobal(ASTContext &Context, const VarDecl *D) {
+  // Only const-qualified (or reference-typed) variables qualify at all.
+  if (D->getType().isConstant(Context) || D->getType()->isReferenceType()) {
+    // In C++, class-typed globals are conservatively never treated as
+    // constant here.
+    // FIXME: We should do something fancier here!
+    if (Context.getLangOptions().CPlusPlus &&
+        Context.getBaseElementType(D->getType())->getAs<RecordType>())
+      return false;
+    return true;
+  }
+  return false;
+}
+
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
/// create and return an llvm GlobalVariable with the specified type.  If there
/// is something in the module with the specified name, return it potentially
/// bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this.  This is used
/// to set the attributes on the global when it is first created.
llvm::Constant *CodeGenModule::GetOrCreateLLVMGlobal(const char *MangledName,
                                                     const llvm::PointerType*Ty,
                                                     const VarDecl *D) {
  // Lookup the entry, lazily creating it if necessary.
  llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
  if (Entry) {
    if (Entry->getType() == Ty)
      return Entry;

    // Make sure the result is of the correct type.
    return llvm::ConstantExpr::getBitCast(Entry, Ty);
  }

  // This is the first use or definition of a mangled name.  If there is a
  // deferred decl with this name, remember that we need to emit it at the end
  // of the file.
  llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
    DeferredDecls.find(MangledName);
  if (DDI != DeferredDecls.end()) {
    // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
    // list, and remove it from DeferredDecls (since we don't need it anymore).
    DeferredDeclsToEmit.push_back(DDI->second);
    DeferredDecls.erase(DDI);
  }

  // Create the global with no initializer (i.e. as an external declaration)
  // in the pointee type's address space.
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
                             llvm::GlobalValue::ExternalLinkage,
                             0, "", 0,
                             false, Ty->getAddressSpace());
  GV->setName(MangledName);

  // Handle things which are present even on external declarations.
  if (D) {
    // FIXME: This code is overly simple and should be merged with other global
    // handling.
    GV->setConstant(DeclIsConstantGlobal(Context, D));

    // FIXME: Merge with other attribute handling code.
    if (D->getStorageClass() == VarDecl::PrivateExtern)
      GV->setVisibility(llvm::GlobalValue::HiddenVisibility);

    if (D->hasAttr<WeakAttr>() ||
        D->hasAttr<WeakImportAttr>())
      GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);

    GV->setThreadLocal(D->isThreadSpecified());
  }

  // Cache the new declaration in GlobalDeclMap and return it.
  return Entry = GV;
}
+
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable.  If Ty is non-null and if the global doesn't exist,
+/// then it will be greated with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+                                                  const llvm::Type *Ty) {
+  assert(D->hasGlobalStorage() && "Not a global variable");
+  QualType ASTTy = D->getType();
+  if (Ty == 0)
+    Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+  const llvm::PointerType *PTy =
+    llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
+  return GetOrCreateLLVMGlobal(getMangledName(D), PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(const llvm::Type *Ty,
+                                     const char *Name) {
+  // Convert Name to be a uniqued string from the IdentifierInfo table.
+  Name = getContext().Idents.get(Name).getNameStart();
+  return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+  assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+  if (MayDeferGeneration(D)) {
+    // If we have not seen a reference to this variable yet, place it
+    // into the deferred declarations table to be emitted if needed
+    // later.
+    const char *MangledName = getMangledName(D);
+    if (GlobalDeclMap.count(MangledName) == 0) {
+      DeferredDecls[MangledName] = D;
+      return;
+    }
+  }
+
+  // The tentative definition is the only definition.
+  EmitGlobalVarDefinition(D);
+}
+
/// getVtableLinkage - Compute the LLVM linkage for the vtable (and related
/// data) of the given C++ class, based on the class's linkage and the
/// template specialization kind of its key function (if any).
llvm::GlobalVariable::LinkageTypes 
CodeGenModule::getVtableLinkage(const CXXRecordDecl *RD) {
  // Classes without linkage, or inside an anonymous namespace, get internal
  // vtables.
  if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
    return llvm::GlobalVariable::InternalLinkage;

  if (const CXXMethodDecl *KeyFunction
                                    = RD->getASTContext().getKeyFunction(RD)) {
    // If this class has a key function, use that to determine the linkage of
    // the vtable.
    const FunctionDecl *Def = 0;
    // Prefer the defining declaration of the key function, since its
    // specialization kind and inline-ness decide the answer.
    if (KeyFunction->getBody(Def))
      KeyFunction = cast<CXXMethodDecl>(Def);
    
    switch (KeyFunction->getTemplateSpecializationKind()) {
      case TSK_Undeclared:
      case TSK_ExplicitSpecialization:
        // An inline key function means the vtable must be emitted (weak_odr)
        // in every TU that needs it; otherwise the TU defining the key
        // function owns the single strong copy.
        if (KeyFunction->isInlined())
          return llvm::GlobalVariable::WeakODRLinkage;
        
        return llvm::GlobalVariable::ExternalLinkage;
        
      case TSK_ImplicitInstantiation:
      case TSK_ExplicitInstantiationDefinition:
        return llvm::GlobalVariable::WeakODRLinkage;
        
      case TSK_ExplicitInstantiationDeclaration:
        // FIXME: Use available_externally linkage. However, this currently
        // breaks LLVM's build due to undefined symbols.
        //      return llvm::GlobalVariable::AvailableExternallyLinkage;
        return llvm::GlobalVariable::WeakODRLinkage;
    }
  }
  
  // No key function: the vtable is emitted weak_odr in every TU that uses it.
  switch (RD->getTemplateSpecializationKind()) {
  case TSK_Undeclared:
  case TSK_ExplicitSpecialization:
  case TSK_ImplicitInstantiation:
  case TSK_ExplicitInstantiationDefinition:
    return llvm::GlobalVariable::WeakODRLinkage;
    
  case TSK_ExplicitInstantiationDeclaration:
    // FIXME: Use available_externally linkage. However, this currently
    // breaks LLVM's build due to undefined symbols.
    //   return llvm::GlobalVariable::AvailableExternallyLinkage;
    return llvm::GlobalVariable::WeakODRLinkage;
  }
  
  // Silence GCC warning.
  return llvm::GlobalVariable::WeakODRLinkage;
}
+
/// GetLinkageForVariable - Map a variable's AST linkage (plus the template
/// specialization kind, for static data members) onto the CodeGen GVALinkage
/// classification used when choosing LLVM linkage.
static CodeGenModule::GVALinkage
GetLinkageForVariable(ASTContext &Context, const VarDecl *VD) {
  // If this is a static data member, compute the kind of template
  // specialization. Otherwise, this variable is not part of a
  // template.
  TemplateSpecializationKind TSK = TSK_Undeclared;
  if (VD->isStaticDataMember())
    TSK = VD->getTemplateSpecializationKind();

  Linkage L = VD->getLinkage();
  // In C++, a variable whose type has unique-external linkage (e.g. involves
  // an anonymous-namespace type) is itself effectively unique to this TU.
  if (L == ExternalLinkage && Context.getLangOptions().CPlusPlus &&
      VD->getType()->getLinkage() == UniqueExternalLinkage)
    L = UniqueExternalLinkage;

  switch (L) {
  case NoLinkage:
  case InternalLinkage:
  case UniqueExternalLinkage:
    return CodeGenModule::GVA_Internal;

  case ExternalLinkage:
    switch (TSK) {
    case TSK_Undeclared:
    case TSK_ExplicitSpecialization:

      // FIXME: ExplicitInstantiationDefinition should be weak!
    case TSK_ExplicitInstantiationDefinition:
      return CodeGenModule::GVA_StrongExternal;
      
    case TSK_ExplicitInstantiationDeclaration:
      llvm_unreachable("Variable should not be instantiated");
      // Fall through to treat this like any other instantiation.
        
    case TSK_ImplicitInstantiation:
      return CodeGenModule::GVA_TemplateInstantiation;      
    }
  }

  // Not reachable in practice; keeps compilers happy about falling off the
  // end of the switch above.
  return CodeGenModule::GVA_StrongExternal;
}
+
+CharUnits CodeGenModule::GetTargetTypeStoreSize(const llvm::Type *Ty) const {
+    return CharUnits::fromQuantity(
+      TheTargetData.getTypeStoreSizeInBits(Ty) / Context.getCharWidth());
+}
+
/// EmitGlobalVarDefinition - Emit the llvm::GlobalVariable definition (with
/// its initializer) for the given variable, replacing any previously-emitted
/// declaration of a mismatched type, and setting constant-ness, alignment,
/// linkage, and debug info.
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
  llvm::Constant *Init = 0;
  QualType ASTTy = D->getType();
  // Set when the initializer must run at startup (C++ dynamic init); the
  // global is then zero-initialized and must not be marked 'constant'.
  bool NonConstInit = false;

  const Expr *InitExpr = D->getAnyInitializer();
  
  if (!InitExpr) {
    // This is a tentative definition; tentative definitions are
    // implicitly initialized with { 0 }.
    //
    // Note that tentative definitions are only emitted at the end of
    // a translation unit, so they should never have incomplete
    // type. In addition, EmitTentativeDefinition makes sure that we
    // never attempt to emit a tentative definition if a real one
    // exists. A use may still exist, however, so we still may need
    // to do a RAUW.
    assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
    Init = EmitNullConstant(D->getType());
  } else {
    // Try to fold the initializer into a constant.
    Init = EmitConstantExpr(InitExpr, D->getType());

    if (!Init) {
      QualType T = InitExpr->getType();
      if (getLangOptions().CPlusPlus) {
        // C++: emit a dynamic initializer function and zero-init the global.
        EmitCXXGlobalVarDeclInitFunc(D);
        Init = EmitNullConstant(T);
        NonConstInit = true;
      } else {
        // C requires static initializers to be constant expressions.
        ErrorUnsupported(D, "static initializer");
        Init = llvm::UndefValue::get(getTypes().ConvertType(T));
      }
    }
  }

  const llvm::Type* InitType = Init->getType();
  llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);

  // Strip off a bitcast if we got one back.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast ||
           // all zero index gep.
           CE->getOpcode() == llvm::Instruction::GetElementPtr);
    Entry = CE->getOperand(0);
  }

  // Entry is now either a Function or GlobalVariable.
  llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);

  // We have a definition after a declaration with the wrong type.
  // We must make a new GlobalVariable* and update everything that used OldGV
  // (a declaration or tentative definition) with the new GlobalVariable*
  // (which will be a definition).
  //
  // This happens if there is a prototype for a global (e.g.
  // "extern int x[];") and then a definition of a different type (e.g.
  // "int x[10];"). This also happens when an initializer has a different type
  // from the type of the global (this happens with unions).
  if (GV == 0 ||
      GV->getType()->getElementType() != InitType ||
      GV->getType()->getAddressSpace() != ASTTy.getAddressSpace()) {

    // Remove the old entry from GlobalDeclMap so that we'll create a new one.
    GlobalDeclMap.erase(getMangledName(D));

    // Make a new global with the correct type, this is now guaranteed to work.
    GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
    GV->takeName(cast<llvm::GlobalValue>(Entry));

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(GV, Entry->getType());
    Entry->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    cast<llvm::GlobalValue>(Entry)->eraseFromParent();
  }

  // Record any annotate attribute as a module-level annotation.
  if (const AnnotateAttr *AA = D->getAttr<AnnotateAttr>()) {
    SourceManager &SM = Context.getSourceManager();
    AddAnnotation(EmitAnnotateAttr(GV, AA,
                              SM.getInstantiationLineNumber(D->getLocation())));
  }

  GV->setInitializer(Init);

  // If it is safe to mark the global 'constant', do so now.
  GV->setConstant(false);
  if (!NonConstInit && DeclIsConstantGlobal(Context, D))
    GV->setConstant(true);

  GV->setAlignment(getContext().getDeclAlign(D).getQuantity());

  // Set the llvm linkage type as appropriate.
  GVALinkage Linkage = GetLinkageForVariable(getContext(), D);
  if (Linkage == GVA_Internal)
    GV->setLinkage(llvm::Function::InternalLinkage);
  else if (D->hasAttr<DLLImportAttr>())
    GV->setLinkage(llvm::Function::DLLImportLinkage);
  else if (D->hasAttr<DLLExportAttr>())
    GV->setLinkage(llvm::Function::DLLExportLinkage);
  else if (D->hasAttr<WeakAttr>()) {
    if (GV->isConstant())
      GV->setLinkage(llvm::GlobalVariable::WeakODRLinkage);
    else
      GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
  } else if (Linkage == GVA_TemplateInstantiation)
    GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);   
  else if (!getLangOptions().CPlusPlus && !CodeGenOpts.NoCommon &&
           !D->hasExternalStorage() && !D->getInit() &&
           !D->getAttr<SectionAttr>()) {
    // C tentative-style definitions without an explicit section get common
    // linkage (unless -fno-common is in effect).
    GV->setLinkage(llvm::GlobalVariable::CommonLinkage);
    // common vars aren't constant even if declared const.
    GV->setConstant(false);
  } else
    GV->setLinkage(llvm::GlobalVariable::ExternalLinkage);

  SetCommonAttributes(D, GV);

  // Emit global variable debug information.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->setLocation(D->getLocation());
    DI->EmitGlobalVariable(GV, D);
  }
}
+
/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
/// implement a function with no prototype, e.g. "int foo() {}".  If there are
/// existing call uses of the old function in the module, this adjusts them to
/// call the new function directly.
///
/// This is not just a cleanup: the always_inline pass requires direct calls to
/// functions to be able to inline them.  If there is a bitcast in the way, it
/// won't inline them.  Instcombine normally deletes these calls, but it isn't
/// run at -O0.
static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
                                                      llvm::Function *NewFn) {
  // If we're redefining a global as a function, don't transform it.
  llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
  if (OldFn == 0) return;

  const llvm::Type *NewRetTy = NewFn->getReturnType();
  llvm::SmallVector<llvm::Value*, 4> ArgList;

  for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
       UI != E; ) {
    // TODO: Do invokes ever occur in C code?  If so, we should handle them too.
    // Advance the iterator before possibly erasing the use below.
    unsigned OpNo = UI.getOperandNo();
    llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*UI++);
    // Only rewrite calls where the old function is the callee (operand 0),
    // not uses of the function as an argument.
    if (!CI || OpNo != 0) continue;

    // If the return types don't match exactly, and if the call isn't dead, then
    // we can't transform this call.
    if (CI->getType() != NewRetTy && !CI->use_empty())
      continue;

    // If the function was passed too few arguments, don't transform.  If extra
    // arguments were passed, we silently drop them.  If any of the types
    // mismatch, we don't transform.
    unsigned ArgNo = 0;
    bool DontTransform = false;
    for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
         E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
      // Call operands are [callee, arg0, arg1, ...], hence the +1 offsets.
      if (CI->getNumOperands()-1 == ArgNo ||
          CI->getOperand(ArgNo+1)->getType() != AI->getType()) {
        DontTransform = true;
        break;
      }
    }
    if (DontTransform)
      continue;

    // Okay, we can transform this.  Create the new call instruction and copy
    // over the required information.
    ArgList.append(CI->op_begin()+1, CI->op_begin()+1+ArgNo);
    llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList.begin(),
                                                     ArgList.end(), "", CI);
    ArgList.clear();
    // Void calls can't carry a name.
    if (!NewCall->getType()->isVoidTy())
      NewCall->takeName(CI);
    NewCall->setAttributes(CI->getAttributes());
    NewCall->setCallingConv(CI->getCallingConv());

    // Finally, remove the old call, replacing any uses with the new one.
    if (!CI->use_empty())
      CI->replaceAllUsesWith(NewCall);

    // Copy any custom metadata attached with CI.
    if (llvm::MDNode *DbgNode = CI->getMetadata("dbg"))
      NewCall->setMetadata("dbg", DbgNode);
    CI->eraseFromParent();
  }
}
+
+
/// EmitGlobalFunctionDefinition - Emit the body of the given function,
/// creating the llvm::Function (or re-creating it when an earlier declaration
/// had a mismatched type) and attaching definition-time attributes.
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
  const llvm::FunctionType *Ty;
  const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());

  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
    // C++ methods are lowered via CGFunctionInfo so ABI details like the
    // implicit 'this' parameter are accounted for.
    bool isVariadic = D->getType()->getAs<FunctionProtoType>()->isVariadic();

    Ty = getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), isVariadic);
  } else {
    Ty = cast<llvm::FunctionType>(getTypes().ConvertType(D->getType()));

    // As a special case, make sure that definitions of K&R function
    // "type foo()" aren't declared as varargs (which forces the backend
    // to do unnecessary work).
    if (D->getType()->isFunctionNoProtoType()) {
      assert(Ty->isVarArg() && "Didn't lower type as expected");
      // Due to stret, the lowered function could have arguments.
      // Just create the same type as was lowered by ConvertType
      // but strip off the varargs bit.
      std::vector<const llvm::Type*> Args(Ty->param_begin(), Ty->param_end());
      Ty = llvm::FunctionType::get(Ty->getReturnType(), Args, false);
    }
  }

  // Get or create the prototype for the function.
  llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);

  // Strip off a bitcast if we got one back.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
    assert(CE->getOpcode() == llvm::Instruction::BitCast);
    Entry = CE->getOperand(0);
  }


  if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
    llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);

    // If the types mismatch then we have to rewrite the definition.
    assert(OldFn->isDeclaration() &&
           "Shouldn't replace non-declaration");

    // F is the Function* for the one with the wrong type, we must make a new
    // Function* and update everything that used F (a declaration) with the new
    // Function* (which will be a definition).
    //
    // This happens if there is a prototype for a function
    // (e.g. "int f()") and then a definition of a different type
    // (e.g. "int f(int x)").  Start by making a new function of the
    // correct type, RAUW, then steal the name.
    GlobalDeclMap.erase(getMangledName(D));
    llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
    NewFn->takeName(OldFn);

    // If this is an implementation of a function without a prototype, try to
    // replace any existing uses of the function (which may be calls) with uses
    // of the new function
    if (D->getType()->isFunctionNoProtoType()) {
      ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
      OldFn->removeDeadConstantUsers();
    }

    // Replace uses of F with the Function we will endow with a body.
    if (!Entry->use_empty()) {
      llvm::Constant *NewPtrForOldDecl =
        llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
      Entry->replaceAllUsesWith(NewPtrForOldDecl);
    }

    // Ok, delete the old function now, which is dead.
    OldFn->eraseFromParent();

    Entry = NewFn;
  }

  llvm::Function *Fn = cast<llvm::Function>(Entry);

  // Generate the LLVM IR for the function body.
  CodeGenFunction(*this).GenerateCode(D, Fn);

  SetFunctionDefinitionAttributes(D, Fn);
  SetLLVMFunctionAttributesForDefinition(D, Fn);

  // Register constructor/destructor attributes with the module's global
  // ctor/dtor lists.
  if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
    AddGlobalCtor(Fn, CA->getPriority());
  if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
    AddGlobalDtor(Fn, DA->getPriority());
}
+
/// EmitAliasDefinition - Emit an llvm::GlobalAlias for a declaration carrying
/// an alias attribute, resolving conflicts with any existing declaration or
/// definition of the same name.
void CodeGenModule::EmitAliasDefinition(const ValueDecl *D) {
  const AliasAttr *AA = D->getAttr<AliasAttr>();
  assert(AA && "Not an alias?");

  const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());

  // Unique the name through the identifier table.
  const char *AliaseeName = AA->getAliasee().c_str();
  AliaseeName = getContext().Idents.get(AliaseeName).getNameStart();

  // Create a reference to the named value.  This ensures that it is emitted
  // if a deferred decl.
  llvm::Constant *Aliasee;
  if (isa<llvm::FunctionType>(DeclTy))
    Aliasee = GetOrCreateLLVMFunction(AliaseeName, DeclTy, GlobalDecl());
  else
    Aliasee = GetOrCreateLLVMGlobal(AliaseeName,
                                    llvm::PointerType::getUnqual(DeclTy), 0);

  // Create the new alias itself, but don't set a name yet.
  llvm::GlobalValue *GA =
    new llvm::GlobalAlias(Aliasee->getType(),
                          llvm::Function::ExternalLinkage,
                          "", Aliasee, &getModule());

  // See if there is already something with the alias' name in the module.
  // Note that Entry is a reference into GlobalDeclMap, so assigning it below
  // updates the map in place.
  const char *MangledName = getMangledName(D);
  llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];

  if (Entry && !Entry->isDeclaration()) {
    // If there is a definition in the module, then it wins over the alias.
    // This is dubious, but allow it to be safe.  Just ignore the alias.
    GA->eraseFromParent();
    return;
  }

  if (Entry) {
    // If there is a declaration in the module, then we had an extern followed
    // by the alias, as in:
    //   extern int test6();
    //   ...
    //   int test6() __attribute__((alias("test7")));
    //
    // Remove it and replace uses of it with the alias.

    Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
                                                          Entry->getType()));
    Entry->eraseFromParent();
  }

  // Now we know that there is no conflict, set the name.
  Entry = GA;
  GA->setName(MangledName);

  // Set attributes which are particular to an alias; this is a
  // specialization of the attributes which may be set on a global
  // variable/function.
  if (D->hasAttr<DLLExportAttr>()) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      // The dllexport attribute is ignored for undefined symbols.
      if (FD->getBody())
        GA->setLinkage(llvm::Function::DLLExportLinkage);
    } else {
      GA->setLinkage(llvm::Function::DLLExportLinkage);
    }
  } else if (D->hasAttr<WeakAttr>() ||
             D->hasAttr<WeakImportAttr>()) {
    GA->setLinkage(llvm::Function::WeakAnyLinkage);
  }

  SetCommonAttributes(D, GA);
}
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+                                                  unsigned BuiltinID) {
+  assert((Context.BuiltinInfo.isLibFunction(BuiltinID) ||
+          Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
+         "isn't a lib fn");
+
+  // Get the name, skip over the __builtin_ prefix (if necessary).
+  const char *Name = Context.BuiltinInfo.GetName(BuiltinID);
+  if (Context.BuiltinInfo.isLibFunction(BuiltinID))
+    Name += 10;
+
+  const llvm::FunctionType *Ty =
+    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+  // Unique the name through the identifier table.
+  Name = getContext().Idents.get(Name).getNameStart();
+  return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl(FD));
+}
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys,
+                                            unsigned NumTys) {
+  return llvm::Intrinsic::getDeclaration(&getModule(),
+                                         (llvm::Intrinsic::ID)IID, Tys, NumTys);
+}
+
+llvm::Function *CodeGenModule::getMemCpyFn() {
+  if (MemCpyFn) return MemCpyFn;
+  const llvm::Type *IntPtr = TheTargetData.getIntPtrType(VMContext);
+  return MemCpyFn = getIntrinsic(llvm::Intrinsic::memcpy, &IntPtr, 1);
+}
+
+llvm::Function *CodeGenModule::getMemMoveFn() {
+  if (MemMoveFn) return MemMoveFn;
+  const llvm::Type *IntPtr = TheTargetData.getIntPtrType(VMContext);
+  return MemMoveFn = getIntrinsic(llvm::Intrinsic::memmove, &IntPtr, 1);
+}
+
+llvm::Function *CodeGenModule::getMemSetFn() {
+  if (MemSetFn) return MemSetFn;
+  const llvm::Type *IntPtr = TheTargetData.getIntPtrType(VMContext);
+  return MemSetFn = getIntrinsic(llvm::Intrinsic::memset, &IntPtr, 1);
+}
+
/// GetConstantCFStringEntry - Look up (or create) the uniquing-map entry for
/// the bytes of a CFString literal.  Pure-ASCII literals are keyed by their
/// raw bytes; others are converted to UTF-16 in the target's byte order, in
/// which case IsUTF16 is set and StringLength is the length in UTF-16 code
/// units (for ASCII it is the byte length).
static llvm::StringMapEntry<llvm::Constant*> &
GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
                         const StringLiteral *Literal,
                         bool TargetIsLSB,
                         bool &IsUTF16,
                         unsigned &StringLength) {
  unsigned NumBytes = Literal->getByteLength();

  // Check for simple case.
  if (!Literal->containsNonAsciiOrNull()) {
    StringLength = NumBytes;
    return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
                                                StringLength));
  }

  // Otherwise, convert the UTF8 literals into a byte string.
  // NumBytes UTF-16 units always suffice, since each output unit consumes at
  // least one input byte.
  llvm::SmallVector<UTF16, 128> ToBuf(NumBytes);
  const UTF8 *FromPtr = (UTF8 *)Literal->getStrData();
  UTF16 *ToPtr = &ToBuf[0];

  ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
                                               &ToPtr, ToPtr + NumBytes,
                                               strictConversion);

  // Check for conversion failure.
  if (Result != conversionOK) {
    // FIXME: Have Sema::CheckObjCString() validate the UTF-8 string and remove
    // this duplicate code.
    assert(Result == sourceIllegal && "UTF-8 to UTF-16 conversion failed");
    // Fall back to keying on the raw bytes when the literal isn't valid UTF-8.
    StringLength = NumBytes;
    return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
                                                StringLength));
  }

  // ConvertUTF8toUTF16 returns the length in ToPtr.
  StringLength = ToPtr - &ToBuf[0];

  // Render the UTF-16 string into a byte array and convert to the target byte
  // order.
  //
  // FIXME: This isn't something we should need to do here.
  llvm::SmallString<128> AsBytes;
  AsBytes.reserve(StringLength * 2);
  for (unsigned i = 0; i != StringLength; ++i) {
    unsigned short Val = ToBuf[i];
    if (TargetIsLSB) {
      AsBytes.push_back(Val & 0xFF);
      AsBytes.push_back(Val >> 8);
    } else {
      AsBytes.push_back(Val >> 8);
      AsBytes.push_back(Val & 0xFF);
    }
  }
  // Append one extra null character, the second is automatically added by our
  // caller.
  AsBytes.push_back(0);

  IsUTF16 = true;
  return Map.GetOrCreateValue(llvm::StringRef(AsBytes.data(), AsBytes.size()));
}
+
/// GetAddrOfConstantCFString - Return a constant CFString struct (class
/// pointer, flags, byte pointer, length) for the given literal, uniquing
/// identical literals through CFConstantStringMap.
llvm::Constant *
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
  unsigned StringLength = 0;
  bool isUTF16 = false;
  llvm::StringMapEntry<llvm::Constant*> &Entry =
    GetConstantCFStringEntry(CFConstantStringMap, Literal,
                             getTargetData().isLittleEndian(),
                             isUTF16, StringLength);

  // Reuse a previously-emitted CFString with the same bytes.
  if (llvm::Constant *C = Entry.getValue())
    return C;

  llvm::Constant *Zero =
      llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
  llvm::Constant *Zeros[] = { Zero, Zero };

  // If we don't already have it, get __CFConstantStringClassReference.
  if (!CFConstantStringClassRef) {
    const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
    Ty = llvm::ArrayType::get(Ty, 0);
    llvm::Constant *GV = CreateRuntimeVariable(Ty,
                                           "__CFConstantStringClassReference");
    // Decay array -> ptr
    CFConstantStringClassRef =
      llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
  }

  QualType CFTy = getContext().getCFConstantStringType();

  const llvm::StructType *STy =
    cast<llvm::StructType>(getTypes().ConvertType(CFTy));

  std::vector<llvm::Constant*> Fields(4);

  // Class pointer.
  Fields[0] = CFConstantStringClassRef;

  // Flags.
  // NOTE(review): 0x07d0 / 0x07C8 appear to be CoreFoundation's internal
  // flag words for UTF-16 vs. ASCII constant strings -- confirm against CF.
  const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
  Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
    llvm::ConstantInt::get(Ty, 0x07C8);

  // String pointer.
  llvm::Constant *C = llvm::ConstantArray::get(VMContext, Entry.getKey().str());

  llvm::GlobalValue::LinkageTypes Linkage;
  bool isConstant;
  if (isUTF16) {
    // FIXME: why do utf strings get "_" labels instead of "L" labels?
    Linkage = llvm::GlobalValue::InternalLinkage;
    // Note: -fwritable-strings doesn't make unicode CFStrings writable, but
    // does make plain ascii ones writable.
    isConstant = true;
  } else {
    Linkage = llvm::GlobalValue::PrivateLinkage;
    isConstant = !Features.WritableStrings;
  }
  
  llvm::GlobalVariable *GV =
    new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
                             ".str");
  if (isUTF16) {
    // Align UTF-16 data on the target's 'short' alignment.
    CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
    GV->setAlignment(Align.getQuantity());
  }
  Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);

  // String length.
  Ty = getTypes().ConvertType(getContext().LongTy);
  Fields[3] = llvm::ConstantInt::get(Ty, StringLength);

  // The struct.
  C = llvm::ConstantStruct::get(STy, Fields);
  GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
                                llvm::GlobalVariable::PrivateLinkage, C,
                                "_unnamed_cfstring_");
  // Place the struct in the target's CFString section, if it defines one.
  if (const char *Sect = getContext().Target.getCFStringSection())
    GV->setSection(Sect);
  Entry.setValue(GV);

  return GV;
}
+
+/// GetStringForStringLiteral - Return the appropriate bytes for a
+/// string literal, properly padded to match the literal type.
+std::string CodeGenModule::GetStringForStringLiteral(const StringLiteral *E) {
+  const char *StrData = E->getStrData();
+  unsigned Len = E->getByteLength();
+
+  const ConstantArrayType *CAT =
+    getContext().getAsConstantArrayType(E->getType());
+  assert(CAT && "String isn't pointer or array!");
+
+  // Resize the string to the right size.
+  std::string Str(StrData, StrData+Len);
+  uint64_t RealLen = CAT->getSize().getZExtValue();
+
+  if (E->isWide())
+    RealLen *= getContext().Target.getWCharWidth()/8;
+
+  Str.resize(RealLen, '\0');
+
+  return Str;
+}
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+  // FIXME: This can be more efficient.
+  // FIXME: We shouldn't need to bitcast the constant in the wide string case.
+  llvm::Constant *C = GetAddrOfConstantString(GetStringForStringLiteral(S));
+  if (S->isWide()) {
+    llvm::Type *DestTy =
+        llvm::PointerType::getUnqual(getTypes().ConvertType(S->getType()));
+    C = llvm::ConstantExpr::getBitCast(C, DestTy);
+  }
+  return C;
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+  std::string Str;
+  getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+
+  return GetAddrOfConstantCString(Str);
+}
+
+
+/// GenerateWritableString -- Creates storage for a string literal.
+static llvm::Constant *GenerateStringLiteral(const std::string &str,
+                                             bool constant,
+                                             CodeGenModule &CGM,
+                                             const char *GlobalName) {
+  // Create Constant for this string literal. Don't add a '\0'.
+  llvm::Constant *C =
+      llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
+
+  // Create a global variable for this string
+  return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+                                  llvm::GlobalValue::PrivateLinkage,
+                                  C, GlobalName);
+}
+
+/// GetAddrOfConstantString - Returns a pointer to a character array
+/// containing the literal. The contents are exactly that of the
+/// given string, i.e. it will not be null terminated automatically;
+/// see GetAddrOfConstantCString. Note that whether the result is
+/// actually a pointer to an LLVM constant depends on
+/// Feature.WriteableStrings.
+///
+/// The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str,
+                                                       const char *GlobalName) {
+  // Fall back to the default prefix when the caller supplied no name.
+  if (!GlobalName)
+    GlobalName = ".str";
+
+  // With -fwritable-strings each literal must get distinct mutable storage;
+  // sharing would make a store through one literal visible through another.
+  if (Features.WritableStrings)
+    return GenerateStringLiteral(str, false, *this, GlobalName);
+
+  // Constant strings are uniqued: key the cache on the literal's exact bytes.
+  llvm::StringMapEntry<llvm::Constant *> &Entry =
+    ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
+
+  if (llvm::Constant *Existing = Entry.getValue())
+    return Existing;
+
+  // First time we see these bytes: emit a global and remember it.
+  llvm::Constant *GV = GenerateStringLiteral(str, true, *this, GlobalName);
+  Entry.setValue(GV);
+  return GV;
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character
+/// array containing the literal and a terminating '\0'
+/// character. The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &str,
+                                                        const char *GlobalName){
+  // Append the terminator here; GetAddrOfConstantString never adds one.
+  return GetAddrOfConstantString(str + '\0', GlobalName);
+}
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(const
+                                                    ObjCImplementationDecl *D) {
+  for (ObjCImplementationDecl::propimpl_iterator
+         i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
+    ObjCPropertyImplDecl *PID = *i;
+
+    // Dynamic is just for type-checking; only @synthesize'd properties need
+    // accessor bodies generated here.
+    if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+      ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+      // Determine which methods need to be implemented, some may have
+      // been overridden. Note that ::isSynthesized is not the method
+      // we want, that just indicates if the decl came from a
+      // property. What we want to know is if the method is defined in
+      // this implementation.
+      if (!D->getInstanceMethod(PD->getGetterName()))
+        CodeGenFunction(*this).GenerateObjCGetter(
+                                 const_cast<ObjCImplementationDecl *>(D), PID);
+      // Read-only properties never get a synthesized setter.
+      if (!PD->isReadOnly() &&
+          !D->getInstanceMethod(PD->getSetterName()))
+        CodeGenFunction(*this).GenerateObjCSetter(
+                                 const_cast<ObjCImplementationDecl *>(D), PID);
+    }
+  }
+}
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+  // decl_iterator is declared by DeclContext; spell it that way rather than
+  // through the unrelated RecordDecl (a namespace is not a record).
+  for (DeclContext::decl_iterator I = ND->decls_begin(), E = ND->decls_end();
+       I != E; ++I)
+    EmitTopLevelDecl(*I);
+}
+
+/// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+  // Only extern "C" and extern "C++" are supported; diagnose anything else.
+  if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
+      LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
+    ErrorUnsupported(LSD, "linkage spec");
+    return;
+  }
+
+  // decl_iterator is declared by DeclContext; spell it that way rather than
+  // through the unrelated RecordDecl (a linkage spec is not a record).
+  for (DeclContext::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
+       I != E; ++I)
+    EmitTopLevelDecl(*I);
+}
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+  // If an error has occurred, stop code generation, but continue
+  // parsing and semantic analysis (to ensure all warnings and errors
+  // are emitted).
+  if (Diags.hasErrorOccurred())
+    return;
+
+  // Ignore dependent declarations.
+  if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
+    return;
+
+  // Dispatch on the declaration kind; unhandled kinds are asserted on below.
+  switch (D->getKind()) {
+  case Decl::CXXConversion:
+  case Decl::CXXMethod:
+  case Decl::Function:
+    // Skip function templates
+    if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+      return;
+
+    EmitGlobal(cast<FunctionDecl>(D));
+    break;
+      
+  case Decl::Var:
+    EmitGlobal(cast<VarDecl>(D));
+    break;
+
+  // C++ Decls
+  case Decl::Namespace:
+    EmitNamespace(cast<NamespaceDecl>(D));
+    break;
+    // No code generation needed.
+  case Decl::UsingShadow:
+  case Decl::Using:
+  case Decl::UsingDirective:
+  case Decl::ClassTemplate:
+  case Decl::FunctionTemplate:
+  case Decl::NamespaceAlias:
+    break;
+  case Decl::CXXConstructor:
+    // Skip function templates
+    if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
+      return;
+      
+    EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+    break;
+  case Decl::CXXDestructor:
+    EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+    break;
+
+  case Decl::StaticAssert:
+    // Nothing to do.
+    break;
+
+  // Objective-C Decls
+
+  // Forward declarations, no (immediate) code generation.
+  case Decl::ObjCClass:
+  case Decl::ObjCForwardProtocol:
+  case Decl::ObjCCategory:
+  case Decl::ObjCInterface:
+    break;
+
+  case Decl::ObjCProtocol:
+    Runtime->GenerateProtocol(cast<ObjCProtocolDecl>(D));
+    break;
+
+  case Decl::ObjCCategoryImpl:
+    // Categories have properties but don't support synthesize so we
+    // can ignore them here.
+    Runtime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+    break;
+
+  case Decl::ObjCImplementation: {
+    ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+    // Synthesized property accessors must be emitted before the class itself.
+    EmitObjCPropertyImplementations(OMD);
+    Runtime->GenerateClass(OMD);
+    break;
+  }
+  case Decl::ObjCMethod: {
+    ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
+    // If this is not a prototype, emit the body.
+    if (OMD->getBody())
+      CodeGenFunction(*this).GenerateObjCMethod(OMD);
+    break;
+  }
+  case Decl::ObjCCompatibleAlias:
+    // compatibility-alias is a directive and has no code gen.
+    break;
+
+  case Decl::LinkageSpec:
+    EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+    break;
+
+  case Decl::FileScopeAsm: {
+    // Module-level inline asm strings are concatenated, newline-separated.
+    FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
+    llvm::StringRef AsmString = AD->getAsmString()->getString();
+
+    const std::string &S = getModule().getModuleInlineAsm();
+    if (S.empty())
+      getModule().setModuleInlineAsm(AsmString);
+    else
+      getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
+    break;
+  }
+
+  default:
+    // Make sure we handled everything we should, every other kind is a
+    // non-top-level decl.  FIXME: Would be nice to have an isTopLevelDeclKind
+    // function. Need to recode Decl::Kind to do that easily.
+    assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+  }
+}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..8280766
--- /dev/null
+++ b/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,522 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENMODULE_H
+#define CLANG_CODEGEN_CODEGENMODULE_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "CGBlocks.h"
+#include "CGCall.h"
+#include "CGCXX.h"
+#include "CGVtable.h"
+#include "CodeGenTypes.h"
+#include "GlobalDecl.h"
+#include "Mangle.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/ValueHandle.h"
+#include <list>
+
+namespace llvm {
+  class Module;
+  class Constant;
+  class Function;
+  class GlobalValue;
+  class TargetData;
+  class FunctionType;
+  class LLVMContext;
+}
+
+namespace clang {
+  class TargetCodeGenInfo;
+  class ASTContext;
+  class FunctionDecl;
+  class IdentifierInfo;
+  class ObjCMethodDecl;
+  class ObjCImplementationDecl;
+  class ObjCCategoryImplDecl;
+  class ObjCProtocolDecl;
+  class ObjCEncodeExpr;
+  class BlockExpr;
+  class CharUnits;
+  class Decl;
+  class Expr;
+  class Stmt;
+  class StringLiteral;
+  class NamedDecl;
+  class ValueDecl;
+  class VarDecl;
+  class LangOptions;
+  class CodeGenOptions;
+  class Diagnostic;
+  class AnnotateAttr;
+  class CXXDestructorDecl;
+
+namespace CodeGen {
+
+  class CodeGenFunction;
+  class CGDebugInfo;
+  class CGObjCRuntime;
+
+  
+/// CodeGenModule - This class organizes the cross-function state that is used
+/// while generating LLVM code.
+class CodeGenModule : public BlockModule {
+  CodeGenModule(const CodeGenModule&);  // DO NOT IMPLEMENT
+  void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+
+  typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
+
+  ASTContext &Context;
+  const LangOptions &Features;
+  const CodeGenOptions &CodeGenOpts;
+  llvm::Module &TheModule;
+  const llvm::TargetData &TheTargetData;
+  mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
+  Diagnostic &Diags;
+  CodeGenTypes Types;
+  MangleContext MangleCtx;
+
+  /// VtableInfo - Holds information about C++ vtables.
+  CGVtableInfo VtableInfo;
+  
+  CGObjCRuntime* Runtime;
+  CGDebugInfo* DebugInfo;
+  
+  /// Cached llvm.mem{cpy,move,set} declarations; created lazily by the
+  /// corresponding getMemCpyFn/getMemMoveFn/getMemSetFn accessors.
+  llvm::Function *MemCpyFn;
+  llvm::Function *MemMoveFn;
+  llvm::Function *MemSetFn;
+
+  /// GlobalDeclMap - Mapping of decl names (represented as unique
+  /// character pointers from either the identifier table or the set
+  /// of mangled names) to global variables we have already
+  /// emitted. Note that the entries in this map are the actual
+  /// globals and therefore may not be of the same type as the decl,
+  /// they should be bitcasted on retrieval. Also note that the
+  /// globals are keyed on their source mangled name, not the global name
+  /// (which may change with attributes such as asm-labels).  The key
+  /// to this map should be generated using getMangledName().
+  ///
+  /// Note that this map always lines up exactly with the contents of the LLVM
+  /// IR symbol table, but this is quicker to query since it is doing uniqued
+  /// pointer lookups instead of full string lookups.
+  llvm::DenseMap<const char*, llvm::GlobalValue*> GlobalDeclMap;
+
+  /// \brief Contains the strings used for mangled names.
+  ///
+  /// FIXME: Eventually, this should map from the semantic/canonical
+  /// declaration for each global entity to its mangled name (if it
+  /// has one).
+  llvm::StringSet<> MangledNames;
+
+  /// DeferredDecls - This contains all the decls which have definitions but
+  /// which are deferred for emission and therefore should only be output if
+  /// they are actually used.  If a decl is in this, then it is known to have
+  /// not been referenced yet.  The key to this map is a uniqued mangled name.
+  llvm::DenseMap<const char*, GlobalDecl> DeferredDecls;
+
+  /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
+  /// that *are* actually referenced.  These get code generated when the module
+  /// is done.
+  std::vector<GlobalDecl> DeferredDeclsToEmit;
+
+  /// LLVMUsed - List of global values which are required to be
+  /// present in the object file; bitcast to i8*. This is used for
+  /// forcing visibility of symbols which may otherwise be optimized
+  /// out.
+  std::vector<llvm::WeakVH> LLVMUsed;
+
+  /// GlobalCtors - Store the list of global constructors and their respective
+  /// priorities to be emitted when the translation unit is complete.
+  CtorList GlobalCtors;
+
+  /// GlobalDtors - Store the list of global destructors and their respective
+  /// priorities to be emitted when the translation unit is complete.
+  CtorList GlobalDtors;
+
+  std::vector<llvm::Constant*> Annotations;
+
+  llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+  llvm::StringMap<llvm::Constant*> ConstantStringMap;
+
+  /// CXXGlobalInits - Variables with global initializers that need to run
+  /// before main.
+  std::vector<llvm::Constant*> CXXGlobalInits;
+
+  /// CFConstantStringClassRef - Cached reference to the class for constant
+  /// strings. This value has type int * but is actually an Obj-C class pointer.
+  llvm::Constant *CFConstantStringClassRef;
+
+  /// Lazily create the Objective-C runtime
+  void createObjCRuntime();
+
+  llvm::LLVMContext &VMContext;
+public:
+  CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
+                llvm::Module &M, const llvm::TargetData &TD, Diagnostic &Diags);
+
+  ~CodeGenModule();
+
+  /// Release - Finalize LLVM code generation.
+  void Release();
+
+  /// getObjCRuntime() - Return a reference to the configured
+  /// Objective-C runtime.
+  CGObjCRuntime &getObjCRuntime() {
+    if (!Runtime) createObjCRuntime();
+    return *Runtime;
+  }
+
+  /// hasObjCRuntime() - Return true iff an Objective-C runtime has
+  /// been configured.
+  bool hasObjCRuntime() { return !!Runtime; }
+
+  CGDebugInfo *getDebugInfo() { return DebugInfo; }
+  ASTContext &getContext() const { return Context; }
+  const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+  const LangOptions &getLangOptions() const { return Features; }
+  llvm::Module &getModule() const { return TheModule; }
+  CodeGenTypes &getTypes() { return Types; }
+  MangleContext &getMangleContext() { return MangleCtx; }
+  CGVtableInfo &getVtableInfo() { return VtableInfo; }
+  Diagnostic &getDiags() const { return Diags; }
+  const llvm::TargetData &getTargetData() const { return TheTargetData; }
+  llvm::LLVMContext &getLLVMContext() { return VMContext; }
+  const TargetCodeGenInfo &getTargetCodeGenInfo() const;
+
+  /// getDeclVisibilityMode - Compute the visibility of the decl \arg D.
+  LangOptions::VisibilityMode getDeclVisibilityMode(const Decl *D) const;
+
+  /// setGlobalVisibility - Set the visibility for the given LLVM
+  /// GlobalValue.
+  void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+
+  /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+  /// given global variable.  If Ty is non-null and if the global doesn't exist,
+  /// then it will be created with the specified type instead of whatever the
+  /// normal requested type would be.
+  llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+                                     const llvm::Type *Ty = 0);
+
+  /// GetAddrOfFunction - Return the address of the given function.  If Ty is
+  /// non-null, then this function will use the specified type if it has to
+  /// create it.
+  llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
+                                    const llvm::Type *Ty = 0);
+
+  /// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor 
+  /// for the given type.
+  llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty);
+
+  llvm::Constant *GetAddrOfThunk(GlobalDecl GD,
+                                 const ThunkAdjustment &ThisAdjustment);
+  llvm::Constant *GetAddrOfCovariantThunk(GlobalDecl GD,
+                                const CovariantThunkAdjustment &ThisAdjustment);
+  void BuildThunksForVirtual(GlobalDecl GD);
+  void BuildThunksForVirtualRecursive(GlobalDecl GD, GlobalDecl BaseOGD);
+
+  /// BuildThunk - Build a thunk for the given method.
+  llvm::Constant *BuildThunk(GlobalDecl GD, bool Extern, 
+                             const ThunkAdjustment &ThisAdjustment);
+
+  /// BuildCovariantThunk - Build a covariant thunk for the given method.
+  llvm::Constant *
+  BuildCovariantThunk(const GlobalDecl &GD, bool Extern,
+                      const CovariantThunkAdjustment &Adjustment);
+
+  /// GetNonVirtualBaseClassOffset - Returns the offset from a derived class to 
+  /// its base class. Returns null if the offset is 0. 
+  llvm::Constant *
+  GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+                               const CXXRecordDecl *BaseClassDecl);
+
+  /// ComputeThunkAdjustment - Returns the two parts required to compute the
+  /// offset for an object.
+  ThunkAdjustment ComputeThunkAdjustment(const CXXRecordDecl *ClassDecl,
+                                         const CXXRecordDecl *BaseClassDecl);
+  
+  /// GetStringForStringLiteral - Return the appropriate bytes for a string
+  /// literal, properly padded to match the literal type. If only the address of
+  /// a constant is needed consider using GetAddrOfConstantStringFromLiteral.
+  std::string GetStringForStringLiteral(const StringLiteral *E);
+
+  /// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
+  /// for the given string.
+  llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+  /// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
+  /// for the given string literal.
+  llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+
+  /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+  /// array for the given ObjCEncodeExpr node.
+  llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+  /// GetAddrOfConstantString - Returns a pointer to a character array
+  /// containing the literal. This contents are exactly that of the given
+  /// string, i.e. it will not be null terminated automatically; see
+  /// GetAddrOfConstantCString. Note that whether the result is actually a
+  /// pointer to an LLVM constant depends on Feature.WriteableStrings.
+  ///
+  /// The result has pointer to array type.
+  ///
+  /// \param GlobalName If provided, the name to use for the global
+  /// (if one is created).
+  llvm::Constant *GetAddrOfConstantString(const std::string& str,
+                                          const char *GlobalName=0);
+
+  /// GetAddrOfConstantCString - Returns a pointer to a character array
+  /// containing the literal and a terminating '\0' character. The result has
+  /// pointer to array type.
+  ///
+  /// \param GlobalName If provided, the name to use for the global (if one is
+  /// created).
+  llvm::Constant *GetAddrOfConstantCString(const std::string &str,
+                                           const char *GlobalName=0);
+
+  /// GetAddrOfCXXConstructor - Return the address of the constructor of the
+  /// given type.
+  llvm::Function *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+                                          CXXCtorType Type);
+
+  /// GetAddrOfCXXDestructor - Return the address of the destructor of the
+  /// given type.
+  llvm::Function *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+                                         CXXDtorType Type);
+
+  /// getBuiltinLibFunction - Given a builtin id for a function like
+  /// "__builtin_fabsf", return a Function* for "fabsf".
+  llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
+                                     unsigned BuiltinID);
+
+  llvm::Function *getMemCpyFn();
+  llvm::Function *getMemMoveFn();
+  llvm::Function *getMemSetFn();
+  llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0,
+                               unsigned NumTys = 0);
+
+  /// EmitTopLevelDecl - Emit code for a single top level declaration.
+  void EmitTopLevelDecl(Decl *D);
+
+  /// AddUsedGlobal - Add a global which should be forced to be
+  /// present in the object file; these are emitted to the llvm.used
+  /// metadata global.
+  void AddUsedGlobal(llvm::GlobalValue *GV);
+
+  void AddAnnotation(llvm::Constant *C) { Annotations.push_back(C); }
+
+  /// CreateRuntimeFunction - Create a new runtime function with the specified
+  /// type and name.
+  llvm::Constant *CreateRuntimeFunction(const llvm::FunctionType *Ty,
+                                        const char *Name);
+  /// CreateRuntimeVariable - Create a new runtime global variable with the
+  /// specified type and name.
+  llvm::Constant *CreateRuntimeVariable(const llvm::Type *Ty,
+                                        const char *Name);
+
+  void UpdateCompletedType(const TagDecl *TD) {
+    // Make sure that this type is translated.
+    Types.UpdateCompletedType(TD);
+  }
+
+  /// EmitConstantExpr - Try to emit the given expression as a
+  /// constant; returns 0 if the expression cannot be emitted as a
+  /// constant.
+  llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+                                   CodeGenFunction *CGF = 0);
+
+  /// EmitNullConstant - Return the result of value-initializing the given
+  /// type, i.e. a null expression of the given type.  This is usually,
+  /// but not always, an LLVM null constant.
+  llvm::Constant *EmitNullConstant(QualType T);
+
+  llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+                                   const AnnotateAttr *AA, unsigned LineNo);
+
+  llvm::Constant *EmitPointerToDataMember(const FieldDecl *FD);
+
+  /// ErrorUnsupported - Print out an error that codegen doesn't support the
+  /// specified stmt yet.
+  /// \param OmitOnError - If true, then this error should only be emitted if no
+  /// other errors have been reported.
+  void ErrorUnsupported(const Stmt *S, const char *Type,
+                        bool OmitOnError=false);
+
+  /// ErrorUnsupported - Print out an error that codegen doesn't support the
+  /// specified decl yet.
+  /// \param OmitOnError - If true, then this error should only be emitted if no
+  /// other errors have been reported.
+  void ErrorUnsupported(const Decl *D, const char *Type,
+                        bool OmitOnError=false);
+
+  /// SetInternalFunctionAttributes - Set the attributes on the LLVM
+  /// function for the given decl and function info. This applies
+  /// attributes necessary for handling the ABI as well as user
+  /// specified attributes like section.
+  void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+                                     const CGFunctionInfo &FI);
+
+  /// SetLLVMFunctionAttributes - Set the LLVM function attributes
+  /// (sext, zext, etc).
+  void SetLLVMFunctionAttributes(const Decl *D,
+                                 const CGFunctionInfo &Info,
+                                 llvm::Function *F);
+
+  /// SetLLVMFunctionAttributesForDefinition - Set the LLVM function attributes
+  /// which only apply to a function definition.
+  void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+  /// ReturnTypeUsesSret - Return true iff the given type uses 'sret' when used
+  /// as a return type.
+  bool ReturnTypeUsesSret(const CGFunctionInfo &FI);
+
+  /// ConstructAttributeList - Get the LLVM attributes and calling convention to
+  /// use for a particular function type.
+  ///
+  /// \param Info - The function type information.
+  /// \param TargetDecl - The decl these attributes are being constructed
+  /// for. If supplied the attributes applied to this decl may contribute to the
+  /// function attributes and calling convention.
+  /// \param PAL [out] - On return, the attribute list to use.
+  /// \param CallingConv [out] - On return, the LLVM calling convention to use.
+  void ConstructAttributeList(const CGFunctionInfo &Info,
+                              const Decl *TargetDecl,
+                              AttributeListType &PAL,
+                              unsigned &CallingConv);
+
+  /// getMangledName - Return the uniqued mangled name for the given entity;
+  /// the result is suitable as a key for GlobalDeclMap.
+  const char *getMangledName(const GlobalDecl &D);
+
+  const char *getMangledName(const NamedDecl *ND);
+  const char *getMangledCXXCtorName(const CXXConstructorDecl *D,
+                                    CXXCtorType Type);
+  const char *getMangledCXXDtorName(const CXXDestructorDecl *D,
+                                    CXXDtorType Type);
+
+  void EmitTentativeDefinition(const VarDecl *D);
+
+  enum GVALinkage {
+    GVA_Internal,
+    GVA_C99Inline,
+    GVA_CXXInline,
+    GVA_StrongExternal,
+    GVA_TemplateInstantiation
+  };
+
+  /// getVtableLinkage - Return the appropriate linkage for the vtable, VTT,
+  /// and type information of the given class.
+  static llvm::GlobalVariable::LinkageTypes 
+  getVtableLinkage(const CXXRecordDecl *RD);
+
+  /// GetTargetTypeStoreSize - Return the store size, in character units, of
+  /// the given LLVM type.
+  CharUnits GetTargetTypeStoreSize(const llvm::Type *Ty) const;
+  
+private:
+  /// UniqueMangledName - Unique a name by (if necessary) inserting it into the
+  /// MangledNames string map.
+  const char *UniqueMangledName(const char *NameStart, const char *NameEnd);
+
+  llvm::Constant *GetOrCreateLLVMFunction(const char *MangledName,
+                                          const llvm::Type *Ty,
+                                          GlobalDecl D);
+  llvm::Constant *GetOrCreateLLVMGlobal(const char *MangledName,
+                                        const llvm::PointerType *PTy,
+                                        const VarDecl *D);
+
+  /// SetCommonAttributes - Set attributes which are common to any
+  /// form of a global definition (alias, Objective-C method,
+  /// function, global variable).
+  ///
+  /// NOTE: This should only be called for definitions.
+  void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+  /// SetFunctionDefinitionAttributes - Set attributes for a global definition.
+  void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+                                       llvm::GlobalValue *GV);
+
+  /// SetFunctionAttributes - Set function attributes for a function
+  /// declaration.
+  void SetFunctionAttributes(GlobalDecl GD,
+                             llvm::Function *F,
+                             bool IsIncompleteFunction);
+
+  /// EmitGlobal - Emit code for a single global function or var decl. Forward
+  /// declarations are emitted lazily.
+  void EmitGlobal(GlobalDecl D);
+
+  void EmitGlobalDefinition(GlobalDecl D);
+
+  void EmitGlobalFunctionDefinition(GlobalDecl GD);
+  void EmitGlobalVarDefinition(const VarDecl *D);
+  void EmitAliasDefinition(const ValueDecl *D);
+  void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+
+  // C++ related functions.
+
+  void EmitNamespace(const NamespaceDecl *D);
+  void EmitLinkageSpec(const LinkageSpecDecl *D);
+
+  /// EmitCXXConstructors - Emit constructors (base, complete) from a
+  /// C++ constructor Decl.
+  void EmitCXXConstructors(const CXXConstructorDecl *D);
+
+  /// EmitCXXConstructor - Emit a single constructor with the given type from
+  /// a C++ constructor Decl.
+  void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
+
+  /// EmitCXXDestructors - Emit destructors (base, complete) from a
+  /// C++ destructor Decl.
+  void EmitCXXDestructors(const CXXDestructorDecl *D);
+
+  /// EmitCXXDestructor - Emit a single destructor with the given type from
+  /// a C++ destructor Decl.
+  void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+  /// EmitCXXGlobalInitFunc - Emit a function that initializes C++ globals.
+  void EmitCXXGlobalInitFunc();
+
+  void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D);
+
+  // FIXME: Hardcoding priority here is gross.
+  void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
+  void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
+
+  /// EmitCtorList - Generates a global array of functions and priorities using
+  /// the given list and name. This array will have appending linkage and is
+  /// suitable for use as a LLVM constructor or destructor array.
+  void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+
+  void EmitAnnotations(void);
+
+  /// EmitDeferred - Emit any needed decls for which code generation
+  /// was deferred.
+  void EmitDeferred(void);
+
+  /// EmitLLVMUsed - Emit the llvm.used metadata used to force
+  /// references to global which may otherwise be optimized out.
+  void EmitLLVMUsed(void);
+
+  /// MayDeferGeneration - Determine if the given decl can be emitted
+  /// lazily; this is only relevant for definitions. The given decl
+  /// must be either a function or var decl.
+  bool MayDeferGeneration(const ValueDecl *D);
+};
+}  // end namespace CodeGen
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..bcae945
--- /dev/null
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,501 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+
+#include "CGCall.h"
+#include "CGRecordLayoutBuilder.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/// CodeGenTypes constructor - capture the AST context, destination module,
+/// and target/ABI descriptions that drive AST -> LLVM type lowering.  Note
+/// that Target is taken from the ASTContext rather than passed separately.
+CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
+                           const llvm::TargetData &TD, const ABIInfo &Info)
+  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
+    TheABIInfo(Info) {
+}
+
+/// Destructor - the CGRecordLayout objects and the memoized CGFunctionInfo
+/// nodes are heap-allocated and owned by CodeGenTypes; free them here.
+CodeGenTypes::~CodeGenTypes() {
+  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+         I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+      I != E; ++I)
+    delete I->second;
+
+  // Advance the iterator before deleting the node it points at, so the
+  // deletion cannot invalidate the iterator we are still using.
+  for (llvm::FoldingSet<CGFunctionInfo>::iterator
+       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
+    delete &*I++;
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.  This is the
+/// public entry point; it drains the PointersToResolve worklist that the
+/// recursive conversion may have populated with deferred pointee types.
+const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+  llvm::PATypeHolder Result = ConvertTypeRecursive(T);
+
+  // Any pointers that were converted deferred evaluation of their pointee
+  // type, creating an opaque type instead.  This is in order to avoid
+  // problems with circular types.  Loop through all these deferred pointees,
+  // if any, and resolve them now.
+  while (!PointersToResolve.empty()) {
+    std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.pop_back_val();
+    
+    // We can handle bare pointers here because we know that the only pointers
+    // to the Opaque type are P.second and from other types.  Refining the
+    // opaque type away will invalidate P.second, but we don't mind :).
+    const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
+    P.second->refineAbstractTypeTo(NT);
+  }
+
+  return Result;
+}
+
+/// ConvertTypeRecursive - Convert a (canonicalized) type to its LLVM form,
+/// consulting and updating the TypeCache.  Unlike ConvertType(), this does
+/// not resolve deferred pointee types, so it is safe to call recursively.
+const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
+  T = Context.getCanonicalType(T);
+
+  // See if type is already cached.
+  llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
+    I = TypeCache.find(T.getTypePtr());
+  // If type is found in map and this is not a definition for an opaque
+  // placeholder type then use it. Otherwise, convert type T.
+  if (I != TypeCache.end())
+    return I->second.get();
+
+  const llvm::Type *ResultType = ConvertNewType(T);
+  TypeCache.insert(std::make_pair(T.getTypePtr(),
+                                  llvm::PATypeHolder(ResultType)));
+  return ResultType;
+}
+
+/// ConvertTypeForMemRecursive - Recursive variant of ConvertTypeForMem:
+/// convert T and widen a scalar i1 (_Bool) to the target's in-memory
+/// integer width.
+const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
+  const llvm::Type *ResultType = ConvertTypeRecursive(T);
+  if (ResultType->isInteger(1))
+    return llvm::IntegerType::get(getLLVMContext(),
+                                  (unsigned)Context.getTypeSize(T));
+  // FIXME: Should assert that the llvm type and AST type has the same size.
+  return ResultType;
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type.  For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
+const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
+  const llvm::Type *R = ConvertType(T);
+
+  // If this is a non-bool type, don't map it.
+  if (!R->isInteger(1))
+    return R;
+
+  // Otherwise, return an integer of the target-specified size.
+  // Context.getTypeSize(T) gives the type's size in bits.
+  return llvm::IntegerType::get(getLLVMContext(),
+                                (unsigned)Context.getTypeSize(T));
+
+}
+
+// Code to verify a given function type is complete, i.e. the return type
+// and all of the argument types are complete.  Returns the first TagType
+// found whose decl has no definition yet, or null if the function type is
+// fully complete.
+static const TagType *VerifyFuncTypeComplete(const Type* T) {
+  const FunctionType *FT = cast<FunctionType>(T);
+  if (const TagType* TT = FT->getResultType()->getAs<TagType>())
+    if (!TT->getDecl()->isDefinition())
+      return TT;
+  // Only prototyped function types carry argument types to check.
+  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
+    for (unsigned i = 0; i < FPT->getNumArgs(); i++)
+      if (const TagType* TT = FPT->getArgType(i)->getAs<TagType>())
+        if (!TT->getDecl()->isDefinition())
+          return TT;
+  return 0;
+}
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+  const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+    TagDeclTypes.find(Key);
+  // If we never converted this tag type, there is nothing to update.
+  if (TDTI == TagDeclTypes.end()) return;
+
+  // Remember the opaque LLVM type for this tagdecl.
+  llvm::PATypeHolder OpaqueHolder = TDTI->second;
+  assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
+         "Updating compilation of an already non-opaque type?");
+
+  // Remove it from TagDeclTypes so that it will be regenerated.
+  TagDeclTypes.erase(TDTI);
+
+  // Generate the new type.
+  const llvm::Type *NT = ConvertTagDeclType(TD);
+
+  // Refine the old opaque type to its new definition.
+  cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);
+
+  // Since we just completed a tag type, check to see if any function types
+  // were completed along with the tag type.
+  // FIXME: This is very inefficient; if we track which function types depend
+  // on which tag types, though, it should be reasonably efficient.
+  // NOTE(review): FunctionTypes.erase(i) below removes an element while the
+  // loop continues to advance `i`; DenseMap::erase invalidates iterators, so
+  // this looks unsafe -- verify against this LLVM revision's DenseMap
+  // guarantees.
+  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
+  for (i = FunctionTypes.begin(); i != FunctionTypes.end(); ++i) {
+    if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
+      // This function type still depends on an incomplete tag type; make sure
+      // that tag type has an associated opaque type.
+      ConvertTagDeclType(TT->getDecl());
+    } else {
+      // This function no longer depends on an incomplete tag type; create the
+      // function type, and refine the opaque type to the new function type.
+      llvm::PATypeHolder OpaqueHolder = i->second;
+      const llvm::Type *NFT = ConvertNewType(QualType(i->first, 0));
+      cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
+      FunctionTypes.erase(i);
+    }
+  }
+}
+
+/// getTypeForFormat - Map an APFloat semantics descriptor to the LLVM
+/// floating-point type with the same representation.  Comparison is by
+/// address, since the fltSemantics objects are singletons.
+static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
+                                          const llvm::fltSemantics &format) {
+  if (&format == &llvm::APFloat::IEEEsingle)
+    return llvm::Type::getFloatTy(VMContext);
+  if (&format == &llvm::APFloat::IEEEdouble)
+    return llvm::Type::getDoubleTy(VMContext);
+  if (&format == &llvm::APFloat::IEEEquad)
+    return llvm::Type::getFP128Ty(VMContext);
+  if (&format == &llvm::APFloat::PPCDoubleDouble)
+    return llvm::Type::getPPC_FP128Ty(VMContext);
+  if (&format == &llvm::APFloat::x87DoubleExtended)
+    return llvm::Type::getX86_FP80Ty(VMContext);
+  assert(0 && "Unknown float format!");
+  return 0;
+}
+
+/// ConvertNewType - Perform the actual, uncached conversion of a type to its
+/// LLVM form.  Callers should go through ConvertType()/ConvertTypeRecursive(),
+/// which memoize results in TypeCache; this switches over the canonical type
+/// class and handles each case directly.
+const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
+  const clang::Type &Ty = *Context.getCanonicalType(T).getTypePtr();
+
+  switch (Ty.getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+    assert(false && "Non-canonical or dependent types aren't possible.");
+    break;
+
+  case Type::Builtin: {
+    switch (cast<BuiltinType>(Ty).getKind()) {
+    case BuiltinType::Void:
+    case BuiltinType::ObjCId:
+    case BuiltinType::ObjCClass:
+    case BuiltinType::ObjCSel:
+      // LLVM void type can only be used as the result of a function call.  Just
+      // map to the same as char.
+      return llvm::IntegerType::get(getLLVMContext(), 8);
+
+    case BuiltinType::Bool:
+      // Note that we always return bool as i1 for use as a scalar type.
+      return llvm::Type::getInt1Ty(getLLVMContext());
+
+    // All integral builtins lower to an iN of the target-specified width.
+    case BuiltinType::Char_S:
+    case BuiltinType::Char_U:
+    case BuiltinType::SChar:
+    case BuiltinType::UChar:
+    case BuiltinType::Short:
+    case BuiltinType::UShort:
+    case BuiltinType::Int:
+    case BuiltinType::UInt:
+    case BuiltinType::Long:
+    case BuiltinType::ULong:
+    case BuiltinType::LongLong:
+    case BuiltinType::ULongLong:
+    case BuiltinType::WChar:
+    case BuiltinType::Char16:
+    case BuiltinType::Char32:
+      return llvm::IntegerType::get(getLLVMContext(),
+        static_cast<unsigned>(Context.getTypeSize(T)));
+
+    case BuiltinType::Float:
+    case BuiltinType::Double:
+    case BuiltinType::LongDouble:
+      // The target's float semantics decide which LLVM FP type to use.
+      return getTypeForFormat(getLLVMContext(),
+                              Context.getFloatTypeSemantics(T));
+
+    case BuiltinType::NullPtr: {
+      // Model std::nullptr_t as i8*
+      const llvm::Type *Ty = llvm::IntegerType::get(getLLVMContext(), 8);
+      return llvm::PointerType::getUnqual(Ty);
+    }
+        
+    case BuiltinType::UInt128:
+    case BuiltinType::Int128:
+      return llvm::IntegerType::get(getLLVMContext(), 128);
+    
+    case BuiltinType::Overload:
+    case BuiltinType::Dependent:
+    case BuiltinType::UndeducedAuto:
+      assert(0 && "Unexpected builtin type!");
+      break;
+    }
+    assert(0 && "Unknown builtin type!");
+    break;
+  }
+  case Type::Complex: {
+    // A complex type lowers to a two-element struct of its element type.
+    const llvm::Type *EltTy =
+      ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
+    return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
+  }
+  case Type::LValueReference:
+  case Type::RValueReference: {
+    // References lower to pointers.  Defer converting the referent via the
+    // PointersToResolve worklist (see ConvertType) to handle circular types.
+    const ReferenceType &RTy = cast<ReferenceType>(Ty);
+    QualType ETy = RTy.getPointeeType();
+    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+  }
+  case Type::Pointer: {
+    // Same deferred-pointee strategy as references above.
+    const PointerType &PTy = cast<PointerType>(Ty);
+    QualType ETy = PTy.getPointeeType();
+    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
+    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
+  }
+
+  case Type::VariableArray: {
+    const VariableArrayType &A = cast<VariableArrayType>(Ty);
+    assert(A.getIndexTypeCVRQualifiers() == 0 &&
+           "FIXME: We only handle trivial array types so far!");
+    // VLAs resolve to the innermost element type; this matches
+    // the return of alloca, and there isn't any obviously better choice.
+    return ConvertTypeForMemRecursive(A.getElementType());
+  }
+  case Type::IncompleteArray: {
+    const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
+    assert(A.getIndexTypeCVRQualifiers() == 0 &&
+           "FIXME: We only handle trivial array types so far!");
+    // int X[] -> [0 x int]
+    return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()), 0);
+  }
+  case Type::ConstantArray: {
+    const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
+    const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
+    return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
+  }
+  case Type::ExtVector:
+  case Type::Vector: {
+    const VectorType &VT = cast<VectorType>(Ty);
+    return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
+                                 VT.getNumElements());
+  }
+  case Type::FunctionNoProto:
+  case Type::FunctionProto: {
+    // First, check whether we can build the full function type.
+    if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
+      // This function's type depends on an incomplete tag type; make sure
+      // we have an opaque type corresponding to the tag type.
+      ConvertTagDeclType(TT->getDecl());
+      // Create an opaque type for this function type, save it, and return it.
+      // UpdateCompletedType refines it once the tag type is completed.
+      llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
+      FunctionTypes.insert(std::make_pair(&Ty, ResultType));
+      return ResultType;
+    }
+    // The function type can be built; call the appropriate routines to
+    // build it.
+    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
+      return GetFunctionType(getFunctionInfo(FPT), FPT->isVariadic());
+
+    // K&R-style (no-prototype) functions are treated as variadic.
+    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
+    return GetFunctionType(getFunctionInfo(FNPT), true);
+  }
+
+  case Type::ObjCInterface: {
+    // Objective-C interfaces are always opaque (outside of the
+    // runtime, which can do whatever it likes); we never refine
+    // these.
+    const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
+    if (!T)
+        T = llvm::OpaqueType::get(getLLVMContext());
+    return T;
+  }
+
+  case Type::ObjCObjectPointer: {
+    // Protocol qualifications do not influence the LLVM type, we just return a
+    // pointer to the underlying interface type. We don't need to worry about
+    // recursive conversion.
+    const llvm::Type *T =
+      ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
+    return llvm::PointerType::getUnqual(T);
+  }
+
+  case Type::Record:
+  case Type::Enum: {
+    const TagDecl *TD = cast<TagType>(Ty).getDecl();
+    const llvm::Type *Res = ConvertTagDeclType(TD);
+
+    // Register a readable name for the converted type in the module's type
+    // symbol table, e.g. "struct.foo".
+    std::string TypeName(TD->getKindName());
+    TypeName += '.';
+
+    // Name the codegen type after the typedef name
+    // if there is no tag type name available
+    if (TD->getIdentifier())
+      // FIXME: We should not have to check for a null decl context here.
+      // Right now we do it because the implicit Obj-C decls don't have one.
+      TypeName += TD->getDeclContext() ? TD->getQualifiedNameAsString() :
+        TD->getNameAsString();
+    else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
+      // FIXME: We should not have to check for a null decl context here.
+      // Right now we do it because the implicit Obj-C decls don't have one.
+      TypeName += TdT->getDecl()->getDeclContext() ? 
+        TdT->getDecl()->getQualifiedNameAsString() :
+        TdT->getDecl()->getNameAsString();
+    else
+      TypeName += "anon";
+
+    TheModule.addTypeName(TypeName, Res);
+    return Res;
+  }
+
+  case Type::BlockPointer: {
+    // Block pointers use the same deferred-pointee strategy as Type::Pointer.
+    const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
+    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get(getLLVMContext());
+    PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
+    return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
+  }
+
+  case Type::MemberPointer: {
+    // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
+    // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
+    // If we ever want to support other ABIs this needs to be abstracted.
+
+    // Pointer-to-member-function: { ptrdiff_t, ptrdiff_t };
+    // pointer-to-data-member: a single ptrdiff_t.
+    QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
+    const llvm::Type *PtrDiffTy =
+        ConvertTypeRecursive(Context.getPointerDiffType());
+    if (ETy->isFunctionType())
+      return llvm::StructType::get(TheModule.getContext(), PtrDiffTy, PtrDiffTy,
+                                   NULL);
+    return PtrDiffTy;
+  }
+
+  case Type::TemplateSpecialization:
+    assert(false && "Dependent types can't get here");
+  }
+
+  // FIXME: implement.
+  return llvm::OpaqueType::get(getLLVMContext());
+}
+
+/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+/// enum.  Returns a cached type if the tag was converted before, an opaque
+/// type for forward declarations, and a concrete (refined) type otherwise.
+const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
+
+  // TagDecl's are not necessarily unique, instead use the (clang)
+  // type connected to the decl.
+  const Type *Key =
+    Context.getTagDeclType(TD).getTypePtr();
+  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+    TagDeclTypes.find(Key);
+
+  // If we've already compiled this tag type, use the previous definition.
+  if (TDTI != TagDeclTypes.end())
+    return TDTI->second;
+
+  // If this is still a forward declaration, just define an opaque
+  // type to use for this tagged decl.
+  if (!TD->isDefinition()) {
+    llvm::Type *ResultType = llvm::OpaqueType::get(getLLVMContext());
+    TagDeclTypes.insert(std::make_pair(Key, ResultType));
+    return ResultType;
+  }
+
+  // Okay, this is a definition of a type.  Compile the implementation now.
+
+  if (TD->isEnum())  // Don't bother storing enums in TagDeclTypes.
+    return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
+
+  // This decl could well be recursive.  In this case, insert an opaque
+  // definition of this type, which the recursive uses will get.  We will then
+  // refine this opaque version later.
+
+  // Create new OpaqueType now for later use in case this is a recursive
+  // type.  This will later be refined to the actual type.
+  llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
+  TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
+
+  const RecordDecl *RD = cast<const RecordDecl>(TD);
+
+  // Force conversion of non-virtual base classes recursively.
+  // Note: the inner `RD` below shadows the RecordDecl above; both name the
+  // same declaration (TD), just with different static types.
+  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(TD)) {    
+    for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+         e = RD->bases_end(); i != e; ++i) {
+      if (!i->isVirtual()) {
+        const CXXRecordDecl *Base =
+          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+        ConvertTagDeclType(Base);
+      }
+    }
+  }
+
+  // Layout fields.
+  CGRecordLayout *Layout = CGRecordLayoutBuilder::ComputeLayout(*this, RD);
+
+  CGRecordLayouts[Key] = Layout;
+  const llvm::Type *ResultType = Layout->getLLVMType();
+
+  // Refine our Opaque type to ResultType.  This can invalidate ResultType, so
+  // make sure to read the result out of the holder.
+  cast<llvm::OpaqueType>(ResultHolder.get())
+    ->refineAbstractTypeTo(ResultType);
+
+  return ResultHolder.get();
+}
+
+/// getLLVMFieldNo - Return llvm::StructType element number
+/// that corresponds to the field FD.  The field must have been registered
+/// previously via addFieldInfo (done by the record layout builder).
+unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
+  assert(!FD->isBitField() && "Don't use getLLVMFieldNo on bit fields!");
+
+  llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
+  assert (I != FieldInfo.end()  && "Unable to find field info");
+  return I->second;
+}
+
+/// addFieldInfo - Assign field number to field FD.  Later lookups go through
+/// getLLVMFieldNo.
+void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
+  FieldInfo[FD] = No;
+}
+
+/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field
+/// FD.  The field must have been registered via addBitFieldInfo.
+CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
+  llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
+    I = BitFields.find(FD);
+  assert (I != BitFields.end()  && "Unable to find bitfield info");
+  return I->second;
+}
+
+/// addBitFieldInfo - Assign a start bit and a size to field FD, recording
+/// which struct element (FieldNo) holds the bits.
+void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo,
+                                   unsigned Start, unsigned Size) {
+  BitFields.insert(std::make_pair(FD, BitFieldInfo(FieldNo, Start, Size)));
+}
+
+/// getCGRecordLayout - Return record layout info for the given tag decl.
+/// The layout must already have been computed by ConvertTagDeclType, which
+/// populates CGRecordLayouts keyed by the tag's clang Type.
+const CGRecordLayout &
+CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
+  const Type *Key = Context.getTagDeclType(TD).getTypePtr();
+  llvm::DenseMap<const Type*, CGRecordLayout *>::const_iterator I
+    = CGRecordLayouts.find(Key);
+  assert (I != CGRecordLayouts.end()
+          && "Unable to find record layout information for type");
+  return *I->second;
+}
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..87ba0bc
--- /dev/null
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,254 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include <vector>
+
+#include "CGCall.h"
+#include "GlobalDecl.h"
+
+namespace llvm {
+  class FunctionType;
+  class Module;
+  class OpaqueType;
+  class PATypeHolder;
+  class TargetData;
+  class Type;
+  class LLVMContext;
+}
+
+namespace clang {
+  class ABIInfo;
+  class ASTContext;
+  class CXXConstructorDecl;
+  class CXXDestructorDecl;
+  class CXXMethodDecl;
+  class FieldDecl;
+  class FunctionProtoType;
+  class ObjCInterfaceDecl;
+  class ObjCIvarDecl;
+  class PointerType;
+  class QualType;
+  class RecordDecl;
+  class TagDecl;
+  class TargetInfo;
+  class Type;
+
+namespace CodeGen {
+  class CodeGenTypes;
+
+  /// CGRecordLayout - This class handles struct and union layout info while
+  /// lowering AST types to LLVM types.  Instances are created by the record
+  /// layout builder and owned by CodeGenTypes (see CGRecordLayouts map).
+  class CGRecordLayout {
+    CGRecordLayout(); // DO NOT IMPLEMENT
+
+    /// LLVMType - The LLVMType corresponding to this record layout.
+    const llvm::Type *LLVMType;
+
+    /// ContainsPointerToDataMember - Whether one of the fields in this record 
+    /// layout is a pointer to data member, or a struct that contains pointer to
+    /// data member.
+    bool ContainsPointerToDataMember;
+
+  public:
+    CGRecordLayout(const llvm::Type *T, bool ContainsPointerToDataMember)
+      : LLVMType(T), ContainsPointerToDataMember(ContainsPointerToDataMember) { }
+
+    /// getLLVMType - Return llvm type associated with this record.
+    const llvm::Type *getLLVMType() const {
+      return LLVMType;
+    }
+
+    /// containsPointerToDataMember - Whether this struct contains pointers to
+    /// data members.
+    bool containsPointerToDataMember() const {
+      return ContainsPointerToDataMember;
+    }
+  };
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+  ASTContext &Context;
+  const TargetInfo &Target;
+  llvm::Module& TheModule;
+  const llvm::TargetData& TheTargetData;
+  const ABIInfo& TheABIInfo;
+
+  /// PointersToResolve - Pointee types whose conversion was deferred behind
+  /// an opaque stand-in to break cycles; drained by ConvertType().
+  llvm::SmallVector<std::pair<QualType,
+                              llvm::OpaqueType *>, 8>  PointersToResolve;
+
+  /// TagDeclTypes - Maps a tag's clang Type to its (possibly still opaque)
+  /// LLVM type.
+  llvm::DenseMap<const Type*, llvm::PATypeHolder> TagDeclTypes;
+
+  /// FunctionTypes - Function types that are still opaque because they
+  /// depend on an incomplete tag type; refined by UpdateCompletedType.
+  llvm::DenseMap<const Type*, llvm::PATypeHolder> FunctionTypes;
+
+  /// The opaque type map for Objective-C interfaces. All direct
+  /// manipulation is done by the runtime interfaces, which are
+  /// responsible for coercing to the appropriate type; these opaque
+  /// types are never refined.
+  llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
+
+  /// CGRecordLayouts - This maps llvm struct type with corresponding
+  /// record layout info.
+  /// FIXME : If CGRecordLayout is less than 16 bytes then
+  /// inline it in the map.
+  llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+  /// FieldInfo - This maps struct field with corresponding llvm struct type
+  /// field no. This info is populated by record organizer.
+  llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+  /// FunctionInfos - Hold memoized CGFunctionInfo results.
+  llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+public:
+  /// BitFieldInfo - Records where a bit-field's bits live: the struct
+  /// element index, the starting bit within it, and the width in bits.
+  struct BitFieldInfo {
+    BitFieldInfo(unsigned FieldNo,
+                 unsigned Start,
+                 unsigned Size)
+      : FieldNo(FieldNo), Start(Start), Size(Size) {}
+
+    unsigned FieldNo;
+    unsigned Start;
+    unsigned Size;
+  };
+
+private:
+  llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields;
+
+  /// TypeCache - This map keeps cache of llvm::Types (through PATypeHolder)
+  /// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is
+  /// used instead of llvm::Type because it allows us to bypass potential
+  /// dangling type pointers due to type refinement on llvm side.
+  llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+
+  /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
+  /// method directly because it does not do any type caching. This method
+  /// is available only for ConvertType(). ConvertType() is preferred
+  /// interface to convert type T into a llvm::Type.
+  const llvm::Type *ConvertNewType(QualType T);
+public:
+  CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD,
+               const ABIInfo &Info);
+  ~CodeGenTypes();
+
+  const llvm::TargetData &getTargetData() const { return TheTargetData; }
+  const TargetInfo &getTarget() const { return Target; }
+  ASTContext &getContext() const { return Context; }
+  const ABIInfo &getABIInfo() const { return TheABIInfo; }
+  llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
+
+  /// ConvertType - Convert type T into a llvm::Type.
+  const llvm::Type *ConvertType(QualType T);
+  const llvm::Type *ConvertTypeRecursive(QualType T);
+
+  /// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
+  /// ConvertType in that it is used to convert to the memory representation for
+  /// a type.  For example, the scalar representation for _Bool is i1, but the
+  /// memory representation is usually i8 or i32, depending on the target.
+  const llvm::Type *ConvertTypeForMem(QualType T);
+  const llvm::Type *ConvertTypeForMemRecursive(QualType T);
+
+  /// GetFunctionType - Get the LLVM function type for \arg Info.
+  const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
+                                            bool IsVariadic);
+
+
+  /// GetFunctionTypeForVtable - Get the LLVM function type for use in a vtable,
+  /// given a CXXMethodDecl. If the method has an incomplete return type, 
+  /// and/or incomplete argument types, this will return the opaque type.
+  const llvm::Type *GetFunctionTypeForVtable(const CXXMethodDecl *MD);
+
+  const CGRecordLayout &getCGRecordLayout(const TagDecl*) const;
+
+  /// getLLVMFieldNo - Return llvm::StructType element number
+  /// that corresponds to the field FD.
+  unsigned getLLVMFieldNo(const FieldDecl *FD);
+
+  /// UpdateCompletedType - When we find the full definition for a TagDecl,
+  /// replace the 'opaque' type we previously made for it if applicable.
+  void UpdateCompletedType(const TagDecl *TD);
+
+private:
+  const CGFunctionInfo &getFunctionInfo(const FunctionNoProtoType *FTNP);
+  const CGFunctionInfo &getFunctionInfo(const FunctionProtoType *FTP);
+
+public:
+  /// getFunctionInfo - Get the function info for the specified function decl.
+  const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
+  
+  const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
+  const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
+  const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
+  const CGFunctionInfo &getFunctionInfo(const CXXConstructorDecl *D,
+                                        CXXCtorType Type);
+  const CGFunctionInfo &getFunctionInfo(const CXXDestructorDecl *D,
+                                        CXXDtorType Type);
+
+  /// getFunctionInfo - Get the function info for a call with the given
+  /// argument list, taking the calling convention and noreturn-ness from
+  /// the function type itself.
+  const CGFunctionInfo &getFunctionInfo(const CallArgList &Args,
+                                        const FunctionType *Ty) {
+    return getFunctionInfo(Ty->getResultType(), Args,
+                           Ty->getCallConv(), Ty->getNoReturnAttr());
+  }
+
+  // getFunctionInfo - Get the function info for a member function.
+  const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
+                                        const FunctionProtoType *FTP);
+  
+  /// getFunctionInfo - Get the function info for a function described by a
+  /// return type and argument types. If the calling convention is not
+  /// specified, the "C" calling convention will be used.
+  const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+                                        const CallArgList &Args,
+                                        CallingConv CC,
+                                        bool NoReturn);
+  const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+                                        const FunctionArgList &Args,
+                                        CallingConv CC,
+                                        bool NoReturn);
+  const CGFunctionInfo &getFunctionInfo(QualType RetTy,
+                                  const llvm::SmallVector<QualType, 16> &ArgTys,
+                                        CallingConv CC,
+                                        bool NoReturn);
+
+public:  // These are internal details of CGT that shouldn't be used externally.
+  /// addFieldInfo - Assign field number to field FD.
+  void addFieldInfo(const FieldDecl *FD, unsigned FieldNo);
+
+  /// addBitFieldInfo - Assign a start bit and a size to field FD.
+  void addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo,
+                       unsigned Start, unsigned Size);
+
+  /// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field
+  /// FD.
+  BitFieldInfo getBitFieldInfo(const FieldDecl *FD);
+
+  /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+  /// enum.
+  const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+
+  /// GetExpandedTypes - Expand the type \arg Ty into the LLVM
+  /// argument types it would be passed as on the provided vector \arg
+  /// ArgTys. See ABIArgInfo::Expand.
+  void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys);
+};
+
+}  // end namespace CodeGen
+}  // end namespace clang
+
+#endif
diff --git a/lib/CodeGen/GlobalDecl.h b/lib/CodeGen/GlobalDecl.h
new file mode 100644
index 0000000..b8a98d7
--- /dev/null
+++ b/lib/CodeGen/GlobalDecl.h
@@ -0,0 +1,113 @@
+//===--- GlobalDecl.h - Global declaration holder ---------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A GlobalDecl can hold either a regular variable/function or a C++ ctor/dtor
+// together with its type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_GLOBALDECL_H
+#define CLANG_CODEGEN_GLOBALDECL_H
+
+#include "CGCXX.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+
+namespace clang {
+
+namespace CodeGen {
+
+/// GlobalDecl - represents a global declaration. This can either be a
+/// CXXConstructorDecl and the constructor type (Base, Complete),
+/// a CXXDestructorDecl and the destructor type (Base, Complete) or
+/// a VarDecl, a FunctionDecl or a BlockDecl.
+class GlobalDecl {
+  llvm::PointerIntPair<const Decl*, 2> Value;
+
+  void Init(const Decl *D) {
+    assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!");
+    assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!");
+
+    Value.setPointer(D);
+  }
+  
+public:
+  GlobalDecl() {}
+
+  GlobalDecl(const VarDecl *D) { Init(D);}
+  GlobalDecl(const FunctionDecl *D) { Init(D); }
+  GlobalDecl(const BlockDecl *D) { Init(D); }
+  GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
+
+  GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
+  : Value(D, Type) {}
+  GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
+  : Value(D, Type) {}
+
+  const Decl *getDecl() const { return Value.getPointer(); }
+
+  CXXCtorType getCtorType() const {
+    assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
+    return static_cast<CXXCtorType>(Value.getInt());
+  }
+
+  CXXDtorType getDtorType() const {
+    assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
+    return static_cast<CXXDtorType>(Value.getInt());
+  }
+  
+  friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
+    return LHS.Value == RHS.Value;
+  }
+  
+  void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
+
+  static GlobalDecl getFromOpaquePtr(void *P) {
+    GlobalDecl GD;
+    GD.Value.setFromOpaqueValue(P);
+    return GD;
+  }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+namespace llvm {
+  template<class> struct DenseMapInfo;
+
+  template<> struct DenseMapInfo<clang::CodeGen::GlobalDecl> {
+    static inline clang::CodeGen::GlobalDecl getEmptyKey() {
+      return clang::CodeGen::GlobalDecl();
+    }
+  
+    static inline clang::CodeGen::GlobalDecl getTombstoneKey() {
+      return clang::CodeGen::GlobalDecl::
+        getFromOpaquePtr(reinterpret_cast<void*>(-1));
+    }
+
+    static unsigned getHashValue(clang::CodeGen::GlobalDecl GD) {
+      return DenseMapInfo<void*>::getHashValue(GD.getAsOpaquePtr());
+    }
+    
+    static bool isEqual(clang::CodeGen::GlobalDecl LHS, 
+                        clang::CodeGen::GlobalDecl RHS) {
+      return LHS == RHS;
+    }
+      
+  };
+  
+  // GlobalDecl isn't *technically* a POD type. However, its copy constructor,
+  // copy assignment operator, and destructor are all trivial.
+  template <>
+  struct isPodLike<clang::CodeGen::GlobalDecl> {
+    static const bool value = true;
+  };
+} // end namespace llvm
+
+#endif
diff --git a/lib/CodeGen/Makefile b/lib/CodeGen/Makefile
new file mode 100644
index 0000000..83cb367
--- /dev/null
+++ b/lib/CodeGen/Makefile
@@ -0,0 +1,25 @@
+##===- clang/lib/CodeGen/Makefile --------------------------*- Makefile -*-===##
+# 
+#                     The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+# 
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST -> LLVM code generation library for the 
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangCodeGen
+BUILD_ARCHIVE = 1
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
+ifdef CLANG_VENDOR
+CPPFLAGS += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
+endif
+
+include $(LEVEL)/Makefile.common
+
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
new file mode 100644
index 0000000..a302225
--- /dev/null
+++ b/lib/CodeGen/Mangle.cpp
@@ -0,0 +1,1855 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+//   http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+#include "Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "CGVtable.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+  
+static const CXXMethodDecl *getStructor(const CXXMethodDecl *MD) {
+  assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
+         "Passed in decl is not a ctor or dtor!");
+  
+  if (const TemplateDecl *TD = MD->getPrimaryTemplate()) {
+    MD = cast<CXXMethodDecl>(TD->getTemplatedDecl());
+
+    assert((isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) &&
+           "Templated decl is not a ctor or dtor!");
+  }
+    
+  return MD;
+}
+
+static const unsigned UnknownArity = ~0U;
+  
+/// CXXNameMangler - Manage the mangling of a single name.
+class CXXNameMangler {
+  MangleContext &Context;
+  llvm::raw_svector_ostream Out;
+
+  const CXXMethodDecl *Structor;
+  unsigned StructorType;
+
+  llvm::DenseMap<uintptr_t, unsigned> Substitutions;
+
+  ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+  CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res)
+    : Context(C), Out(Res), Structor(0), StructorType(0) { }
+  CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+                 const CXXConstructorDecl *D, CXXCtorType Type)
+    : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+  CXXNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res,
+                 const CXXDestructorDecl *D, CXXDtorType Type)
+    : Context(C), Out(Res), Structor(getStructor(D)), StructorType(Type) { }
+
+#if MANGLE_CHECKER
+  ~CXXNameMangler() {
+    if (Out.str()[0] == '\01')
+      return;
+    
+    int status = 0;
+    char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
+    assert(status == 0 && "Could not demangle mangled name!");
+    free(result);
+  }
+#endif
+  llvm::raw_svector_ostream &getStream() { return Out; }
+
+  void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
+  void mangleCallOffset(const ThunkAdjustment &Adjustment);
+  void mangleNumber(int64_t Number);
+  void mangleFunctionEncoding(const FunctionDecl *FD);
+  void mangleName(const NamedDecl *ND);
+  void mangleType(QualType T);
+
+private:
+  bool mangleSubstitution(const NamedDecl *ND);
+  bool mangleSubstitution(QualType T);
+  bool mangleSubstitution(uintptr_t Ptr);
+
+  bool mangleStandardSubstitution(const NamedDecl *ND);
+
+  void addSubstitution(const NamedDecl *ND) {
+    ND = cast<NamedDecl>(ND->getCanonicalDecl());
+
+    addSubstitution(reinterpret_cast<uintptr_t>(ND));
+  }
+  void addSubstitution(QualType T);
+  void addSubstitution(uintptr_t Ptr);
+
+  void mangleUnresolvedScope(NestedNameSpecifier *Qualifier);
+  void mangleUnresolvedName(NestedNameSpecifier *Qualifier,
+                            DeclarationName Name,
+                            unsigned KnownArity = UnknownArity);
+
+  void mangleName(const TemplateDecl *TD,
+                  const TemplateArgument *TemplateArgs,
+                  unsigned NumTemplateArgs);
+  void mangleUnqualifiedName(const NamedDecl *ND) {
+    mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
+  }
+  void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+                             unsigned KnownArity);
+  void mangleUnscopedName(const NamedDecl *ND);
+  void mangleUnscopedTemplateName(const TemplateDecl *ND);
+  void mangleSourceName(const IdentifierInfo *II);
+  void mangleLocalName(const NamedDecl *ND);
+  void mangleNestedName(const NamedDecl *ND, const DeclContext *DC);
+  void mangleNestedName(const TemplateDecl *TD,
+                        const TemplateArgument *TemplateArgs,
+                        unsigned NumTemplateArgs);
+  void manglePrefix(const DeclContext *DC);
+  void mangleTemplatePrefix(const TemplateDecl *ND);
+  void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+  void mangleQualifiers(Qualifiers Quals);
+
+  void mangleObjCMethodName(const ObjCMethodDecl *MD);
+  
+  // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+  void mangleType(const TagType*);
+  void mangleBareFunctionType(const FunctionType *T,
+                              bool MangleReturnType);
+
+  void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
+  void mangleMemberExpr(const Expr *Base, bool IsArrow,
+                        NestedNameSpecifier *Qualifier,
+                        DeclarationName Name,
+                        unsigned KnownArity);
+  void mangleCalledExpression(const Expr *E, unsigned KnownArity);
+  void mangleExpression(const Expr *E);
+  void mangleCXXCtorType(CXXCtorType T);
+  void mangleCXXDtorType(CXXDtorType T);
+
+  void mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+                          unsigned NumTemplateArgs);
+  void mangleTemplateArgs(const TemplateArgumentList &L);
+  void mangleTemplateArg(const TemplateArgument &A);
+
+  void mangleTemplateParameter(unsigned Index);
+};
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+  D = D->getCanonicalDecl();
+  for (const DeclContext *DC = D->getDeclContext();
+       !DC->isTranslationUnit(); DC = DC->getParent()) {
+    if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+      return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+  }
+
+  return false;
+}
+
+bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
+  // In C, functions with no attributes never need to be mangled. Fastpath them.
+  if (!getASTContext().getLangOptions().CPlusPlus && !D->hasAttrs())
+    return false;
+
+  // Any decl can be declared with __asm("foo") on it, and this takes precedence
+  // over all other naming in the .o file.
+  if (D->hasAttr<AsmLabelAttr>())
+    return true;
+
+  // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+  // (always) as does passing a C++ member function and a function
+  // whose name is not a simple identifier.
+  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+  if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+             !FD->getDeclName().isIdentifier()))
+    return true;
+
+  // Otherwise, no mangling is done outside C++ mode.
+  if (!getASTContext().getLangOptions().CPlusPlus)
+    return false;
+
+  // No mangling in an "implicit extern C" header.
+  if (D->getLocation().isValid() &&
+      getASTContext().getSourceManager().
+      isInExternCSystemHeader(D->getLocation()))
+    return false;
+
+  // Variables at global scope with non-internal linkage are not mangled
+  if (!FD) {
+    const DeclContext *DC = D->getDeclContext();
+    // Check for extern variable declared locally.
+    if (isa<FunctionDecl>(DC) && D->hasLinkage())
+      while (!DC->isNamespace() && !DC->isTranslationUnit())
+        DC = DC->getParent();
+    if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
+      return false;
+  }
+
+  // C functions and "main" are not mangled.
+  if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+    return false;
+
+  return true;
+}
+
+void CXXNameMangler::mangle(const NamedDecl *D, llvm::StringRef Prefix) {
+  // Any decl can be declared with __asm("foo") on it, and this takes precedence
+  // over all other naming in the .o file.
+  if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+    // If we have an asm name, then we use it as the mangling.
+    Out << '\01';  // LLVM IR Marker for __asm("foo")
+    Out << ALA->getLabel();
+    return;
+  }
+
+  // <mangled-name> ::= _Z <encoding>
+  //            ::= <data name>
+  //            ::= <special-name>
+  Out << Prefix;
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+    mangleFunctionEncoding(FD);
+  else
+    mangleName(cast<VarDecl>(D));
+}
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+  // <encoding> ::= <function name> <bare-function-type>
+  mangleName(FD);
+
+  // Don't mangle in the type if this isn't a decl we should typically mangle.
+  if (!Context.shouldMangleDeclName(FD))
+    return;
+
+  // Whether the mangling of a function type includes the return type depends on
+  // the context and the nature of the function. The rules for deciding whether
+  // the return type is included are:
+  //
+  //   1. Template functions (names or types) have return types encoded, with
+  //   the exceptions listed below.
+  //   2. Function types not appearing as part of a function name mangling,
+  //   e.g. parameters, pointer types, etc., have return type encoded, with the
+  //   exceptions listed below.
+  //   3. Non-template function names do not have return types encoded.
+  //
+  // The exceptions mentioned in (1) and (2) above, for which the return type is
+  // never included, are
+  //   1. Constructors.
+  //   2. Destructors.
+  //   3. Conversion operator functions, e.g. operator int.
+  bool MangleReturnType = false;
+  if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
+    if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
+          isa<CXXConversionDecl>(FD)))
+      MangleReturnType = true;
+
+    // Mangle the type of the primary template.
+    FD = PrimaryTemplate->getTemplatedDecl();
+  }
+
+  // Do the canonicalization out here because parameter types can
+  // undergo additional canonicalization (e.g. array decay).
+  FunctionType *FT = cast<FunctionType>(Context.getASTContext()
+                                          .getCanonicalType(FD->getType()));
+
+  mangleBareFunctionType(FT, MangleReturnType);
+}
+
+/// isStd - Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+  const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+  return II && II->isStr("std");
+}
+
+static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
+  while (isa<LinkageSpecDecl>(DC)) {
+    assert(cast<LinkageSpecDecl>(DC)->getLanguage() ==
+           LinkageSpecDecl::lang_cxx && "Unexpected linkage decl!");
+    DC = DC->getParent();
+  }
+  
+  return DC;
+}
+
+// isStdNamespace - Return whether a given decl context is a toplevel 'std'
+// namespace.
+static bool isStdNamespace(const DeclContext *DC) {
+  if (!DC->isNamespace())
+    return false;
+  
+  if (!IgnoreLinkageSpecDecls(DC->getParent())->isTranslationUnit())
+    return false;
+  
+  return isStd(cast<NamespaceDecl>(DC));
+}
+
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+  // Check if we have a function template.
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){
+    if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+      TemplateArgs = FD->getTemplateSpecializationArgs();
+      return TD;
+    }
+  }
+
+  // Check if we have a class template.
+  if (const ClassTemplateSpecializationDecl *Spec =
+        dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+    TemplateArgs = &Spec->getTemplateArgs();
+    return Spec->getSpecializedTemplate();
+  }
+
+  return 0;
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+  //  <name> ::= <nested-name>
+  //         ::= <unscoped-name>
+  //         ::= <unscoped-template-name> <template-args>
+  //         ::= <local-name>
+  //
+  const DeclContext *DC = ND->getDeclContext();
+
+  // If this is an extern variable declared locally, the relevant DeclContext
+  // is that of the containing namespace, or the translation unit.
+  if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+    while (!DC->isNamespace() && !DC->isTranslationUnit())
+      DC = DC->getParent();
+
+  while (isa<LinkageSpecDecl>(DC))
+    DC = DC->getParent();
+
+  if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+    // Check if we have a template.
+    const TemplateArgumentList *TemplateArgs = 0;
+    if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+      mangleUnscopedTemplateName(TD);
+      mangleTemplateArgs(*TemplateArgs);
+      return;
+    }
+
+    mangleUnscopedName(ND);
+    return;
+  }
+
+  if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) {
+    mangleLocalName(ND);
+    return;
+  }
+
+  mangleNestedName(ND, DC);
+}
+void CXXNameMangler::mangleName(const TemplateDecl *TD,
+                                const TemplateArgument *TemplateArgs,
+                                unsigned NumTemplateArgs) {
+  const DeclContext *DC = IgnoreLinkageSpecDecls(TD->getDeclContext());
+
+  if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+    mangleUnscopedTemplateName(TD);
+    mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+  } else {
+    mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
+  }
+}
+
+void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
+  //  <unscoped-name> ::= <unqualified-name>
+  //                  ::= St <unqualified-name>   # ::std::
+  if (isStdNamespace(ND->getDeclContext()))
+    Out << "St";
+
+  mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
+  //     <unscoped-template-name> ::= <unscoped-name>
+  //                              ::= <substitution>
+  if (mangleSubstitution(ND))
+    return;
+
+  // <template-template-param> ::= <template-param>
+  if (const TemplateTemplateParmDecl *TTP
+                                     = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+    mangleTemplateParameter(TTP->getIndex());
+    return;
+  } 
+
+  mangleUnscopedName(ND->getTemplatedDecl());
+  addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleNumber(int64_t Number) {
+  //  <number> ::= [n] <non-negative decimal integer>
+  if (Number < 0) {
+    Out << 'n';
+    Number = -Number;
+  }
+  
+  Out << Number;
+}
+
+void CXXNameMangler::mangleCallOffset(const ThunkAdjustment &Adjustment) {
+  //  <call-offset>  ::= h <nv-offset> _
+  //                 ::= v <v-offset> _
+  //  <nv-offset>    ::= <offset number>        # non-virtual base override
+  //  <v-offset>     ::= <offset number> _ <virtual offset number>
+  //                      # virtual base override, with vcall offset
+  if (!Adjustment.Virtual) {
+    Out << 'h';
+    mangleNumber(Adjustment.NonVirtual);
+    Out << '_';
+    return;
+  }
+  
+  Out << 'v';
+  mangleNumber(Adjustment.NonVirtual);
+  Out << '_';
+  mangleNumber(Adjustment.Virtual);
+  Out << '_';
+}
+
+void CXXNameMangler::mangleUnresolvedScope(NestedNameSpecifier *Qualifier) {
+  Qualifier = getASTContext().getCanonicalNestedNameSpecifier(Qualifier);
+  switch (Qualifier->getKind()) {
+  case NestedNameSpecifier::Global:
+    // nothing
+    break;
+  case NestedNameSpecifier::Namespace:
+    mangleName(Qualifier->getAsNamespace());
+    break;
+  case NestedNameSpecifier::TypeSpec:
+  case NestedNameSpecifier::TypeSpecWithTemplate:
+    mangleType(QualType(Qualifier->getAsType(), 0));
+    break;
+  case NestedNameSpecifier::Identifier:
+    mangleUnresolvedScope(Qualifier->getPrefix());
+    mangleSourceName(Qualifier->getAsIdentifier());
+    break;
+  }
+}
+
+/// Mangles a name which was not resolved to a specific entity.
+void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *Qualifier,
+                                          DeclarationName Name,
+                                          unsigned KnownArity) {
+  if (Qualifier)
+    mangleUnresolvedScope(Qualifier);
+  // FIXME: ambiguity of unqualified lookup with ::
+
+  mangleUnqualifiedName(0, Name, KnownArity);
+}
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+                                           DeclarationName Name,
+                                           unsigned KnownArity) {
+  //  <unqualified-name> ::= <operator-name>
+  //                     ::= <ctor-dtor-name>
+  //                     ::= <source-name>
+  switch (Name.getNameKind()) {
+  case DeclarationName::Identifier: {
+    if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+      // We must avoid conflicts between internally- and externally-
+      // linked variable declaration names in the same TU. 
+      // This naming convention is the same as that followed by GCC, though it
+      // shouldn't actually matter.
+      if (ND && isa<VarDecl>(ND) && ND->getLinkage() == InternalLinkage &&
+          ND->getDeclContext()->isFileContext())
+        Out << 'L';
+
+      mangleSourceName(II);
+      break;
+    }
+
+    // Otherwise, an anonymous entity.  We must have a declaration.
+    assert(ND && "mangling empty name without declaration");
+
+    if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+      if (NS->isAnonymousNamespace()) {
+        // This is how gcc mangles these names.
+        Out << "12_GLOBAL__N_1";
+        break;
+      }
+    }
+
+    // We must have an anonymous struct.
+    const TagDecl *TD = cast<TagDecl>(ND);
+    if (const TypedefDecl *D = TD->getTypedefForAnonDecl()) {
+      assert(TD->getDeclContext() == D->getDeclContext() &&
+             "Typedef should not be in another decl context!");
+      assert(D->getDeclName().getAsIdentifierInfo() &&
+             "Typedef was not named!");
+      mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+      break;
+    }
+
+    // Get a unique id for the anonymous struct.
+    uint64_t AnonStructId = Context.getAnonymousStructId(TD);
+
+    // Mangle it as a source name in the form
+    // [n] $_<id>
+    // where n is the length of the string.
+    llvm::SmallString<8> Str;
+    Str += "$_";
+    Str += llvm::utostr(AnonStructId);
+
+    Out << Str.size();
+    Out << Str.str();
+    break;
+  }
+
+  case DeclarationName::ObjCZeroArgSelector:
+  case DeclarationName::ObjCOneArgSelector:
+  case DeclarationName::ObjCMultiArgSelector:
+    assert(false && "Can't mangle Objective-C selector names here!");
+    break;
+
+  case DeclarationName::CXXConstructorName:
+    if (ND == Structor)
+      // If the named decl is the C++ constructor we're mangling, use the type
+      // we were given.
+      mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+    else
+      // Otherwise, use the complete constructor name. This is relevant if a
+      // class with a constructor is declared within a constructor.
+      mangleCXXCtorType(Ctor_Complete);
+    break;
+
+  case DeclarationName::CXXDestructorName:
+    if (ND == Structor)
+      // If the named decl is the C++ destructor we're mangling, use the type we
+      // were given.
+      mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+    else
+      // Otherwise, use the complete destructor name. This is relevant if a
+      // class with a destructor is declared within a destructor.
+      mangleCXXDtorType(Dtor_Complete);
+    break;
+
+  case DeclarationName::CXXConversionFunctionName:
+    // <operator-name> ::= cv <type>    # (cast)
+    Out << "cv";
+    mangleType(Context.getASTContext().getCanonicalType(Name.getCXXNameType()));
+    break;
+
+  case DeclarationName::CXXOperatorName: {
+    unsigned Arity;
+    if (ND) {
+      Arity = cast<FunctionDecl>(ND)->getNumParams();
+    
+      // If we have a C++ member function, we need to include the 'this' pointer.
+      // FIXME: This does not make sense for operators that are static, but their
+      // names stay the same regardless of the arity (operator new for instance).
+      if (isa<CXXMethodDecl>(ND))
+        Arity++;
+    } else
+      Arity = KnownArity;
+
+    mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
+    break;
+  }
+
+  case DeclarationName::CXXLiteralOperatorName:
+    // FIXME: This mangling is not yet official.
+    Out << "li";
+    mangleSourceName(Name.getCXXLiteralIdentifier());
+    break;
+
+  case DeclarationName::CXXUsingDirective:
+    assert(false && "Can't mangle a using directive name!");
+    break;
+  }
+}
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+  // <source-name> ::= <positive length number> <identifier>
+  // <number> ::= [n] <non-negative decimal integer>
+  // <identifier> ::= <unqualified source code identifier>
+  Out << II->getLength() << II->getName();
+}
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+                                      const DeclContext *DC) {
+  // <nested-name> ::= N [<CV-qualifiers>] <prefix> <unqualified-name> E
+  //               ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+  Out << 'N';
+  if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND))
+    mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+
+  // Check if we have a template.
+  const TemplateArgumentList *TemplateArgs = 0;
+  if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+    mangleTemplatePrefix(TD);
+    mangleTemplateArgs(*TemplateArgs);
+  } else {
+    manglePrefix(DC);
+    mangleUnqualifiedName(ND);
+  }
+
+  Out << 'E';
+}
+void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
+                                      const TemplateArgument *TemplateArgs,
+                                      unsigned NumTemplateArgs) {
+  // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+  Out << 'N';
+
+  mangleTemplatePrefix(TD);
+  mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
+
+  Out << 'E';
+}
+
+void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
+  // <local-name> ::= Z <function encoding> E <entity name> [<discriminator>]
+  //              ::= Z <function encoding> E s [<discriminator>]
+  // <discriminator> ::= _ <non-negative number>
+  Out << 'Z';
+  
+  if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(ND->getDeclContext()))
+    mangleObjCMethodName(MD);
+  else  
+    mangleFunctionEncoding(cast<FunctionDecl>(ND->getDeclContext()));
+
+  Out << 'E';
+  mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC) {
+  //  <prefix> ::= <prefix> <unqualified-name>
+  //           ::= <template-prefix> <template-args>
+  //           ::= <template-param>
+  //           ::= # empty
+  //           ::= <substitution>
+
+  while (isa<LinkageSpecDecl>(DC))
+    DC = DC->getParent();
+
+  if (DC->isTranslationUnit())
+    return;
+
+  if (mangleSubstitution(cast<NamedDecl>(DC)))
+    return;
+
+  // Check if we have a template.
+  const TemplateArgumentList *TemplateArgs = 0;
+  if (const TemplateDecl *TD = isTemplate(cast<NamedDecl>(DC), TemplateArgs)) {
+    mangleTemplatePrefix(TD);
+    mangleTemplateArgs(*TemplateArgs);
+  } else {
+    manglePrefix(DC->getParent());
+    mangleUnqualifiedName(cast<NamedDecl>(DC));
+  }
+
+  addSubstitution(cast<NamedDecl>(DC));
+}
+
+void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
+  // <template-prefix> ::= <prefix> <template unqualified-name>
+  //                   ::= <template-param>
+  //                   ::= <substitution>
+  // <template-template-param> ::= <template-param>
+  //                               <substitution>
+
+  if (mangleSubstitution(ND))
+    return;
+
+  // <template-template-param> ::= <template-param>
+  if (const TemplateTemplateParmDecl *TTP
+                                     = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+    mangleTemplateParameter(TTP->getIndex());
+    return;
+  } 
+
+  manglePrefix(ND->getDeclContext());
+  mangleUnqualifiedName(ND->getTemplatedDecl());
+  addSubstitution(ND);
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+  switch (OO) {
+  // <operator-name> ::= nw     # new
+  case OO_New: Out << "nw"; break;
+  //              ::= na        # new[]
+  case OO_Array_New: Out << "na"; break;
+  //              ::= dl        # delete
+  case OO_Delete: Out << "dl"; break;
+  //              ::= da        # delete[]
+  case OO_Array_Delete: Out << "da"; break;
+  //              ::= ps        # + (unary)
+  //              ::= pl        # +
+  case OO_Plus: 
+    assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+    Out << (Arity == 1? "ps" : "pl"); break;
+  //              ::= ng        # - (unary)
+  //              ::= mi        # -
+  case OO_Minus: 
+    assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+    Out << (Arity == 1? "ng" : "mi"); break;
+  //              ::= ad        # & (unary)
+  //              ::= an        # &
+  case OO_Amp: 
+    assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+    Out << (Arity == 1? "ad" : "an"); break;
+  //              ::= de        # * (unary)
+  //              ::= ml        # *
+  case OO_Star: 
+    assert((Arity == 1 || Arity == 2) && "Invalid arity!");
+    Out << (Arity == 1? "de" : "ml"); break;
+  //              ::= co        # ~
+  case OO_Tilde: Out << "co"; break;
+  //              ::= dv        # /
+  case OO_Slash: Out << "dv"; break;
+  //              ::= rm        # %
+  case OO_Percent: Out << "rm"; break;
+  //              ::= or        # |
+  case OO_Pipe: Out << "or"; break;
+  //              ::= eo        # ^
+  case OO_Caret: Out << "eo"; break;
+  //              ::= aS        # =
+  case OO_Equal: Out << "aS"; break;
+  //              ::= pL        # +=
+  case OO_PlusEqual: Out << "pL"; break;
+  //              ::= mI        # -=
+  case OO_MinusEqual: Out << "mI"; break;
+  //              ::= mL        # *=
+  case OO_StarEqual: Out << "mL"; break;
+  //              ::= dV        # /=
+  case OO_SlashEqual: Out << "dV"; break;
+  //              ::= rM        # %=
+  case OO_PercentEqual: Out << "rM"; break;
+  //              ::= aN        # &=
+  case OO_AmpEqual: Out << "aN"; break;
+  //              ::= oR        # |=
+  case OO_PipeEqual: Out << "oR"; break;
+  //              ::= eO        # ^=
+  case OO_CaretEqual: Out << "eO"; break;
+  //              ::= ls        # <<
+  case OO_LessLess: Out << "ls"; break;
+  //              ::= rs        # >>
+  case OO_GreaterGreater: Out << "rs"; break;
+  //              ::= lS        # <<=
+  case OO_LessLessEqual: Out << "lS"; break;
+  //              ::= rS        # >>=
+  case OO_GreaterGreaterEqual: Out << "rS"; break;
+  //              ::= eq        # ==
+  case OO_EqualEqual: Out << "eq"; break;
+  //              ::= ne        # !=
+  case OO_ExclaimEqual: Out << "ne"; break;
+  //              ::= lt        # <
+  case OO_Less: Out << "lt"; break;
+  //              ::= gt        # >
+  case OO_Greater: Out << "gt"; break;
+  //              ::= le        # <=
+  case OO_LessEqual: Out << "le"; break;
+  //              ::= ge        # >=
+  case OO_GreaterEqual: Out << "ge"; break;
+  //              ::= nt        # !
+  case OO_Exclaim: Out << "nt"; break;
+  //              ::= aa        # &&
+  case OO_AmpAmp: Out << "aa"; break;
+  //              ::= oo        # ||
+  case OO_PipePipe: Out << "oo"; break;
+  //              ::= pp        # ++
+  case OO_PlusPlus: Out << "pp"; break;
+  //              ::= mm        # --
+  case OO_MinusMinus: Out << "mm"; break;
+  //              ::= cm        # ,
+  case OO_Comma: Out << "cm"; break;
+  //              ::= pm        # ->*
+  case OO_ArrowStar: Out << "pm"; break;
+  //              ::= pt        # ->
+  case OO_Arrow: Out << "pt"; break;
+  //              ::= cl        # ()
+  case OO_Call: Out << "cl"; break;
+  //              ::= ix        # []
+  case OO_Subscript: Out << "ix"; break;
+
+  //              ::= qu        # ?
+  // The conditional operator can't be overloaded, but we still handle it when
+  // mangling expressions.
+  case OO_Conditional: Out << "qu"; break;
+
+  case OO_None:
+  case NUM_OVERLOADED_OPERATORS:
+    assert(false && "Not an overloaded operator");
+    break;
+  }
+}
+
+// Emit CV-qualifiers in the fixed ABI order; do not reorder these checks.
+void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
+  // <CV-qualifiers> ::= [r] [V] [K]    # restrict (C99), volatile, const
+  if (Quals.hasRestrict())
+    Out << 'r';
+  if (Quals.hasVolatile())
+    Out << 'V';
+  if (Quals.hasConst())
+    Out << 'K';
+
+  // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+// Mangle an Objective-C method as its runtime-style name,
+// "[+-][Container(Category) selector]", emitted length-prefixed like a
+// <source-name> so it embeds cleanly inside a mangled name.
+void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+  llvm::SmallString<64> Name;
+  llvm::raw_svector_ostream OS(Name);
+  
+  const ObjCContainerDecl *CD = 
+    dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+  assert (CD && "Missing container decl in GetNameForMethod");
+  OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+  if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+    OS << '(' << CID->getNameAsString() << ')';
+  OS << ' ' << MD->getSelector().getAsString() << ']';
+  
+  // Emit as <length><name>, e.g. "17-[Foo(Bar) baz:]".
+  Out << OS.str().size() << OS.str();
+}
+
+// Central <type> mangler: canonicalizes, consults/records the substitution
+// table, peels local qualifiers, and dispatches on the canonical type class.
+void CXXNameMangler::mangleType(QualType T) {
+  // Only operate on the canonical type!
+  T = Context.getASTContext().getCanonicalType(T);
+
+  // Unqualified builtin types are never substitution candidates; anything
+  // qualified or non-builtin is.
+  bool IsSubstitutable = T.hasLocalQualifiers() || !isa<BuiltinType>(T);
+  if (IsSubstitutable && mangleSubstitution(T))
+    return;
+
+  if (Qualifiers Quals = T.getLocalQualifiers()) {
+    mangleQualifiers(Quals);
+    // Recurse:  even if the qualified type isn't yet substitutable,
+    // the unqualified type might be.
+    mangleType(T.getLocalUnqualifiedType());
+  } else {
+    switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+    case Type::CLASS: \
+      llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+      return;
+#define TYPE(CLASS, PARENT) \
+    case Type::CLASS: \
+      mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+      break;
+#include "clang/AST/TypeNodes.def"
+    }
+  }
+
+  // Add the substitution.
+  if (IsSubstitutable)
+    addSubstitution(T);
+}
+
+// Emit the one- or two-letter <builtin-type> code for each builtin kind.
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+  //  <type>         ::= <builtin-type>
+  //  <builtin-type> ::= v  # void
+  //                 ::= w  # wchar_t
+  //                 ::= b  # bool
+  //                 ::= c  # char
+  //                 ::= a  # signed char
+  //                 ::= h  # unsigned char
+  //                 ::= s  # short
+  //                 ::= t  # unsigned short
+  //                 ::= i  # int
+  //                 ::= j  # unsigned int
+  //                 ::= l  # long
+  //                 ::= m  # unsigned long
+  //                 ::= x  # long long, __int64
+  //                 ::= y  # unsigned long long, __int64
+  //                 ::= n  # __int128
+  // UNSUPPORTED:    ::= o  # unsigned __int128
+  //                 ::= f  # float
+  //                 ::= d  # double
+  //                 ::= e  # long double, __float80
+  // UNSUPPORTED:    ::= g  # __float128
+  // UNSUPPORTED:    ::= Dd # IEEE 754r decimal floating point (64 bits)
+  // UNSUPPORTED:    ::= De # IEEE 754r decimal floating point (128 bits)
+  // UNSUPPORTED:    ::= Df # IEEE 754r decimal floating point (32 bits)
+  // UNSUPPORTED:    ::= Dh # IEEE 754r half-precision floating point (16 bits)
+  //                 ::= Di # char32_t
+  //                 ::= Ds # char16_t
+  //                 ::= u <source-name>    # vendor extended type
+  // From our point of view, std::nullptr_t is a builtin, but as far as mangling
+  // is concerned, it's a type called std::nullptr_t.
+  switch (T->getKind()) {
+  case BuiltinType::Void: Out << 'v'; break;
+  case BuiltinType::Bool: Out << 'b'; break;
+  // Plain 'char' mangles as 'c' regardless of its target signedness.
+  case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
+  case BuiltinType::UChar: Out << 'h'; break;
+  case BuiltinType::UShort: Out << 't'; break;
+  case BuiltinType::UInt: Out << 'j'; break;
+  case BuiltinType::ULong: Out << 'm'; break;
+  case BuiltinType::ULongLong: Out << 'y'; break;
+  case BuiltinType::UInt128: Out << 'o'; break;
+  case BuiltinType::SChar: Out << 'a'; break;
+  case BuiltinType::WChar: Out << 'w'; break;
+  case BuiltinType::Char16: Out << "Ds"; break;
+  case BuiltinType::Char32: Out << "Di"; break;
+  case BuiltinType::Short: Out << 's'; break;
+  case BuiltinType::Int: Out << 'i'; break;
+  case BuiltinType::Long: Out << 'l'; break;
+  case BuiltinType::LongLong: Out << 'x'; break;
+  case BuiltinType::Int128: Out << 'n'; break;
+  case BuiltinType::Float: Out << 'f'; break;
+  case BuiltinType::Double: Out << 'd'; break;
+  case BuiltinType::LongDouble: Out << 'e'; break;
+  case BuiltinType::NullPtr: Out << "St9nullptr_t"; break;
+
+  case BuiltinType::Overload:
+  case BuiltinType::Dependent:
+    assert(false &&
+           "Overloaded and dependent types shouldn't get to name mangling");
+    break;
+  case BuiltinType::UndeducedAuto:
+    assert(0 && "Should not see undeduced auto here");
+    break;
+  // The ObjC builtins mangle as vendor-style <source-name>s.
+  case BuiltinType::ObjCId: Out << "11objc_object"; break;
+  case BuiltinType::ObjCClass: Out << "10objc_class"; break;
+  case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
+  }
+}
+
+// <type>          ::= <function-type>
+// <function-type> ::= F [Y] <bare-function-type> E
+void CXXNameMangler::mangleType(const FunctionProtoType *T) {
+  Out << 'F';
+  // FIXME: We don't have enough information in the AST to produce the 'Y'
+  // encoding for extern "C" function types.
+  mangleBareFunctionType(T, /*MangleReturnType=*/true);
+  Out << 'E';
+}
+// K&R (unprototyped) function types have no mangling defined by the ABI.
+void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+  llvm_unreachable("Can't mangle K&R function prototypes");
+}
+// Emit the signature types of a function: optionally the return type, then
+// each parameter type ('v' if there are none), then 'z' for variadics.
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+                                            bool MangleReturnType) {
+  // We should never be mangling something without a prototype.
+  const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+  // <bare-function-type> ::= <signature type>+
+  if (MangleReturnType)
+    mangleType(Proto->getResultType());
+
+  // A parameterless function mangles as (void).
+  if (Proto->getNumArgs() == 0) {
+    Out << 'v';
+    return;
+  }
+
+  for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+                                         ArgEnd = Proto->arg_type_end();
+       Arg != ArgEnd; ++Arg)
+    mangleType(*Arg);
+
+  // <builtin-type>      ::= z  # ellipsis
+  if (Proto->isVariadic())
+    Out << 'z';
+}
+
+// <type>            ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+  mangleName(T->getDecl());
+}
+
+// <type>            ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+// Enums and records share the TagType path: both mangle as their name.
+void CXXNameMangler::mangleType(const EnumType *T) {
+  mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const RecordType *T) {
+  mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const TagType *T) {
+  mangleName(T->getDecl());
+}
+
+// <type>       ::= <array-type>
+// <array-type> ::= A <positive dimension number> _ <element type>
+//              ::= A [<dimension expression>] _ <element type>
+void CXXNameMangler::mangleType(const ConstantArrayType *T) {
+  // getSize() is an APInt; streaming it prints the decimal dimension.
+  Out << 'A' << T->getSize() << '_';
+  mangleType(T->getElementType());
+}
+// VLAs and dependent-size arrays mangle their size as an expression.
+void CXXNameMangler::mangleType(const VariableArrayType *T) {
+  Out << 'A';
+  mangleExpression(T->getSizeExpr());
+  Out << '_';
+  mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+  Out << 'A';
+  mangleExpression(T->getSizeExpr());
+  Out << '_';
+  mangleType(T->getElementType());
+}
+// Incomplete arrays omit the dimension entirely: "A_".
+void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
+  Out << 'A' << '_';
+  mangleType(T->getElementType());
+}
+
+// <type>                   ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= M <class type> <member type>
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+  Out << 'M';
+  mangleType(QualType(T->getClass(), 0));
+  QualType PointeeType = T->getPointeeType();
+  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+    // Member function pointers carry the function's ref-unqualified CV
+    // qualifiers (from the prototype) ahead of the function type itself.
+    mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
+    mangleType(FPT);
+  } else
+    mangleType(PointeeType);
+}
+
+// <type>           ::= <template-param>
+// Mangled purely by depth-flattened index; see mangleTemplateParameter.
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+  mangleTemplateParameter(T->getIndex());
+}
+
+// FIXME: <type> ::= <template-template-param> <template-args>
+
+// <type> ::= P <type>   # pointer-to
+void CXXNameMangler::mangleType(const PointerType *T) {
+  Out << 'P';
+  mangleType(T->getPointeeType());
+}
+// ObjC object pointers mangle exactly like ordinary pointers.
+void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+  Out << 'P';
+  mangleType(T->getPointeeType());
+}
+
+// <type> ::= R <type>   # reference-to
+void CXXNameMangler::mangleType(const LValueReferenceType *T) {
+  Out << 'R';
+  mangleType(T->getPointeeType());
+}
+
+// <type> ::= O <type>   # rvalue reference-to (C++0x)
+void CXXNameMangler::mangleType(const RValueReferenceType *T) {
+  Out << 'O';
+  mangleType(T->getPointeeType());
+}
+
+// <type> ::= C <type>   # complex pair (C 2000)
+void CXXNameMangler::mangleType(const ComplexType *T) {
+  Out << 'C';
+  mangleType(T->getElementType());
+}
+
+// GNU extension: vector types
+// Mangled as the vendor-extended qualifier "U8__vector" on the element type.
+void CXXNameMangler::mangleType(const VectorType *T) {
+  Out << "U8__vector";
+  mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const ExtVectorType *T) {
+  mangleType(static_cast<const VectorType*>(T));
+}
+void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+  Out << "U8__vector";
+  mangleType(T->getElementType());
+}
+
+// ObjC interfaces mangle as a bare <source-name> of the class.
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+  mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+// Blocks mangle as a vendor-extended qualifier on the pointee type.
+void CXXNameMangler::mangleType(const BlockPointerType *T) {
+  Out << "U13block_pointer";
+  mangleType(T->getPointeeType());
+}
+
+// Mangle a template specialization as <name> with its template arguments.
+void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+  TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl();
+  assert(TD && "FIXME: Support dependent template names!");
+
+  mangleName(TD, T->getArgs(), T->getNumArgs());
+}
+
+// Mangle a dependent "typename T::foo" type as a nested name: the qualifier
+// type as a prefix, then the dependent identifier, wrapped in N...E.
+void CXXNameMangler::mangleType(const TypenameType *T) {
+  // Typename types are always nested
+  Out << 'N';
+
+  const Type *QTy = T->getQualifier()->getAsType();
+  if (const TemplateSpecializationType *TST =
+        dyn_cast<TemplateSpecializationType>(QTy)) {
+    // Mangle the qualifier as a template prefix + args, recording it as a
+    // substitution candidate if it hasn't been seen yet.
+    if (!mangleSubstitution(QualType(TST, 0))) {
+      TemplateDecl *TD = TST->getTemplateName().getAsTemplateDecl();
+      assert(TD && "FIXME: Support dependent template names");
+      mangleTemplatePrefix(TD);
+      mangleTemplateArgs(TST->getArgs(), TST->getNumArgs());
+      addSubstitution(QualType(TST, 0));
+    }
+  } else if (const TemplateTypeParmType *TTPT =
+              dyn_cast<TemplateTypeParmType>(QTy)) {
+    // We use the QualType mangle type variant here because it handles
+    // substitutions.
+    mangleType(QualType(TTPT, 0));
+  } else
+    assert(false && "Unhandled type!");
+
+  mangleSourceName(T->getIdentifier());
+
+  Out << 'E';
+}
+
+// Emit an integer literal <expr-primary>: the literal's type, then its value
+// (bools as 0/1, negatives as 'n' + absolute value), wrapped in L...E.
+void CXXNameMangler::mangleIntegerLiteral(QualType T, 
+                                          const llvm::APSInt &Value) {
+  //  <expr-primary> ::= L <type> <value number> E # integer literal
+  Out << 'L';
+  
+  mangleType(T);
+  if (T->isBooleanType()) {
+    // Boolean values are encoded as 0/1.
+    Out << (Value.getBoolValue() ? '1' : '0');
+  } else {
+    if (Value.isNegative())
+      Out << 'n';
+    Value.abs().print(Out, false);
+  }
+  Out << 'E';
+  
+}
+
+// Mangle the callee of a call expression. A callee that is not an
+// unresolved overload set mangles as an ordinary expression; only overload
+// sets need the arity-aware unresolved-name path below.
+void CXXNameMangler::mangleCalledExpression(const Expr *E, unsigned Arity) {
+  if (E->getType() != getASTContext().OverloadTy) {
+    mangleExpression(E);
+    // Done: falling through would hand a non-overload expression to
+    // OverloadExpr::find below.
+    return;
+  }
+  // propagate arity to dependent overloads?
+
+  llvm::PointerIntPair<OverloadExpr*,1> R
+    = OverloadExpr::find(const_cast<Expr*>(E));
+  // If the overload set had its address taken, emit the '&' operator first.
+  if (R.getInt())
+    Out << "an"; // &
+  const OverloadExpr *Ovl = R.getPointer();
+  if (const UnresolvedMemberExpr *ME = dyn_cast<UnresolvedMemberExpr>(Ovl)) {
+    mangleMemberExpr(ME->getBase(), ME->isArrow(), ME->getQualifier(),
+                     ME->getMemberName(), Arity);
+    return;
+  }
+
+  mangleUnresolvedName(Ovl->getQualifier(), Ovl->getName(), Arity);
+}
+
+/// Mangles a member expression.  Implicit accesses are not handled,
+/// but that should be okay, because you shouldn't be able to
+/// make an implicit access in a function template declaration.
+///
+/// The standard ABI does not describe how member expressions should
+/// be mangled, so this is very unstandardized.  We mangle as if it
+/// were a binary operator, except that the RHS is mangled as an
+/// abstract name.
+///
+/// The standard ABI also does not assign a mangling to the dot
+/// operator, so we arbitrarily select 'me'.
+void CXXNameMangler::mangleMemberExpr(const Expr *Base,
+                                      bool IsArrow,
+                                      NestedNameSpecifier *Qualifier,
+                                      DeclarationName Member,
+                                      unsigned Arity) {
+  // 'pt' is the ABI's ->, 'me' is our invented spelling for '.'.
+  Out << (IsArrow ? "pt" : "me");
+  mangleExpression(Base);
+  mangleUnresolvedName(Qualifier, Member, Arity);
+}
+
+// Mangle an expression appearing in a dependent context (template argument,
+// array bound, etc.). Dispatches on the statement class; emission order is
+// significant everywhere, as it directly forms the mangled name.
+void CXXNameMangler::mangleExpression(const Expr *E) {
+  // <expression> ::= <unary operator-name> <expression>
+  //              ::= <binary operator-name> <expression> <expression>
+  //              ::= <trinary operator-name> <expression> <expression> <expression>
+  //              ::= cl <expression>* E	     # call
+  //              ::= cv <type> expression           # conversion with one argument
+  //              ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+  //              ::= st <type>		             # sizeof (a type)
+  //              ::= at <type>                      # alignof (a type)
+  //              ::= <template-param>
+  //              ::= <function-param>
+  //              ::= sr <type> <unqualified-name>                   # dependent name
+  //              ::= sr <type> <unqualified-name> <template-args>   # dependent template-id
+  //              ::= sZ <template-param>                            # size of a parameter pack
+  //              ::= <expr-primary>
+  // <expr-primary> ::= L <type> <value number> E    # integer literal
+  //                ::= L <type <value float> E      # floating literal
+  //                ::= L <mangled-name> E           # external name
+  switch (E->getStmtClass()) {
+  // Expression kinds not listed below are not yet mangleable.
+  default:
+    llvm_unreachable("unexpected statement kind");
+    break;
+
+  case Expr::CallExprClass: {
+    const CallExpr *CE = cast<CallExpr>(E);
+    Out << "cl";
+    mangleCalledExpression(CE->getCallee(), CE->getNumArgs());
+    for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
+      mangleExpression(CE->getArg(I));
+    Out << "E";
+    break;
+  }
+
+  case Expr::MemberExprClass: {
+    const MemberExpr *ME = cast<MemberExpr>(E);
+    mangleMemberExpr(ME->getBase(), ME->isArrow(),
+                     ME->getQualifier(), ME->getMemberDecl()->getDeclName(),
+                     UnknownArity);
+    break;
+  }
+
+  case Expr::UnresolvedMemberExprClass: {
+    const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
+    mangleMemberExpr(ME->getBase(), ME->isArrow(),
+                     ME->getQualifier(), ME->getMemberName(),
+                     UnknownArity);
+    break;
+  }
+
+  case Expr::CXXDependentScopeMemberExprClass: {
+    const CXXDependentScopeMemberExpr *ME
+      = cast<CXXDependentScopeMemberExpr>(E);
+    mangleMemberExpr(ME->getBase(), ME->isArrow(),
+                     ME->getQualifier(), ME->getMember(),
+                     UnknownArity);
+    break;
+  }
+
+  case Expr::UnresolvedLookupExprClass: {
+    // The ABI doesn't cover how to mangle overload sets, so we mangle
+    // using something as close as possible to the original lookup
+    // expression.
+    const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
+    mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), UnknownArity);
+    break;
+  }
+
+  // Both construct-expression forms use the 'cv' conversion mangling; the
+  // '_'...'E' wrapper is only used when the argument count differs from one.
+  case Expr::CXXUnresolvedConstructExprClass: { 
+    const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
+    unsigned N = CE->arg_size();
+
+    Out << "cv";
+    mangleType(CE->getType());
+    if (N != 1) Out << "_";
+    for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+    if (N != 1) Out << "E";
+    break;    
+  }
+
+  case Expr::CXXTemporaryObjectExprClass:
+  case Expr::CXXConstructExprClass: {
+    const CXXConstructExpr *CE = cast<CXXConstructExpr>(E);
+    unsigned N = CE->getNumArgs();
+
+    Out << "cv";
+    mangleType(CE->getType());
+    if (N != 1) Out << "_";
+    for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+    if (N != 1) Out << "E";
+    break;
+  }
+
+  case Expr::SizeOfAlignOfExprClass: {
+    // Emits "st"/"at" for sizeof/alignof of a type, "sz"/"az" of an expr.
+    const SizeOfAlignOfExpr *SAE = cast<SizeOfAlignOfExpr>(E);
+    if (SAE->isSizeOf()) Out << "s";
+    else Out << "a";
+    if (SAE->isArgumentType()) {
+      Out << "t";
+      mangleType(SAE->getArgumentType());
+    } else {
+      Out << "z";
+      mangleExpression(SAE->getArgumentExpr());
+    }
+    break;
+  }
+
+  case Expr::UnaryOperatorClass: {
+    const UnaryOperator *UO = cast<UnaryOperator>(E);
+    mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()), 
+                       /*Arity=*/1);
+    mangleExpression(UO->getSubExpr());
+    break;
+  }
+      
+  case Expr::BinaryOperatorClass: {
+    const BinaryOperator *BO = cast<BinaryOperator>(E);
+    mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()), 
+                       /*Arity=*/2);
+    mangleExpression(BO->getLHS());
+    mangleExpression(BO->getRHS());                     
+    break;
+  }
+
+  case Expr::ConditionalOperatorClass: {
+    const ConditionalOperator *CO = cast<ConditionalOperator>(E);
+    mangleOperatorName(OO_Conditional, /*Arity=*/3);
+    mangleExpression(CO->getCond());
+    mangleExpression(CO->getLHS());
+    mangleExpression(CO->getRHS());
+    break;
+  }
+
+  // Implicit casts and parens are transparent for mangling purposes.
+  case Expr::ImplicitCastExprClass: {
+    mangleExpression(cast<ImplicitCastExpr>(E)->getSubExpr());
+    break;
+  }
+
+  case Expr::CStyleCastExprClass:
+  case Expr::CXXStaticCastExprClass:
+  case Expr::CXXDynamicCastExprClass:
+  case Expr::CXXReinterpretCastExprClass:
+  case Expr::CXXConstCastExprClass:
+  case Expr::CXXFunctionalCastExprClass: {
+    // All explicit casts collapse to the 'cv' conversion mangling.
+    const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
+    Out << "cv";
+    mangleType(ECE->getType());
+    mangleExpression(ECE->getSubExpr());
+    break;
+  }
+    
+  case Expr::CXXOperatorCallExprClass: {
+    const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
+    unsigned NumArgs = CE->getNumArgs();
+    mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
+    // Mangle the arguments.
+    for (unsigned i = 0; i != NumArgs; ++i)
+      mangleExpression(CE->getArg(i));
+    break;
+  }
+      
+  case Expr::ParenExprClass:
+    mangleExpression(cast<ParenExpr>(E)->getSubExpr());
+    break;
+
+  case Expr::DeclRefExprClass: {
+    const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+
+    switch (D->getKind()) {
+    default: assert(false && "Unhandled decl kind!");
+    case Decl::NonTypeTemplateParm: {
+      const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+      mangleTemplateParameter(PD->getIndex());
+      break;
+    }
+
+    }
+
+    break;
+  }
+
+  case Expr::DependentScopeDeclRefExprClass: {
+    const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
+    const Type *QTy = DRE->getQualifier()->getAsType();
+    assert(QTy && "Qualifier was not type!");
+
+    // ::= sr <type> <unqualified-name>                   # dependent name
+    Out << "sr";
+    mangleType(QualType(QTy, 0));
+
+    assert(DRE->getDeclName().getNameKind() == DeclarationName::Identifier &&
+           "Unhandled decl name kind!");
+    mangleSourceName(DRE->getDeclName().getAsIdentifierInfo());
+
+    break;
+  }
+
+  case Expr::FloatingLiteralClass: {
+    // Floats are mangled as the lowercase hex image of their bit pattern.
+    const FloatingLiteral *FL = cast<FloatingLiteral>(E);
+    Out << "L";
+    mangleType(FL->getType());
+
+    // TODO: avoid this copy with careful stream management.
+    llvm::SmallVector<char,20> Buffer;
+    FL->getValue().bitcastToAPInt().toString(Buffer, 16, false);
+    Out.write(Buffer.data(), Buffer.size());
+
+    Out << "E";
+    break;
+  }
+
+  case Expr::IntegerLiteralClass:
+    // NOTE(review): APSInt's APInt constructor defaults to unsigned here;
+    // signedness of the literal's type appears unpropagated — confirm.
+    mangleIntegerLiteral(E->getType(), 
+                         llvm::APSInt(cast<IntegerLiteral>(E)->getValue()));
+    break;
+
+  }
+}
+
+// FIXME: <type> ::= G <type>   # imaginary (C 2000)
+// FIXME: <type> ::= U <source-name> <type>     # vendor extended type qualifier
+
+// Emit the <ctor-dtor-name> variant code for a constructor kind.
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+  // <ctor-dtor-name> ::= C1  # complete object constructor
+  //                  ::= C2  # base object constructor
+  //                  ::= C3  # complete object allocating constructor
+  //
+  switch (T) {
+  case Ctor_Complete:
+    Out << "C1";
+    break;
+  case Ctor_Base:
+    Out << "C2";
+    break;
+  case Ctor_CompleteAllocating:
+    Out << "C3";
+    break;
+  }
+}
+
+// Emit the <ctor-dtor-name> variant code for a destructor kind.
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+  // <ctor-dtor-name> ::= D0  # deleting destructor
+  //                  ::= D1  # complete object destructor
+  //                  ::= D2  # base object destructor
+  //
+  switch (T) {
+  case Dtor_Deleting:
+    Out << "D0";
+    break;
+  case Dtor_Complete:
+    Out << "D1";
+    break;
+  case Dtor_Base:
+    Out << "D2";
+    break;
+  }
+}
+
+// Emit a full template argument list, I...E, from a TemplateArgumentList.
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgumentList &L) {
+  // <template-args> ::= I <template-arg>+ E
+  Out << "I";
+  for (unsigned i = 0, e = L.size(); i != e; ++i)
+    mangleTemplateArg(L[i]);
+  Out << "E";
+}
+
+// Overload taking a raw pointer/count pair of template arguments.
+void CXXNameMangler::mangleTemplateArgs(const TemplateArgument *TemplateArgs,
+                                        unsigned NumTemplateArgs) {
+  // <template-args> ::= I <template-arg>+ E
+  Out << "I";
+  for (unsigned i = 0; i != NumTemplateArgs; ++i)
+    mangleTemplateArg(TemplateArgs[i]);
+  Out << "E";
+}
+
+// Emit a single template argument according to its kind: type, template,
+// wrapped expression, integral literal, or external-entity reference.
+void CXXNameMangler::mangleTemplateArg(const TemplateArgument &A) {
+  // <template-arg> ::= <type>              # type or template
+  //                ::= X <expression> E    # expression
+  //                ::= <expr-primary>      # simple expressions
+  //                ::= I <template-arg>* E # argument pack
+  //                ::= sp <expression>     # pack expansion of (C++0x)
+  switch (A.getKind()) {
+  default:
+    assert(0 && "Unknown template argument kind!");
+  case TemplateArgument::Type:
+    mangleType(A.getAsType());
+    break;
+  case TemplateArgument::Template:
+    assert(A.getAsTemplate().getAsTemplateDecl() &&
+           "FIXME: Support dependent template names");
+    mangleName(A.getAsTemplate().getAsTemplateDecl());
+    break;      
+  case TemplateArgument::Expression:
+    Out << 'X';
+    mangleExpression(A.getAsExpr());
+    Out << 'E';
+    break;
+  case TemplateArgument::Integral:
+    mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
+    break;
+  case TemplateArgument::Declaration: {
+    //  <expr-primary> ::= L <mangled-name> E # external name
+
+    // FIXME: Clang produces AST's where pointer-to-member-function expressions
+    // and pointer-to-function expressions are represented as a declaration not
+    // an expression; this is not how gcc represents them and this changes the
+    // mangling.
+    Out << 'L';
+    // References to external entities use the mangled name; if the name would
+    // not normally be manged then mangle it as unqualified.
+    //
+    // FIXME: The ABI specifies that external names here should have _Z, but
+    // gcc leaves this off.
+    mangle(cast<NamedDecl>(A.getAsDecl()), "Z");
+    Out << 'E';
+    break;
+  }
+  }
+}
+
+// Emit a template parameter reference; indices after the first are encoded
+// off-by-one ("T_", "T0_", "T1_", ...).
+void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
+  // <template-param> ::= T_    # first template parameter
+  //                  ::= T <parameter-2 non-negative number> _
+  if (Index == 0)
+    Out << "T_";
+  else
+    Out << 'T' << (Index - 1) << '_';
+}
+
+// <substitution> ::= S <seq-id> _
+//                ::= S_
+// Try to emit a substitution for a declaration; standard (St/Sa/Sb/Ss/...)
+// abbreviations take priority over the numbered table.
+bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
+  // Try one of the standard substitutions first.
+  if (mangleStandardSubstitution(ND))
+    return true;
+
+  // Key the table on the canonical declaration so redeclarations unify.
+  ND = cast<NamedDecl>(ND->getCanonicalDecl());
+  return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
+}
+
+// Try to emit a substitution for a type. Unqualified record types are keyed
+// on their declaration (sharing entries with decl-based lookups); everything
+// else is keyed on the opaque canonical type pointer.
+bool CXXNameMangler::mangleSubstitution(QualType T) {
+  if (!T.getCVRQualifiers()) {
+    if (const RecordType *RT = T->getAs<RecordType>())
+      return mangleSubstitution(RT->getDecl());
+  }
+
+  uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+
+  return mangleSubstitution(TypePtr);
+}
+
+// Emit a back-reference to a previously-recorded entity, if any.
+// <substitution> ::= S_             # the first recorded candidate
+//                ::= S <seq-id> _   # later candidates; <seq-id> is base-36
+// Returns true iff Ptr was found and a substitution was emitted.
+bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
+  llvm::DenseMap<uintptr_t, unsigned>::iterator I =
+    Substitutions.find(Ptr);
+  if (I == Substitutions.end())
+    return false;
+
+  unsigned SeqID = I->second;
+  if (SeqID == 0)
+    Out << "S_";
+  else {
+    SeqID--;
+
+    // <seq-id> is encoded in base-36, using digits and upper case letters.
+    // Build the digits right-to-left into a NUL-terminated buffer.
+    char Buffer[10];
+    char *BufferPtr = Buffer + 9;
+
+    *BufferPtr = 0;
+    if (SeqID == 0) *--BufferPtr = '0';
+
+    while (SeqID) {
+      assert(BufferPtr > Buffer && "Buffer overflow!");
+
+      // Reduce the full value first: narrowing SeqID to 8 bits before
+      // '% 36' would yield wrong digits once SeqID >= 256.
+      unsigned char c = static_cast<unsigned char>(SeqID % 36);
+
+      *--BufferPtr =  (c < 10 ? '0' + c : 'A' + c - 10);
+      SeqID /= 36;
+    }
+
+    Out << 'S' << BufferPtr << '_';
+  }
+
+  return true;
+}
+
+// Returns true if T is plain 'char' (either target signedness variant).
+static bool isCharType(QualType T) {
+  if (T.isNull())
+    return false;
+
+  return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
+    T->isSpecificBuiltinType(BuiltinType::Char_U);
+}
+
+/// isCharSpecialization - Returns whether a given type is a template
+/// specialization of a given name with a single argument of type char.
+/// Used to recognize std::char_traits<char> / std::allocator<char> when
+/// testing for the Ss / Si / So / Sd standard substitutions.
+static bool isCharSpecialization(QualType T, const char *Name) {
+  if (T.isNull())
+    return false;
+
+  const RecordType *RT = T->getAs<RecordType>();
+  if (!RT)
+    return false;
+
+  const ClassTemplateSpecializationDecl *SD =
+    dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+  if (!SD)
+    return false;
+
+  // The template must live directly in namespace std.
+  if (!isStdNamespace(SD->getDeclContext()))
+    return false;
+
+  const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+  if (TemplateArgs.size() != 1)
+    return false;
+
+  if (!isCharType(TemplateArgs[0].getAsType()))
+    return false;
+
+  return SD->getIdentifier()->getName() == Name;
+}
+
+// Returns whether SD is a specialization named Str over exactly
+// <char, std::char_traits<char> > — the shape shared by the Si/So/Sd
+// stream substitutions. Str's length is captured to use isStr().
+template <std::size_t StrLen>
+bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl *SD,
+                                const char (&Str)[StrLen]) {
+  if (!SD->getIdentifier()->isStr(Str))
+    return false;
+  
+  const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+  if (TemplateArgs.size() != 2)
+    return false;
+    
+  if (!isCharType(TemplateArgs[0].getAsType()))
+    return false;
+    
+  if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+    return false;
+    
+  return true;
+}
+  
+// Recognize the handful of ABI-blessed standard-library abbreviations
+// (St, Sa, Sb, Ss, Si, So, Sd) and emit them; returns false if ND is not
+// one of those entities.
+bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
+  // <substitution> ::= St # ::std::
+  if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+    if (isStd(NS)) {
+      Out << "St";
+      return true;
+    }
+  }
+
+  if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
+    if (!isStdNamespace(TD->getDeclContext()))
+      return false;
+
+    // <substitution> ::= Sa # ::std::allocator
+    if (TD->getIdentifier()->isStr("allocator")) {
+      Out << "Sa";
+      return true;
+    }
+
+    // <<substitution> ::= Sb # ::std::basic_string
+    if (TD->getIdentifier()->isStr("basic_string")) {
+      Out << "Sb";
+      return true;
+    }
+  }
+
+  if (const ClassTemplateSpecializationDecl *SD =
+        dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+    //    <substitution> ::= Ss # ::std::basic_string<char,
+    //                            ::std::char_traits<char>,
+    //                            ::std::allocator<char> >
+    if (SD->getIdentifier()->isStr("basic_string")) {
+      const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+
+      if (TemplateArgs.size() != 3)
+        return false;
+
+      if (!isCharType(TemplateArgs[0].getAsType()))
+        return false;
+
+      if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+        return false;
+
+      if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
+        return false;
+
+      Out << "Ss";
+      return true;
+    }
+
+    //    <substitution> ::= Si # ::std::basic_istream<char,
+    //                            ::std::char_traits<char> >
+    if (isStreamCharSpecialization(SD, "basic_istream")) {
+      Out << "Si";
+      return true;
+    }
+
+    //    <substitution> ::= So # ::std::basic_ostream<char,
+    //                            ::std::char_traits<char> >
+    if (isStreamCharSpecialization(SD, "basic_ostream")) {
+      Out << "So";
+      return true;
+    }
+    
+    //    <substitution> ::= Sd # ::std::basic_iostream<char,
+    //                            ::std::char_traits<char> >
+    if (isStreamCharSpecialization(SD, "basic_iostream")) {
+      Out << "Sd";
+      return true;
+    }
+  }
+  return false;
+}
+
+// Record a type as a substitution candidate; unqualified record types are
+// keyed by declaration (mirroring mangleSubstitution(QualType) exactly).
+void CXXNameMangler::addSubstitution(QualType T) {
+  if (!T.getCVRQualifiers()) {
+    if (const RecordType *RT = T->getAs<RecordType>()) {
+      addSubstitution(RT->getDecl());
+      return;
+    }
+  }
+
+  uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+  addSubstitution(TypePtr);
+}
+
+// Record Ptr with the next sequence number. Taking size() *before* inserting
+// yields 0 for the first entry, which mangleSubstitution renders as "S_".
+void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
+  unsigned SeqID = Substitutions.size();
+
+  assert(!Substitutions.count(Ptr) && "Substitution already exists!");
+  Substitutions[Ptr] = SeqID;
+}
+
+//
+
+/// \brief Mangles the name of the declaration D and emits that name to the
+/// given output buffer.
+///
+/// D must be a function or variable, and must not be a constructor or
+/// destructor (those have dedicated entry points, mangleCXXCtor and
+/// mangleCXXDtor, because the 'structor variant is part of the name).
+void MangleContext::mangleName(const NamedDecl *D,
+                               llvm::SmallVectorImpl<char> &Res) {
+  assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+          "Invalid mangleName() call, argument is not a variable or function!");
+  assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+         "Invalid mangleName() call on 'structor decl!");
+
+  // Register a crash-diagnostics frame naming the declaration being mangled.
+  PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+                                 getASTContext().getSourceManager(),
+                                 "Mangling declaration");
+
+  CXXNameMangler Mangler(*this, Res);
+  return Mangler.mangle(D);
+}
+
+void MangleContext::mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+                                  llvm::SmallVectorImpl<char> &Res) {
+  // Build a mangler primed with the requested constructor kind and run it.
+  CXXNameMangler CtorMangler(*this, Res, D, Type);
+  CtorMangler.mangle(D);
+}
+
+void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+                                  llvm::SmallVectorImpl<char> &Res) {
+  // Build a mangler primed with the requested destructor kind and run it.
+  CXXNameMangler DtorMangler(*this, Res, D, Type);
+  DtorMangler.mangle(D);
+}
+
+/// \brief Mangles a thunk for the function declaration FD, applying the
+/// given 'this' adjustment, and emits that name into \p Res.
+void MangleContext::mangleThunk(const FunctionDecl *FD, 
+                                const ThunkAdjustment &ThisAdjustment,
+                                llvm::SmallVectorImpl<char> &Res) {
+  assert(!isa<CXXDestructorDecl>(FD) &&
+         "Use mangleCXXDtor for destructor decls!");
+
+  //  <special-name> ::= T <call-offset> <base encoding>
+  //                      # base is the nominal target function of thunk
+  CXXNameMangler Mangler(*this, Res);
+  Mangler.getStream() << "_ZT";
+  Mangler.mangleCallOffset(ThisAdjustment);
+  Mangler.mangleFunctionEncoding(FD);
+}
+
+void MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *D,
+                                       CXXDtorType Type,
+                                       const ThunkAdjustment &ThisAdjustment,
+                                       llvm::SmallVectorImpl<char> &Res) {
+  // Destructor thunks use the same grammar as ordinary thunks:
+  //  <special-name> ::= T <call-offset> <base encoding>
+  //                      # base is the nominal target function of thunk
+  CXXNameMangler DtorMangler(*this, Res, D, Type);
+  DtorMangler.getStream() << "_ZT";
+  DtorMangler.mangleCallOffset(ThisAdjustment);
+  DtorMangler.mangleFunctionEncoding(D);
+}
+
+/// \brief Mangles a covariant thunk for the declaration FD and emits that
+/// name into \p Res.
+void 
+MangleContext::mangleCovariantThunk(const FunctionDecl *FD,
+                                    const CovariantThunkAdjustment& Adjustment,
+                                    llvm::SmallVectorImpl<char> &Res) {
+  assert(!isa<CXXDestructorDecl>(FD) &&
+         "No such thing as a covariant thunk for a destructor!");
+
+  //  <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
+  //                      # base is the nominal target function of thunk
+  //                      # first call-offset is 'this' adjustment
+  //                      # second call-offset is result adjustment
+  CXXNameMangler Mangler(*this, Res);
+  Mangler.getStream() << "_ZTc";
+  Mangler.mangleCallOffset(Adjustment.ThisAdjustment);
+  Mangler.mangleCallOffset(Adjustment.ReturnAdjustment);
+  Mangler.mangleFunctionEncoding(FD);
+}
+
+/// mangleGuardVariable - Emit the mangled name of the one-time-initialization
+/// guard variable for the variable D.
+void MangleContext::mangleGuardVariable(const VarDecl *D,
+                                        llvm::SmallVectorImpl<char> &Res) {
+  //  <special-name> ::= GV <object name>       # Guard variable for one-time
+  //                                            # initialization
+  CXXNameMangler GuardMangler(*this, Res);
+  GuardMangler.getStream() << "_ZGV";
+  GuardMangler.mangleName(D);
+}
+
+void MangleContext::mangleCXXVtable(const CXXRecordDecl *RD,
+                                    llvm::SmallVectorImpl<char> &Res) {
+  // Virtual tables are mangled as:
+  //   <special-name> ::= TV <type>  # virtual table
+  CXXNameMangler VtableMangler(*this, Res);
+  VtableMangler.getStream() << "_ZTV";
+  VtableMangler.mangleName(RD);
+}
+
+void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+                                 llvm::SmallVectorImpl<char> &Res) {
+  // VTT structures are mangled as:
+  //   <special-name> ::= TT <type>  # VTT structure
+  CXXNameMangler VttMangler(*this, Res);
+  VttMangler.getStream() << "_ZTT";
+  VttMangler.mangleName(RD);
+}
+
+void MangleContext::mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset,
+                                        const CXXRecordDecl *Type,
+                                        llvm::SmallVectorImpl<char> &Res) {
+  // Construction vtables are mangled as:
+  //   <special-name> ::= TC <type> <offset number> _ <base type>
+  CXXNameMangler CtorVtableMangler(*this, Res);
+  CtorVtableMangler.getStream() << "_ZTC";
+  CtorVtableMangler.mangleName(RD);
+  CtorVtableMangler.getStream() << Offset << "_";
+  CtorVtableMangler.mangleName(Type);
+}
+
+void MangleContext::mangleCXXRTTI(QualType Ty,
+                                  llvm::SmallVectorImpl<char> &Res) {
+  // Typeinfo structures are mangled as:
+  //   <special-name> ::= TI <type>  # typeinfo structure
+  assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
+  CXXNameMangler RttiMangler(*this, Res);
+  RttiMangler.getStream() << "_ZTI";
+  RttiMangler.mangleType(Ty);
+}
+
+void MangleContext::mangleCXXRTTIName(QualType Ty,
+                                      llvm::SmallVectorImpl<char> &Res) {
+  // Typeinfo names are mangled as:
+  //   <special-name> ::= TS <type>  # typeinfo name (null terminated byte string)
+  CXXNameMangler RttiNameMangler(*this, Res);
+  RttiNameMangler.getStream() << "_ZTS";
+  RttiNameMangler.mangleType(Ty);
+}
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
new file mode 100644
index 0000000..8d96295
--- /dev/null
+++ b/lib/CodeGen/Mangle.h
@@ -0,0 +1,95 @@
+//===--- Mangle.h - Mangle C++ Names ----------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+//   http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_MANGLE_H
+#define LLVM_CLANG_CODEGEN_MANGLE_H
+
+#include "CGCXX.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+  template<typename T> class SmallVectorImpl;
+}
+
+namespace clang {
+  class ASTContext;
+  class CXXConstructorDecl;
+  class CXXDestructorDecl;
+  class FunctionDecl;
+  class NamedDecl;
+  class VarDecl;
+
+namespace CodeGen {
+  class CovariantThunkAdjustment;
+  class ThunkAdjustment;
+   
+/// MangleContext - Context for tracking state which persists across multiple
+/// calls to the C++ name mangler.
+class MangleContext {
+  ASTContext &Context;
+
+  /// AnonStructIds - Consecutively-assigned ids for anonymous structs and
+  /// unions, so they can be given stable mangled names.
+  llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
+
+public:
+  explicit MangleContext(ASTContext &Context)
+    : Context(Context) { }
+
+  ASTContext &getASTContext() const { return Context; }
+
+  /// getAnonymousStructId - Return the id for the given anonymous tag decl,
+  /// assigning the next unused id on first use.
+  uint64_t getAnonymousStructId(const TagDecl *TD) {
+    std::pair<llvm::DenseMap<const TagDecl *,
+      uint64_t>::iterator, bool> Result =
+      AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
+    return Result.first->second;
+  }
+
+  /// @name Mangler Entry Points
+  /// @{
+
+  /// shouldMangleDeclName - Whether the given declaration needs a mangled
+  /// name (if not, its plain identifier can be used directly).
+  bool shouldMangleDeclName(const NamedDecl *D);
+
+  // Each entry point below emits the mangled name into the given buffer.
+  void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+  void mangleThunk(const FunctionDecl *FD, 
+                   const ThunkAdjustment &ThisAdjustment,
+                   llvm::SmallVectorImpl<char> &);
+  void mangleCXXDtorThunk(const CXXDestructorDecl *D, CXXDtorType Type,
+                          const ThunkAdjustment &ThisAdjustment,
+                          llvm::SmallVectorImpl<char> &);
+  void mangleCovariantThunk(const FunctionDecl *FD, 
+                            const CovariantThunkAdjustment& Adjustment,
+                            llvm::SmallVectorImpl<char> &);
+  void mangleGuardVariable(const VarDecl *D, llvm::SmallVectorImpl<char> &);
+  void mangleCXXVtable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
+  void mangleCXXVTT(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
+  void mangleCXXCtorVtable(const CXXRecordDecl *RD, int64_t Offset,
+                           const CXXRecordDecl *Type,
+                           llvm::SmallVectorImpl<char> &);
+  void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
+  void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+  void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+                     llvm::SmallVectorImpl<char> &);
+  void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+                     llvm::SmallVectorImpl<char> &);
+
+  /// @}
+};
+  
+}
+}
+
+#endif
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..1e1edc1
--- /dev/null
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,100 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to LLVM Code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/CodeGen/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+namespace {
+  /// CodeGeneratorImpl - CodeGenerator implementation which forwards each
+  /// AST construct it receives to a CodeGen::CodeGenModule, accumulating
+  /// the result in an llvm::Module.
+  class CodeGeneratorImpl : public CodeGenerator {
+    Diagnostic &Diags;
+    llvm::OwningPtr<const llvm::TargetData> TD;
+    ASTContext *Ctx;
+    const CodeGenOptions CodeGenOpts;  // Intentionally copied in.
+  protected:
+    llvm::OwningPtr<llvm::Module> M;
+    llvm::OwningPtr<CodeGen::CodeGenModule> Builder;
+  public:
+    CodeGeneratorImpl(Diagnostic &diags, const std::string& ModuleName,
+                      const CodeGenOptions &CGO, llvm::LLVMContext& C)
+      // Null out Ctx until Initialize() runs, so an accidental early use
+      // fails deterministically instead of reading garbage.
+      : Diags(diags), Ctx(0), CodeGenOpts(CGO),
+        M(new llvm::Module(ModuleName, C)) {}
+
+    virtual ~CodeGeneratorImpl() {}
+
+    /// GetModule - Return the module being built without giving up ownership.
+    virtual llvm::Module* GetModule() {
+      return M.get();
+    }
+
+    /// ReleaseModule - Transfer ownership of the built module to the caller.
+    virtual llvm::Module* ReleaseModule() {
+      return M.take();
+    }
+
+    virtual void Initialize(ASTContext &Context) {
+      Ctx = &Context;
+
+      M->setTargetTriple(Ctx->Target.getTriple().getTriple());
+      M->setDataLayout(Ctx->Target.getTargetDescription());
+      TD.reset(new llvm::TargetData(Ctx->Target.getTargetDescription()));
+      Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
+                                               *M, *TD, Diags));
+    }
+
+    virtual void HandleTopLevelDecl(DeclGroupRef DG) {
+      // Make sure to emit all elements of a Decl.
+      for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+        Builder->EmitTopLevelDecl(*I);
+    }
+
+    /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+    /// (e.g. struct, union, enum, class) is completed. This allows the client
+    /// to hack on the type, which can occur at any point in the file
+    /// (because these can be defined in declspecs).
+    virtual void HandleTagDeclDefinition(TagDecl *D) {
+      Builder->UpdateCompletedType(D);
+    }
+
+    virtual void HandleTranslationUnit(ASTContext &Context) {
+      // On error, drop the module entirely rather than hand back
+      // partially-generated IR.
+      if (Diags.hasErrorOccurred()) {
+        M.reset();
+        return;
+      }
+
+      if (Builder)
+        Builder->Release();
+    }
+
+    virtual void CompleteTentativeDefinition(VarDecl *D) {
+      if (Diags.hasErrorOccurred())
+        return;
+
+      Builder->EmitTentativeDefinition(D);
+    }
+  };
+}
+
+/// CreateLLVMCodeGen - Construct a fresh code generator; the caller is
+/// responsible for deleting the returned object.
+CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+                                        const std::string& ModuleName,
+                                        const CodeGenOptions &CGO,
+                                        llvm::LLVMContext& C) {
+  CodeGeneratorImpl *CG = new CodeGeneratorImpl(Diags, ModuleName, CGO, C);
+  return CG;
+}
diff --git a/lib/CodeGen/README.txt b/lib/CodeGen/README.txt
new file mode 100644
index 0000000..e6d6109
--- /dev/null
+++ b/lib/CodeGen/README.txt
@@ -0,0 +1,47 @@
+IRgen optimization opportunities.
+
+//===---------------------------------------------------------------------===//
+
+The common pattern of
+--
+short x; // or char, etc
+(x == 10)
+--
+generates a zext/sext of x which can easily be avoided.
+
+//===---------------------------------------------------------------------===//
+
+Bitfield accesses can be shifted to simplify masking and sign
+extension. For example, if the bitfield width is 8 and it is
+appropriately aligned then it is a lot shorter to just load the char
+directly.
+
+//===---------------------------------------------------------------------===//
+
+It may be worth avoiding creation of alloca's for formal arguments
+for the common situation where the argument is never written to or has
+its address taken. The idea would be to begin generating code by using
+the argument directly and if its address is taken or it is stored to
+then generate the alloca and patch up the existing code.
+
+In theory, the same optimization could be a win for block local
+variables as long as the declaration dominates all statements in the
+block.
+
+NOTE: The main case we care about this for is for -O0 -g compile time
+performance, and in that scenario we will need to emit the alloca
+anyway currently to emit proper debug info. So this is blocked by
+being able to emit debug information which refers to an LLVM
+temporary, not an alloca.
+
+//===---------------------------------------------------------------------===//
+
+We should try and avoid generating basic blocks which only contain
+jumps. At -O0, this penalizes us all the way from IRgen (malloc &
+instruction overhead), all the way down through code generation and
+assembly time.
+
+On 176.gcc:expr.ll, it looks like over 12% of basic blocks are just
+direct branches!
+
+//===---------------------------------------------------------------------===//
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
new file mode 100644
index 0000000..6f650fc
--- /dev/null
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -0,0 +1,1955 @@
+//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace CodeGen;
+
+ABIInfo::~ABIInfo() {}
+
+/// dump - Print a human-readable description of this ABIArgInfo to stderr,
+/// including any kind-specific payload (coerce type or indirect alignment).
+void ABIArgInfo::dump() const {
+  llvm::raw_ostream &OS = llvm::errs();
+  OS << "(ABIArgInfo Kind=";
+  switch (TheKind) {
+  case Direct:   OS << "Direct";  break;
+  case Extend:   OS << "Extend";  break;
+  case Ignore:   OS << "Ignore";  break;
+  case Expand:   OS << "Expand";  break;
+  case Coerce:
+    OS << "Coerce Type=";
+    getCoerceToType()->print(OS);
+    break;
+  case Indirect:
+    OS << "Indirect Align=" << getIndirectAlign();
+    break;
+  }
+  OS << ")\n";
+}
+
+TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
+                         bool AllowArrays) {
+  if (FD->isUnnamedBitfield())
+    return true;
+
+  QualType FT = FD->getType();
+
+  // Constant arrays of empty records count as empty, strip them off.
+  if (AllowArrays)
+    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
+      FT = AT->getElementType();
+
+  return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+  const RecordType *RT = T->getAs<RecordType>();
+  if (!RT)
+    return false;  // Non-record types are never "empty records".
+  const RecordDecl *RD = RT->getDecl();
+  if (RD->hasFlexibleArrayMember())
+    return false;
+  // A record is empty iff every one of its fields is empty.
+  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i)
+    if (!isEmptyField(Context, *i, AllowArrays))
+      return false;
+  return true;
+}
+
+/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
+/// a non-trivial destructor or a non-trivial copy constructor.
+static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
+  // Only C++ classes can carry non-trivial special members.
+  if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+    return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
+  return false;
+}
+
+/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
+/// a record type with either a non-trivial destructor or a non-trivial copy
+/// constructor.
+static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
+  if (const RecordType *RT = T->getAs<RecordType>())
+    return hasNonTrivialDestructorOrCopyConstructor(RT);
+  return false;
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The type of the single non-empty field, if it exists;
+/// otherwise a null pointer.
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+  const RecordType *RT = T->getAsStructureType();
+  if (!RT)
+    return 0;
+
+  const RecordDecl *RD = RT->getDecl();
+  if (RD->hasFlexibleArrayMember())
+    return 0;
+
+  const Type *Found = 0;
+  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i) {
+    const FieldDecl *FD = *i;
+    QualType FT = FD->getType();
+
+    // Ignore empty fields.
+    if (isEmptyField(Context, FD, true))
+      continue;
+
+    // If we already found an element then this isn't a single-element
+    // struct.
+    if (Found)
+      return 0;
+
+    // Treat single element arrays as the element.
+    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+      if (AT->getSize().getZExtValue() != 1)
+        break;
+      FT = AT->getElementType();
+    }
+
+    // A scalar field is the candidate element itself; an aggregate field
+    // qualifies only if it is in turn a single-element struct.
+    if (!CodeGenFunction::hasAggregateLLVMType(FT)) {
+      Found = FT.getTypePtr();
+    } else {
+      Found = isSingleElementStruct(FT, Context);
+      if (!Found)
+        return 0;
+    }
+  }
+
+  return Found;
+}
+
+/// is32Or64BitBasicType - Return true if Ty is a builtin, pointer, complex,
+/// enum, or block-pointer type whose size is exactly 32 or 64 bits.
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+  bool IsBasic = Ty->getAs<BuiltinType>() || Ty->isAnyPointerType() ||
+                 Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+                 Ty->isBlockPointerType();
+  if (!IsBasic)
+    return false;
+
+  uint64_t BitWidth = Context.getTypeSize(Ty);
+  return BitWidth == 32 || BitWidth == 64;
+}
+
+/// canExpandIndirectArgument - Test whether an argument type which is to be
+/// passed indirectly (on the stack) would have the equivalent layout if it was
+/// expanded into separate arguments. If so, we prefer to do the latter to avoid
+/// inhibiting optimizations.
+///
+// FIXME: This predicate is missing many cases, currently it just follows
+// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
+// should probably make this smarter, or better yet make the LLVM backend
+// capable of handling it.
+static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
+  // We can only expand structure types.
+  const RecordType *RT = Ty->getAs<RecordType>();
+  if (!RT)
+    return false;
+
+  // We can only expand (C) structures.
+  //
+  // FIXME: This needs to be generalized to handle classes as well.
+  const RecordDecl *RD = RT->getDecl();
+  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
+    return false;
+
+  // Every field must be a register-sized primitive; anything else could
+  // change the stack layout relative to the byval form.
+  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+         i != e; ++i) {
+    const FieldDecl *FD = *i;
+
+    if (!is32Or64BitBasicType(FD->getType(), Context))
+      return false;
+
+    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
+    // how to expand them yet, and the predicate for telling if a bitfield still
+    // counts as "basic" is more complicated than what we were doing previously.
+    if (FD->isBitField())
+      return false;
+  }
+
+  return true;
+}
+
+/// typeContainsSSEVector - Return true if the record, or any record reachable
+/// through its fields, contains a vector field of at least 128 bits.
+static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
+  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+         I != E; ++I) {
+    const FieldDecl *FD = *I;
+    QualType FT = FD->getType();
+
+    if (FT->isVectorType() && Context.getTypeSize(FT) >= 128)
+      return true;
+
+    // Recurse into nested record fields.
+    if (const RecordType* RT = FT->getAs<RecordType>())
+      if (typeContainsSSEVector(RT->getDecl(), Context))
+        return true;
+  }
+
+  return false;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+  ABIArgInfo classifyReturnType(QualType RetTy,
+                                ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+  ABIArgInfo classifyArgumentType(QualType RetTy,
+                                  ASTContext &Context,
+                                  llvm::LLVMContext &VMContext) const;
+
+  // Classify the return type and then each argument in turn.
+  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                           llvm::LLVMContext &VMContext) const {
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+                                            VMContext);
+    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+         it != ie; ++it)
+      it->info = classifyArgumentType(it->type, Context, VMContext);
+  }
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+};
+
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  DefaultTargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
+};
+
+// The default ABI provides no va_arg lowering; it simply returns a null
+// Value.
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                       CodeGenFunction &CGF) const {
+  return 0;
+}
+
+// Default classification: aggregates go indirect, promotable integers are
+// extended, everything else is passed directly.
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty,
+                                                ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+    return ABIArgInfo::getIndirect(0);
+  } else {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    return (Ty->isPromotableIntegerType() ?
+            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  }
+}
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+  ASTContext &Context;
+  bool IsDarwinVectorABI;
+  bool IsSmallStructInRegABI;
+
+  static bool isRegisterSize(unsigned Size) {
+    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+  }
+
+  static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context);
+
+  static unsigned getIndirectArgumentAlignment(QualType Ty,
+                                               ASTContext &Context);
+
+public:
+  ABIArgInfo classifyReturnType(QualType RetTy,
+                                ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+  ABIArgInfo classifyArgumentType(QualType RetTy,
+                                  ASTContext &Context,
+                                  llvm::LLVMContext &VMContext) const;
+
+  // Classify the return type and then each argument in turn.
+  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                           llvm::LLVMContext &VMContext) const {
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+                                            VMContext);
+    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+         it != ie; ++it)
+      it->info = classifyArgumentType(it->type, Context, VMContext);
+  }
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+
+  // d feeds IsDarwinVectorABI, p feeds IsSmallStructInRegABI.
+  X86_32ABIInfo(ASTContext &Context, bool d, bool p)
+    : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
+      IsSmallStructInRegABI(p) {}
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  X86_32TargetCodeGenInfo(ASTContext &Context, bool d, bool p)
+    :TargetCodeGenInfo(new X86_32ABIInfo(Context, d, p)) {}
+};
+
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin ABI).
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+                                               ASTContext &Context) {
+  uint64_t Size = Context.getTypeSize(Ty);
+
+  // Type must be register sized.
+  if (!isRegisterSize(Size))
+    return false;
+
+  if (Ty->isVectorType()) {
+    // 64- and 128- bit vectors inside structures are not returned in
+    // registers.
+    if (Size == 64 || Size == 128)
+      return false;
+
+    return true;
+  }
+
+  // If this is a builtin, pointer, enum, or complex type, it is ok.
+  if (Ty->getAs<BuiltinType>() || Ty->isAnyPointerType() || 
+      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+      Ty->isBlockPointerType())
+    return true;
+
+  // Arrays are treated like records.
+  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+    return shouldReturnTypeInRegister(AT->getElementType(), Context);
+
+  // Otherwise, it must be a record type.
+  const RecordType *RT = Ty->getAs<RecordType>();
+  if (!RT) return false;
+
+  // FIXME: Traverse bases here too.
+
+  // Structure types are passed in register if all fields would be
+  // passed in a register.
+  for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
+         e = RT->getDecl()->field_end(); i != e; ++i) {
+    const FieldDecl *FD = *i;
+
+    // Empty fields are ignored.
+    if (isEmptyField(Context, FD, true))
+      continue;
+
+    // Check fields recursively.
+    if (!shouldReturnTypeInRegister(FD->getType(), Context))
+      return false;
+  }
+
+  return true;
+}
+
+/// classifyReturnType - Decide how a value of type RetTy is returned under
+/// the x86-32 ABI: ignored (void), direct, coerced into another LLVM type,
+/// or indirect through a hidden pointer.
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+                                            ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  if (RetTy->isVoidType()) {
+    return ABIArgInfo::getIgnore();
+  } else if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+    // On Darwin, some vectors are returned in registers.
+    if (IsDarwinVectorABI) {
+      uint64_t Size = Context.getTypeSize(RetTy);
+
+      // 128-bit vectors are a special case; they are returned in
+      // registers and we need to make sure to pick a type the LLVM
+      // backend will like.
+      if (Size == 128)
+        return ABIArgInfo::getCoerce(llvm::VectorType::get(
+                  llvm::Type::getInt64Ty(VMContext), 2));
+
+      // Always return in register if it fits in a general purpose
+      // register, or if it is 64 bits and has a single element.
+      if ((Size == 8 || Size == 16 || Size == 32) ||
+          (Size == 64 && VT->getNumElements() == 1))
+        return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+
+      return ABIArgInfo::getIndirect(0);
+    }
+
+    return ABIArgInfo::getDirect();
+  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+      // Structures with either a non-trivial destructor or a non-trivial
+      // copy constructor are always indirect.
+      if (hasNonTrivialDestructorOrCopyConstructor(RT))
+        return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+      
+      // Structures with flexible arrays are always indirect.
+      if (RT->getDecl()->hasFlexibleArrayMember())
+        return ABIArgInfo::getIndirect(0);
+    }
+    
+    // If specified, structs and unions are always indirect.
+    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
+      return ABIArgInfo::getIndirect(0);
+
+    // Classify "single element" structs as their element type.
+    if (const Type *SeltTy = isSingleElementStruct(RetTy, Context)) {
+      if (const BuiltinType *BT = SeltTy->getAs<BuiltinType>()) {
+        if (BT->isIntegerType()) {
+          // We need to use the size of the structure, padding
+          // bit-fields can adjust that to be larger than the single
+          // element type.
+          uint64_t Size = Context.getTypeSize(RetTy);
+          return ABIArgInfo::getCoerce(
+            llvm::IntegerType::get(VMContext, (unsigned) Size));
+        } else if (BT->getKind() == BuiltinType::Float) {
+          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+                 "Unexpect single element structure size!");
+          return ABIArgInfo::getCoerce(llvm::Type::getFloatTy(VMContext));
+        } else if (BT->getKind() == BuiltinType::Double) {
+          assert(Context.getTypeSize(RetTy) == Context.getTypeSize(SeltTy) &&
+                 "Unexpect single element structure size!");
+          return ABIArgInfo::getCoerce(llvm::Type::getDoubleTy(VMContext));
+        }
+      } else if (SeltTy->isPointerType()) {
+        // FIXME: It would be really nice if this could come out as the proper
+        // pointer type.
+        const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
+        return ABIArgInfo::getCoerce(PtrTy);
+      } else if (SeltTy->isVectorType()) {
+        // 64- and 128-bit vectors are never returned in a
+        // register when inside a structure.
+        uint64_t Size = Context.getTypeSize(RetTy);
+        if (Size == 64 || Size == 128)
+          return ABIArgInfo::getIndirect(0);
+
+        return classifyReturnType(QualType(SeltTy, 0), Context, VMContext);
+      }
+    }
+
+    // Small structures which are register sized are generally returned
+    // in a register.
+    if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, Context)) {
+      uint64_t Size = Context.getTypeSize(RetTy);
+      return ABIArgInfo::getCoerce(llvm::IntegerType::get(VMContext, Size));
+    }
+
+    return ABIArgInfo::getIndirect(0);
+  } else {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+      RetTy = EnumTy->getDecl()->getIntegerType();
+
+    return (RetTy->isPromotableIntegerType() ?
+            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  }
+}
+
+unsigned X86_32ABIInfo::getIndirectArgumentAlignment(QualType Ty,
+                                                     ASTContext &Context) {
+  // Only records that are at least 16-byte aligned and contain an SSE
+  // vector get an explicit 16-byte alignment; everything else uses the
+  // default alignment (indicated by 0).
+  if (Context.getTypeAlign(Ty) >= 128) {
+    const RecordType *RT = Ty->getAs<RecordType>();
+    if (RT && typeContainsSSEVector(RT->getDecl(), Context))
+      return 16;
+  }
+  return 0;
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
+                                               ASTContext &Context,
+                                           llvm::LLVMContext &VMContext) const {
+  // FIXME: Set alignment on indirect arguments.
+  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+    // Scalar arguments: treat an enum type as its underlying integer type,
+    // then extend promotable integers and pass everything else directly.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    if (Ty->isPromotableIntegerType())
+      return ABIArgInfo::getExtend();
+    return ABIArgInfo::getDirect();
+  }
+
+  if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    // Structures with either a non-trivial destructor or a non-trivial
+    // copy constructor are always indirect (and never byval).
+    if (hasNonTrivialDestructorOrCopyConstructor(RT))
+      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+    // Structures with flexible arrays are always indirect.
+    if (RT->getDecl()->hasFlexibleArrayMember())
+      return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
+                                                                  Context));
+  }
+
+  // Ignore empty structs.
+  if (Ty->isStructureType() && Context.getTypeSize(Ty) == 0)
+    return ABIArgInfo::getIgnore();
+
+  // Expand small (<= 128-bit) record types when we know that the stack layout
+  // of those arguments will match the struct. This is important because the
+  // LLVM backend isn't smart enough to remove byval, which inhibits many
+  // optimizations.
+  if (Context.getTypeSize(Ty) <= 4*32 &&
+      canExpandIndirectArgument(Ty, Context))
+    return ABIArgInfo::getExpand();
+
+  return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty, Context));
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                      CodeGenFunction &CGF) const {
+  // On x86-32, va_list is treated here as a plain pointer into the argument
+  // area: load the current pointer, hand it back typed as Ty*, and bump the
+  // pointer past this argument.
+  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+  CGBuilderTy &Builder = CGF.Builder;
+  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+                                                       "ap");
+  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+  // Use 'const llvm::Type *' for consistency with the rest of this file's
+  // LLVM type handling.
+  const llvm::Type *PTy =
+    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+  // Arguments occupy 4-byte stack slots; round the size up accordingly.
+  uint64_t Offset =
+    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+  llvm::Value *NextAddr =
+    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+                      "ap.next");
+  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+  return AddrTyped;
+}
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+  /// Class - The AMD64 ABI argument classes. Each eightbyte of an argument
+  /// or return value gets one of these classes.
+  enum Class {
+    Integer = 0,  // Passed/returned in a general purpose register.
+    SSE,          // Passed/returned in an SSE register.
+    SSEUp,        // Upper half of the preceding SSE eightbyte.
+    X87,          // Returned on the x87 stack (%st0).
+    X87Up,        // High part of an x87 long double.
+    ComplexX87,   // _Complex long double; returned in %st0/%st1.
+    NoClass,      // Unused/empty; the value is ignored.
+    Memory        // Passed/returned in memory.
+  };
+
+  /// merge - Implement the X86_64 ABI merging algorithm.
+  ///
+  /// Merge an accumulating classification \arg Accum with a field
+  /// classification \arg Field.
+  ///
+  /// \param Accum - The accumulating classification. This should
+  /// always be either NoClass or the result of a previous merge
+  /// call. In addition, this should never be Memory (the caller
+  /// should just return Memory for the aggregate).
+  ///
+  /// \param Field - The classification of the field being merged in.
+  Class merge(Class Accum, Class Field) const;
+
+  /// classify - Determine the x86_64 register classes in which the
+  /// given type T should be passed.
+  ///
+  /// \param Lo - The classification for the parts of the type
+  /// residing in the low word of the containing object.
+  ///
+  /// \param Hi - The classification for the parts of the type
+  /// residing in the high word of the containing object.
+  ///
+  /// \param OffsetBase - The bit offset of this type in the
+  /// containing object.  Some parameters are classified differently
+  /// depending on whether they straddle an eightbyte boundary.
+  ///
+  /// If a word is unused its result will be NoClass; if a type should
+  /// be passed in Memory then at least the classification of \arg Lo
+  /// will be Memory.
+  ///
+  /// The \arg Lo class will be NoClass iff the argument is ignored.
+  ///
+  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+  /// also be ComplexX87.
+  void classify(QualType T, ASTContext &Context, uint64_t OffsetBase,
+                Class &Lo, Class &Hi) const;
+
+  /// getCoerceResult - Given a source type \arg Ty and an LLVM type
+  /// to coerce to, chose the best way to pass Ty in the same place
+  /// that \arg CoerceTo would be passed, but while keeping the
+  /// emitted code as simple as possible.
+  ///
+  /// FIXME: Note, this should be cleaned up to just take an enumeration of all
+  /// the ways we might want to pass things, instead of constructing an LLVM
+  /// type. This makes this code more explicit, and it makes it clearer that we
+  /// are also doing this for correctness in the case of passing scalar types.
+  ABIArgInfo getCoerceResult(QualType Ty,
+                             const llvm::Type *CoerceTo,
+                             ASTContext &Context) const;
+
+  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
+  /// such that the argument will be passed in memory.
+  ABIArgInfo getIndirectResult(QualType Ty,
+                               ASTContext &Context) const;
+
+  ABIArgInfo classifyReturnType(QualType RetTy,
+                                ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+  /// classifyArgumentType - Classify one argument; also reports how many
+  /// general purpose (\arg neededInt) and SSE (\arg neededSSE) registers
+  /// the argument consumes.
+  ABIArgInfo classifyArgumentType(QualType Ty,
+                                  ASTContext &Context,
+                                  llvm::LLVMContext &VMContext,
+                                  unsigned &neededInt,
+                                  unsigned &neededSSE) const;
+
+public:
+  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                           llvm::LLVMContext &VMContext) const;
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+};
+
+/// X86_64TargetCodeGenInfo - TargetCodeGenInfo wrapper for the x86-64 ABI.
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  X86_64TargetCodeGenInfo():TargetCodeGenInfo(new X86_64ABIInfo()) {}
+};
+
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum,
+                                          Class Field) const {
+  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is classified
+  // recursively so that always two fields are considered. The resulting
+  // class is calculated according to the classes of the fields in the
+  // eightbyte:
+  //
+  // (a) If both classes are equal, this is the resulting class.
+  //
+  // (b) If one of the classes is NO_CLASS, the resulting class is
+  // the other class.
+  //
+  // (c) If one of the classes is MEMORY, the result is the MEMORY
+  // class.
+  //
+  // (d) If one of the classes is INTEGER, the result is the
+  // INTEGER.
+  //
+  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+  // MEMORY is used as class.
+  //
+  // (f) Otherwise class SSE is used.
+
+  // Accum should never be memory (we should have returned) or
+  // ComplexX87 (because this cannot be passed in a structure).
+  assert((Accum != Memory && Accum != ComplexX87) &&
+         "Invalid accumulated classification during merge.");
+
+  // Rules (a) and (b): the field contributes nothing new.
+  if (Field == Accum || Field == NoClass)
+    return Accum;
+
+  // Rule (c): Memory wins over everything.
+  if (Field == Memory)
+    return Memory;
+
+  // Rule (b): an empty accumulator takes the field's class.
+  if (Accum == NoClass)
+    return Field;
+
+  // Rule (d): Integer wins over the remaining classes.
+  if (Accum == Integer || Field == Integer)
+    return Integer;
+
+  // Rule (e): any x87-related class forces Memory.
+  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+      Accum == X87 || Accum == X87Up)
+    return Memory;
+
+  // Rule (f): otherwise SSE.
+  return SSE;
+}
+
+void X86_64ABIInfo::classify(QualType Ty,
+                             ASTContext &Context,
+                             uint64_t OffsetBase,
+                             Class &Lo, Class &Hi) const {
+  // FIXME: This code can be simplified by introducing a simple value class for
+  // Class pairs with appropriate constructor methods for the various
+  // situations.
+
+  // FIXME: Some of the split computations are wrong; unaligned vectors
+  // shouldn't be passed in registers for example, so there is no chance they
+  // can straddle an eightbyte. Verify & simplify.
+
+  Lo = Hi = NoClass;
+
+  // Current refers to the class of the eightbyte this type starts in, and is
+  // initialized to Memory, the most conservative classification.
+  Class &Current = OffsetBase < 64 ? Lo : Hi;
+  Current = Memory;
+
+  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+    BuiltinType::Kind k = BT->getKind();
+
+    if (k == BuiltinType::Void) {
+      Current = NoClass;
+    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+      // 128-bit integers occupy two eightbytes, both INTEGER.
+      Lo = Integer;
+      Hi = Integer;
+    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+      Current = Integer;
+    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+      Current = SSE;
+    } else if (k == BuiltinType::LongDouble) {
+      Lo = X87;
+      Hi = X87Up;
+    }
+    // FIXME: _Decimal32 and _Decimal64 are SSE.
+    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+  } else if (const EnumType *ET = Ty->getAs<EnumType>()) {
+    // Classify the underlying integer type.
+    classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
+  } else if (Ty->hasPointerRepresentation()) {
+    Current = Integer;
+  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+    uint64_t Size = Context.getTypeSize(VT);
+    if (Size == 32) {
+      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+      // float> as integer.
+      Current = Integer;
+
+      // If this type crosses an eightbyte boundary, it should be
+      // split.
+      uint64_t EB_Real = (OffsetBase) / 64;
+      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+      if (EB_Real != EB_Imag)
+        Hi = Lo;
+    } else if (Size == 64) {
+      // gcc passes <1 x double> in memory. :(
+      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+        return;
+
+      // gcc passes <1 x long long> as INTEGER.
+      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong))
+        Current = Integer;
+      else
+        Current = SSE;
+
+      // If this type crosses an eightbyte boundary, it should be
+      // split.
+      if (OffsetBase && OffsetBase != 64)
+        Hi = Lo;
+    } else if (Size == 128) {
+      Lo = SSE;
+      Hi = SSEUp;
+    }
+  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+    QualType ET = Context.getCanonicalType(CT->getElementType());
+
+    uint64_t Size = Context.getTypeSize(Ty);
+    if (ET->isIntegralType()) {
+      if (Size <= 64)
+        Current = Integer;
+      else if (Size <= 128)
+        Lo = Hi = Integer;
+    } else if (ET == Context.FloatTy)
+      Current = SSE;
+    else if (ET == Context.DoubleTy)
+      Lo = Hi = SSE;
+    else if (ET == Context.LongDoubleTy)
+      Current = ComplexX87;
+
+    // If this complex type crosses an eightbyte boundary then it
+    // should be split.
+    uint64_t EB_Real = (OffsetBase) / 64;
+    uint64_t EB_Imag = (OffsetBase + Context.getTypeSize(ET)) / 64;
+    if (Hi == NoClass && EB_Real != EB_Imag)
+      Hi = Lo;
+  } else if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+    // Arrays are treated like structures.
+
+    uint64_t Size = Context.getTypeSize(Ty);
+
+    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+    // than two eightbytes, ..., it has class MEMORY.
+    if (Size > 128)
+      return;
+
+    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+    // fields, it has class MEMORY.
+    //
+    // Only need to check alignment of array base.
+    if (OffsetBase % Context.getTypeAlign(AT->getElementType()))
+      return;
+
+    // Otherwise implement simplified merge. We could be smarter about
+    // this, but it isn't worth it and would be harder to verify.
+    Current = NoClass;
+    uint64_t EltSize = Context.getTypeSize(AT->getElementType());
+    uint64_t ArraySize = AT->getSize().getZExtValue();
+    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+      Class FieldLo, FieldHi;
+      classify(AT->getElementType(), Context, Offset, FieldLo, FieldHi);
+      Lo = merge(Lo, FieldLo);
+      Hi = merge(Hi, FieldHi);
+      if (Lo == Memory || Hi == Memory)
+        break;
+    }
+
+    // Do post merger cleanup (see below). Only case we worry about is Memory.
+    if (Hi == Memory)
+      Lo = Memory;
+    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+    uint64_t Size = Context.getTypeSize(Ty);
+
+    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+    // than two eightbytes, ..., it has class MEMORY.
+    if (Size > 128)
+      return;
+
+    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+    // copy constructor or a non-trivial destructor, it is passed by invisible
+    // reference.
+    if (hasNonTrivialDestructorOrCopyConstructor(RT))
+      return;
+
+    const RecordDecl *RD = RT->getDecl();
+
+    // Assume variable sized types are passed in memory.
+    if (RD->hasFlexibleArrayMember())
+      return;
+
+    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+    // Reset Lo class, this will be recomputed.
+    Current = NoClass;
+
+    // If this is a C++ record, classify the bases first.
+    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+             e = CXXRD->bases_end(); i != e; ++i) {
+        assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+               "Unexpected base class!");
+        const CXXRecordDecl *Base =
+          cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+        // Classify this field.
+        //
+        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+        // single eightbyte, each is classified separately. Each eightbyte gets
+        // initialized to class NO_CLASS.
+        Class FieldLo, FieldHi;
+        uint64_t Offset = OffsetBase + Layout.getBaseClassOffset(Base);
+        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+        Lo = merge(Lo, FieldLo);
+        Hi = merge(Hi, FieldHi);
+        if (Lo == Memory || Hi == Memory)
+          break;
+      }
+
+      // If this record has no fields but isn't empty, classify as INTEGER.
+      if (RD->field_empty() && Size)
+        Current = Integer;
+    }
+
+    // Classify the fields one at a time, merging the results.
+    unsigned idx = 0;
+    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+           i != e; ++i, ++idx) {
+      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+      bool BitField = i->isBitField();
+
+      // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+      // fields, it has class MEMORY.
+      //
+      // Note, skip this test for bit-fields, see below.
+      if (!BitField && Offset % Context.getTypeAlign(i->getType())) {
+        Lo = Memory;
+        return;
+      }
+
+      // Classify this field.
+      //
+      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+      // exceeds a single eightbyte, each is classified
+      // separately. Each eightbyte gets initialized to class
+      // NO_CLASS.
+      Class FieldLo, FieldHi;
+
+      // Bit-fields require special handling, they do not force the
+      // structure to be passed in memory even if unaligned, and
+      // therefore they can straddle an eightbyte.
+      if (BitField) {
+        // Ignore padding bit-fields.
+        if (i->isUnnamedBitfield())
+          continue;
+
+        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+        uint64_t Size = i->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
+
+        // Classify each eightbyte the bit-field touches as INTEGER.
+        uint64_t EB_Lo = Offset / 64;
+        uint64_t EB_Hi = (Offset + Size - 1) / 64;
+        FieldLo = FieldHi = NoClass;
+        if (EB_Lo) {
+          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+          FieldLo = NoClass;
+          FieldHi = Integer;
+        } else {
+          FieldLo = Integer;
+          FieldHi = EB_Hi ? Integer : NoClass;
+        }
+      } else
+        classify(i->getType(), Context, Offset, FieldLo, FieldHi);
+      Lo = merge(Lo, FieldLo);
+      Hi = merge(Hi, FieldHi);
+      if (Lo == Memory || Hi == Memory)
+        break;
+    }
+
+    // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+    //
+    // (a) If one of the classes is MEMORY, the whole argument is
+    // passed in memory.
+    //
+    // (b) If SSEUP is not preceded by SSE, it is converted to SSE.
+
+    // The first of these conditions is guaranteed by how we implement
+    // the merge (just bail).
+    //
+    // The second condition occurs in the case of unions; for example
+    // union { _Complex double; unsigned; }.
+    if (Hi == Memory)
+      Lo = Memory;
+    if (Hi == SSEUp && Lo != SSE)
+      Hi = SSE;
+  }
+}
+
+ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
+                                          const llvm::Type *CoerceTo,
+                                          ASTContext &Context) const {
+  llvm::LLVMContext &VMContext = CoerceTo->getContext();
+
+  if (CoerceTo == llvm::Type::getInt64Ty(VMContext)) {
+    // An i64 coercion means the value lands in a general purpose register;
+    // for integral and pointer types prefer passing the value directly
+    // (possibly extended) over an explicit coercion.
+
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    if (Ty->isIntegralType() || Ty->hasPointerRepresentation()) {
+      if (Ty->isPromotableIntegerType())
+        return ABIArgInfo::getExtend();
+      return ABIArgInfo::getDirect();
+    }
+  } else if (CoerceTo == llvm::Type::getDoubleTy(VMContext)) {
+    // FIXME: It would probably be better to make CGFunctionInfo only map using
+    // canonical types than to canonize here.
+    QualType CTy = Context.getCanonicalType(Ty);
+
+    // Float and double end up in a single SSE reg.
+    if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
+      return ABIArgInfo::getDirect();
+  }
+
+  return ABIArgInfo::getCoerce(CoerceTo);
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+                                            ASTContext &Context) const {
+  // If this is a scalar LLVM value then assume LLVM will pass it in the right
+  // place naturally.
+  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    if (Ty->isPromotableIntegerType())
+      return ABIArgInfo::getExtend();
+    return ABIArgInfo::getDirect();
+  }
+
+  // Records with a non-trivial destructor or copy constructor are passed as
+  // a pointer, not byval.
+  // FIXME: Set alignment correctly.
+  return ABIArgInfo::getIndirect(0,
+      /*ByVal=*/!isRecordWithNonTrivialDestructorOrCopyConstructor(Ty));
+}
+
+// classifyReturnType - Map the AMD64 classification of \arg RetTy onto an
+// ABIArgInfo: indirect (sret) for Memory, ignore for NoClass, and otherwise
+// an LLVM type assembled from the Lo/Hi eightbyte classes.
+ABIArgInfo X86_64ABIInfo::classifyReturnType(QualType RetTy,
+                                            ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+  // classification algorithm.
+  X86_64ABIInfo::Class Lo, Hi;
+  classify(RetTy, Context, 0, Lo, Hi);
+
+  // Check some invariants.
+  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+  const llvm::Type *ResType = 0;
+  switch (Lo) {
+  case NoClass:
+    return ABIArgInfo::getIgnore();
+
+  case SSEUp:
+  case X87Up:
+    assert(0 && "Invalid classification for lo word.");
+
+    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+    // hidden argument.
+  case Memory:
+    return getIndirectResult(RetTy, Context);
+
+    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+    // available register of the sequence %rax, %rdx is used.
+  case Integer:
+    ResType = llvm::Type::getInt64Ty(VMContext); break;
+
+    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+    // available SSE register of the sequence %xmm0, %xmm1 is used.
+  case SSE:
+    ResType = llvm::Type::getDoubleTy(VMContext); break;
+
+    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+    // returned on the X87 stack in %st0 as 80-bit x87 number.
+  case X87:
+    ResType = llvm::Type::getX86_FP80Ty(VMContext); break;
+
+    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+    // part of the value is returned in %st0 and the imaginary part in
+    // %st1.
+  case ComplexX87:
+    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+    ResType = llvm::StructType::get(VMContext, llvm::Type::getX86_FP80Ty(VMContext),
+                                    llvm::Type::getX86_FP80Ty(VMContext),
+                                    NULL);
+    break;
+  }
+
+  switch (Hi) {
+    // Memory was handled previously and X87 should
+    // never occur as a hi class.
+  case Memory:
+  case X87:
+    assert(0 && "Invalid classification for hi word.");
+
+  case ComplexX87: // Previously handled.
+  case NoClass: break;
+
+  case Integer:
+    ResType = llvm::StructType::get(VMContext, ResType,
+                                    llvm::Type::getInt64Ty(VMContext), NULL);
+    break;
+  case SSE:
+    ResType = llvm::StructType::get(VMContext, ResType,
+                                    llvm::Type::getDoubleTy(VMContext), NULL);
+    break;
+
+    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+    // is passed in the upper half of the last used SSE register.
+    //
+    // SSEUP should always be preceded by SSE, just widen.
+  case SSEUp:
+    assert(Lo == SSE && "Unexpected SSEUp classification.");
+    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+    break;
+
+    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+    // returned together with the previous X87 value in %st0.
+  case X87Up:
+    // If X87Up is preceded by X87, we don't need to do
+    // anything. However, in some cases with unions it may not be
+    // preceded by X87. In such situations we follow gcc and pass the
+    // extra bits in an SSE reg.
+    if (Lo != X87)
+      ResType = llvm::StructType::get(VMContext, ResType,
+                                      llvm::Type::getDoubleTy(VMContext), NULL);
+    break;
+  }
+
+  return getCoerceResult(RetTy, ResType, Context);
+}
+
+// classifyArgumentType - Classify one argument per the AMD64 ABI, building
+// the LLVM type to pass it as and reporting in \arg neededInt / \arg
+// neededSSE how many GPRs and SSE registers the argument consumes (both 0
+// when it is passed in memory).
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(QualType Ty, ASTContext &Context,
+                                               llvm::LLVMContext &VMContext,
+                                               unsigned &neededInt,
+                                               unsigned &neededSSE) const {
+  X86_64ABIInfo::Class Lo, Hi;
+  classify(Ty, Context, 0, Lo, Hi);
+
+  // Check some invariants.
+  // FIXME: Enforce these by construction.
+  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+  assert((Lo != NoClass || Hi == NoClass) && "Invalid null classification.");
+  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+  neededInt = 0;
+  neededSSE = 0;
+  const llvm::Type *ResType = 0;
+  switch (Lo) {
+  case NoClass:
+    return ABIArgInfo::getIgnore();
+
+    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+    // on the stack.
+  case Memory:
+
+    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+    // COMPLEX_X87, it is passed in memory.
+  case X87:
+  case ComplexX87:
+    return getIndirectResult(Ty, Context);
+
+  case SSEUp:
+  case X87Up:
+    assert(0 && "Invalid classification for lo word.");
+
+    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+    // and %r9 is used.
+  case Integer:
+    ++neededInt;
+    ResType = llvm::Type::getInt64Ty(VMContext);
+    break;
+
+    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+    // available SSE register is used, the registers are taken in the
+    // order from %xmm0 to %xmm7.
+  case SSE:
+    ++neededSSE;
+    ResType = llvm::Type::getDoubleTy(VMContext);
+    break;
+  }
+
+  switch (Hi) {
+    // Memory was handled previously, ComplexX87 and X87 should
+    // never occur as hi classes, and X87Up must be preceded by X87,
+    // which is passed in memory.
+  case Memory:
+  case X87:
+  case ComplexX87:
+    assert(0 && "Invalid classification for hi word.");
+    break;
+
+  case NoClass: break;
+  case Integer:
+    ResType = llvm::StructType::get(VMContext, ResType,
+                                    llvm::Type::getInt64Ty(VMContext), NULL);
+    ++neededInt;
+    break;
+
+    // X87Up generally doesn't occur here (long double is passed in
+    // memory), except in situations involving unions.
+  case X87Up:
+  case SSE:
+    ResType = llvm::StructType::get(VMContext, ResType,
+                                    llvm::Type::getDoubleTy(VMContext), NULL);
+    ++neededSSE;
+    break;
+
+    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+    // eightbyte is passed in the upper half of the last used SSE
+    // register.
+  case SSEUp:
+    assert(Lo == SSE && "Unexpected SSEUp classification.");
+    ResType = llvm::VectorType::get(llvm::Type::getDoubleTy(VMContext), 2);
+    break;
+  }
+
+  return getCoerceResult(Ty, ResType, Context);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const {
+  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+                                          Context, VMContext);
+
+  // Register budget for argument passing: six GPRs and eight SSE registers.
+  unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+  // An indirect return value is passed via a hidden pointer argument, which
+  // consumes one of the integer registers.
+  if (FI.getReturnInfo().isIndirect())
+    --freeIntRegs;
+
+  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+  // get assigned (in left-to-right order) for passing as follows...
+  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+       it != ie; ++it) {
+    unsigned neededInt, neededSSE;
+    it->info = classifyArgumentType(it->type, Context, VMContext,
+                                    neededInt, neededSSE);
+
+    // AMD64-ABI 3.2.3p3: If there are no registers available for any
+    // eightbyte of an argument, the whole argument is passed on the
+    // stack, and any registers already assigned to its eightbytes are
+    // reverted.
+    if (neededInt > freeIntRegs || neededSSE > freeSSERegs) {
+      it->info = getIndirectResult(it->type, Context);
+      continue;
+    }
+
+    freeIntRegs -= neededInt;
+    freeSSERegs -= neededSSE;
+  }
+}
+
+/// EmitVAArgFromMemory - Emit the code to fetch a va_arg value of type
+/// \arg Ty from the overflow argument area of an x86-64 va_list, and
+/// advance the overflow_arg_area pointer past it.
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+                                        QualType Ty,
+                                        CodeGenFunction &CGF) {
+  llvm::Value *overflow_arg_area_p =
+    CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+  llvm::Value *overflow_arg_area =
+    CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+  // byte boundary if alignment needed by type exceeds 8 byte boundary.
+  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+  if (Align > 8) {
+    // Note that we follow the ABI & gcc here, even though the type
+    // could in theory have an alignment greater than 16. This case
+    // shouldn't ever matter in practice.
+
+    // overflow_arg_area = (overflow_arg_area + 15) & ~15;
+    llvm::Value *Offset =
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
+    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+                                 llvm::Type::getInt64Ty(CGF.getLLVMContext()));
+    llvm::Value *Mask = llvm::ConstantInt::get(
+        llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
+    overflow_arg_area =
+      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+                                 overflow_arg_area->getType(),
+                                 "overflow_arg_area.align");
+  }
+
+  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+  llvm::Value *Res =
+    CGF.Builder.CreateBitCast(overflow_arg_area,
+                              llvm::PointerType::getUnqual(LTy));
+
+  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+  // l->overflow_arg_area + sizeof(type).
+  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+  // an 8 byte boundary.
+
+  // The advance is the type size in bytes, rounded up to a multiple of 8.
+  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+  llvm::Value *Offset =
+      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
+                                               (SizeInBytes + 7)  & ~7);
+  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+                                            "overflow_arg_area.next");
+  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+  return Res;
+}
+
+// EmitVAArg - Lower a va_arg of type Ty against an x86-64 SysV va_list.
+// Emits an inline fast path that fetches the value from the register save
+// area (when the remaining GP/SSE register counts recorded in the va_list
+// suffice) and a slow path that reads from the overflow area, then joins
+// the two computed addresses with a phi. Returns the address of the value.
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                      CodeGenFunction &CGF) const {
+  llvm::LLVMContext &VMContext = CGF.getLLVMContext();
+  const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
+  const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
+
+  // Assume that va_list type is correct; should be pointer to LLVM type:
+  // struct {
+  //   i32 gp_offset;
+  //   i32 fp_offset;
+  //   i8* overflow_arg_area;
+  //   i8* reg_save_area;
+  // };
+  unsigned neededInt, neededSSE;
+  ABIArgInfo AI = classifyArgumentType(Ty, CGF.getContext(), VMContext,
+                                       neededInt, neededSSE);
+
+  // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+  // in the registers. If not go to step 7.
+  if (!neededInt && !neededSSE)
+    return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+  // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+  // general purpose registers needed to pass type and num_fp to hold
+  // the number of floating point registers needed.
+
+  // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+  // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+  // l->fp_offset > 304 - num_fp * 16 go to step 7.
+  //
+  // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+  // register save space).
+
+  // InRegs is the dynamic condition that all needed registers are still
+  // available; it stays null until at least one register class is needed.
+  llvm::Value *InRegs = 0;
+  llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+  llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+  if (neededInt) {
+    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+    InRegs =
+      CGF.Builder.CreateICmpULE(gp_offset,
+                                llvm::ConstantInt::get(i32Ty,
+                                                       48 - neededInt * 8),
+                                "fits_in_gp");
+  }
+
+  if (neededSSE) {
+    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+    llvm::Value *FitsInFP =
+      CGF.Builder.CreateICmpULE(fp_offset,
+                                llvm::ConstantInt::get(i32Ty,
+                                                       176 - neededSSE * 16),
+                                "fits_in_fp");
+    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+  }
+
+  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+  llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+  CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+  // Emit code to load the value if it was passed in registers.
+
+  CGF.EmitBlock(InRegBlock);
+
+  // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+  // an offset of l->gp_offset and/or l->fp_offset. This may require
+  // copying to a temporary location in case the parameter is passed
+  // in different register classes or requires an alignment greater
+  // than 8 for general purpose registers and 16 for XMM registers.
+  //
+  // FIXME: This really results in shameful code when we end up needing to
+  // collect arguments from different places; often what should result in a
+  // simple assembling of a structure from scattered addresses has many more
+  // loads than necessary. Can we clean this up?
+  const llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+  llvm::Value *RegAddr =
+    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+                           "reg_save_area");
+  if (neededInt && neededSSE) {
+    // Mixed GP/SSE argument: copy the two eightbytes from their separate
+    // register-save slots into a contiguous temporary and use its address.
+    // FIXME: Cleanup.
+    assert(AI.isCoerce() && "Unexpected ABI info for mixed regs");
+    const llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+    llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+    assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+    const llvm::Type *TyLo = ST->getElementType(0);
+    const llvm::Type *TyHi = ST->getElementType(1);
+    assert((TyLo->isFloatingPoint() ^ TyHi->isFloatingPoint()) &&
+           "Unexpected ABI info for mixed regs");
+    const llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+    const llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+    llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+    llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+    llvm::Value *RegLoAddr = TyLo->isFloatingPoint() ? FPAddr : GPAddr;
+    llvm::Value *RegHiAddr = TyLo->isFloatingPoint() ? GPAddr : FPAddr;
+    llvm::Value *V =
+      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+    RegAddr = CGF.Builder.CreateBitCast(Tmp,
+                                        llvm::PointerType::getUnqual(LTy));
+  } else if (neededInt) {
+    RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+                                        llvm::PointerType::getUnqual(LTy));
+  } else {
+    if (neededSSE == 1) {
+      RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+      RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+                                          llvm::PointerType::getUnqual(LTy));
+    } else {
+      assert(neededSSE == 2 && "Invalid number of needed registers!");
+      // SSE registers are spaced 16 bytes apart in the register save
+      // area, we need to collect the two eightbytes together.
+      llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+      llvm::Value *RegAddrHi =
+        CGF.Builder.CreateGEP(RegAddrLo,
+                            llvm::ConstantInt::get(i32Ty, 16));
+      const llvm::Type *DblPtrTy =
+        llvm::PointerType::getUnqual(DoubleTy);
+      const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
+                                                         DoubleTy, NULL);
+      llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+                                                           DblPtrTy));
+      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+      V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+                                                           DblPtrTy));
+      CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+      RegAddr = CGF.Builder.CreateBitCast(Tmp,
+                                          llvm::PointerType::getUnqual(LTy));
+    }
+  }
+
+  // AMD64-ABI 3.5.7p5: Step 5. Set:
+  // l->gp_offset = l->gp_offset + num_gp * 8
+  // l->fp_offset = l->fp_offset + num_fp * 16.
+  if (neededInt) {
+    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
+    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+                            gp_offset_p);
+  }
+  if (neededSSE) {
+    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
+    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+                            fp_offset_p);
+  }
+  CGF.EmitBranch(ContBlock);
+
+  // Emit code to load the value if it was passed in memory.
+
+  CGF.EmitBlock(InMemBlock);
+  llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+  // Return the appropriate result.
+
+  CGF.EmitBlock(ContBlock);
+  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(),
+                                                 "vaarg.addr");
+  ResAddr->reserveOperandSpace(2);
+  ResAddr->addIncoming(RegAddr, InRegBlock);
+  ResAddr->addIncoming(MemAddr, InMemBlock);
+
+  return ResAddr;
+}
+
+// PIC16 ABI Implementation
+
+namespace {
+
+// PIC16ABIInfo - Minimal ABI lowering for the PIC16 target: everything is
+// classified Direct (void returns are Ignored) and va_arg is unimplemented.
+class PIC16ABIInfo : public ABIInfo {
+  ABIArgInfo classifyReturnType(QualType RetTy,
+                                ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+  ABIArgInfo classifyArgumentType(QualType RetTy,
+                                  ASTContext &Context,
+                                  llvm::LLVMContext &VMContext) const;
+
+  // Classify the return type and every argument of the function.
+  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                           llvm::LLVMContext &VMContext) const {
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+                                            VMContext);
+    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+         it != ie; ++it)
+      it->info = classifyArgumentType(it->type, Context, VMContext);
+  }
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+};
+
+// Target hook container for PIC16; ownership of the ABIInfo passes to the
+// TargetCodeGenInfo base class.
+class PIC16TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  PIC16TargetCodeGenInfo():TargetCodeGenInfo(new PIC16ABIInfo()) {}
+};
+
+}
+
+// Return values on PIC16: void is ignored, everything else is returned
+// directly in the converted LLVM type.
+ABIArgInfo PIC16ABIInfo::classifyReturnType(QualType RetTy,
+                                            ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+
+  return ABIArgInfo::getDirect();
+}
+
+// Arguments on PIC16 are always passed directly.
+ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
+                                              ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  return ABIArgInfo::getDirect();
+}
+
+// va_arg lowering for PIC16 is not implemented; returns a null Value like
+// the SystemZ stub below.
+llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                     CodeGenFunction &CGF) const {
+  // FIXME: Implement
+  return 0;
+}
+
+// ARM ABI Implementation
+
+namespace {
+
+// ARMABIInfo - ABI lowering for ARM, parameterized over the three calling
+// convention variants (APCS, AAPCS, AAPCS+VFP).
+class ARMABIInfo : public ABIInfo {
+public:
+  enum ABIKind {
+    APCS = 0,
+    AAPCS = 1,
+    AAPCS_VFP
+  };
+
+private:
+  ABIKind Kind;
+
+public:
+  ARMABIInfo(ABIKind _Kind) : Kind(_Kind) {}
+
+private:
+  ABIKind getABIKind() const { return Kind; }
+
+  ABIArgInfo classifyReturnType(QualType RetTy,
+                                ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+  ABIArgInfo classifyArgumentType(QualType RetTy,
+                                  ASTContext &Context,
+                                  llvm::LLVMContext &VMContext) const;
+
+  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                           llvm::LLVMContext &VMContext) const;
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+};
+
+// Target hook container for ARM; forwards the selected ABI kind into the
+// owned ARMABIInfo.
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  ARMTargetCodeGenInfo(ARMABIInfo::ABIKind K)
+    :TargetCodeGenInfo(new ARMABIInfo(K)) {}
+};
+
+}
+
+// computeInfo - Classify the return type and all arguments, then force the
+// effective calling convention to match the configured ABI kind.
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                             llvm::LLVMContext &VMContext) const {
+  FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+                                          VMContext);
+  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+       it != ie; ++it) {
+    it->info = classifyArgumentType(it->type, Context, VMContext);
+  }
+
+  // ARM always overrides the calling convention.
+  switch (getABIKind()) {
+  case APCS:
+    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+    break;
+
+  case AAPCS:
+    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+    break;
+
+  case AAPCS_VFP:
+    FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
+    break;
+  }
+}
+
+// classifyArgumentType - Scalars are passed directly (with extension for
+// promotable integers); empty records are ignored; other aggregates are
+// coerced to a packed struct of register-sized integers.
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
+                                            ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+      Ty = EnumTy->getDecl()->getIntegerType();
+
+    return (Ty->isPromotableIntegerType() ?
+            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  }
+
+  // Ignore empty records.
+  if (isEmptyRecord(Context, Ty, true))
+    return ABIArgInfo::getIgnore();
+
+  // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+  // backend doesn't support byval.
+  // FIXME: This doesn't handle alignment > 64 bits.
+  // Choose i64 elements for over-aligned types, i32 otherwise, and round the
+  // type's bit-size up to a whole number of elements.
+  const llvm::Type* ElemTy;
+  unsigned SizeRegs;
+  if (Context.getTypeAlign(Ty) > 32) {
+    ElemTy = llvm::Type::getInt64Ty(VMContext);
+    SizeRegs = (Context.getTypeSize(Ty) + 63) / 64;
+  } else {
+    ElemTy = llvm::Type::getInt32Ty(VMContext);
+    SizeRegs = (Context.getTypeSize(Ty) + 31) / 32;
+  }
+  std::vector<const llvm::Type*> LLVMFields;
+  LLVMFields.push_back(llvm::ArrayType::get(ElemTy, SizeRegs));
+  const llvm::Type* STy = llvm::StructType::get(VMContext, LLVMFields, true);
+  return ABIArgInfo::getCoerce(STy);
+}
+
+// isIntegerLikeType - Decide whether Ty qualifies as APCS "integer-like"
+// (returnable in r0). Recurses through complex element types and record
+// fields; the precise rules deliberately mirror gcc's behavior rather than
+// the APCS wording (see comments below).
+static bool isIntegerLikeType(QualType Ty,
+                              ASTContext &Context,
+                              llvm::LLVMContext &VMContext) {
+  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+  // is called integer-like if its size is less than or equal to one word, and
+  // the offset of each of its addressable sub-fields is zero.
+
+  uint64_t Size = Context.getTypeSize(Ty);
+
+  // Check that the type fits in a word.
+  if (Size > 32)
+    return false;
+
+  // FIXME: Handle vector types!
+  if (Ty->isVectorType())
+    return false;
+
+  // Float types are never treated as "integer like".
+  if (Ty->isRealFloatingType())
+    return false;
+
+  // If this is a builtin or pointer type then it is ok.
+  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+    return true;
+
+  // Small complex integer types are "integer like".
+  if (const ComplexType *CT = Ty->getAs<ComplexType>())
+    return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+  // Single element and zero sized arrays should be allowed, by the definition
+  // above, but they are not.
+
+  // Otherwise, it must be a record type.
+  const RecordType *RT = Ty->getAs<RecordType>();
+  if (!RT) return false;
+
+  // Ignore records with flexible arrays.
+  const RecordDecl *RD = RT->getDecl();
+  if (RD->hasFlexibleArrayMember())
+    return false;
+
+  // Check that all sub-fields are at offset 0, and are themselves "integer
+  // like".
+  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+  bool HadField = false;
+  unsigned idx = 0;
+  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+       i != e; ++i, ++idx) {
+    const FieldDecl *FD = *i;
+
+    // Bit-fields are not addressable, we only need to verify they are "integer
+    // like". We still have to disallow a subsequent non-bitfield, for example:
+    //   struct { int : 0; int x }
+    // is non-integer like according to gcc.
+    if (FD->isBitField()) {
+      if (!RD->isUnion())
+        HadField = true;
+
+      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+        return false;
+
+      continue;
+    }
+
+    // Check if this field is at offset 0.
+    if (Layout.getFieldOffset(idx) != 0)
+      return false;
+
+    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+      return false;
+    
+    // Only allow at most one field in a structure. This doesn't match the
+    // wording above, but follows gcc in situations with a field following an
+    // empty structure.
+    if (!RD->isUnion()) {
+      if (HadField)
+        return false;
+
+      HadField = true;
+    }
+  }
+
+  return true;
+}
+
+// classifyReturnType - Scalars are returned directly/extended. For APCS,
+// "integer-like" aggregates come back in r0 coerced to the smallest integer
+// that holds them; for AAPCS variants, any aggregate of <= 32 bits does.
+// Everything else is returned indirectly via sret.
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
+                                          ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  if (RetTy->isVoidType())
+    return ABIArgInfo::getIgnore();
+
+  if (!CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+      RetTy = EnumTy->getDecl()->getIntegerType();
+
+    return (RetTy->isPromotableIntegerType() ?
+            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  }
+
+  // Are we following APCS?
+  if (getABIKind() == APCS) {
+    if (isEmptyRecord(Context, RetTy, false))
+      return ABIArgInfo::getIgnore();
+
+    // Complex types are all returned as packed integers.
+    //
+    // FIXME: Consider using 2 x vector types if the back end handles them
+    // correctly.
+    if (RetTy->isAnyComplexType())
+      return ABIArgInfo::getCoerce(llvm::IntegerType::get(
+                                     VMContext, Context.getTypeSize(RetTy)));
+
+    // Integer like structures are returned in r0.
+    if (isIntegerLikeType(RetTy, Context, VMContext)) {
+      // Return in the smallest viable integer type.
+      uint64_t Size = Context.getTypeSize(RetTy);
+      if (Size <= 8)
+        return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+      if (Size <= 16)
+        return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
+      return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+    }
+
+    // Otherwise return in memory.
+    return ABIArgInfo::getIndirect(0);
+  }
+
+  // Otherwise this is an AAPCS variant.
+
+  if (isEmptyRecord(Context, RetTy, true))
+    return ABIArgInfo::getIgnore();
+
+  // Aggregates <= 4 bytes are returned in r0; other aggregates
+  // are returned indirectly.
+  uint64_t Size = Context.getTypeSize(RetTy);
+  if (Size <= 32) {
+    // Return in the smallest viable integer type.
+    if (Size <= 8)
+      return ABIArgInfo::getCoerce(llvm::Type::getInt8Ty(VMContext));
+    if (Size <= 16)
+      return ABIArgInfo::getCoerce(llvm::Type::getInt16Ty(VMContext));
+    return ABIArgInfo::getCoerce(llvm::Type::getInt32Ty(VMContext));
+  }
+
+  return ABIArgInfo::getIndirect(0);
+}
+
+// EmitVAArg - Simple byte-pointer va_list lowering for ARM: read the
+// current pointer, cast it to the argument type, and bump the pointer by
+// the argument's size rounded up to 4 bytes.
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                      CodeGenFunction &CGF) const {
+  // FIXME: Need to handle alignment
+  const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+  const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+  CGBuilderTy &Builder = CGF.Builder;
+  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+                                                       "ap");
+  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+  llvm::Type *PTy =
+    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+  // Advance past this argument: size in bytes, rounded up to the 4-byte
+  // stack slot granularity.
+  uint64_t Offset =
+    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+  llvm::Value *NextAddr =
+    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+                      "ap.next");
+  Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+  return AddrTyped;
+}
+
+// classifyReturnType - Conservative default: void is ignored, aggregates
+// are returned indirectly, scalars directly (extended when promotable).
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy,
+                                              ASTContext &Context,
+                                          llvm::LLVMContext &VMContext) const {
+  if (RetTy->isVoidType()) {
+    return ABIArgInfo::getIgnore();
+  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+    return ABIArgInfo::getIndirect(0);
+  } else {
+    // Treat an enum type as its underlying type.
+    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+      RetTy = EnumTy->getDecl()->getIntegerType();
+
+    return (RetTy->isPromotableIntegerType() ?
+            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  }
+}
+
+// SystemZ ABI Implementation
+
+namespace {
+
+// SystemZABIInfo - ABI lowering for SystemZ: aggregates go indirect,
+// integers narrower than 64 bits are extended, va_arg is unimplemented.
+class SystemZABIInfo : public ABIInfo {
+  bool isPromotableIntegerType(QualType Ty) const;
+
+  ABIArgInfo classifyReturnType(QualType RetTy, ASTContext &Context,
+                                llvm::LLVMContext &VMContext) const;
+
+  ABIArgInfo classifyArgumentType(QualType RetTy, ASTContext &Context,
+                                  llvm::LLVMContext &VMContext) const;
+
+  // Classify the return type and every argument of the function.
+  virtual void computeInfo(CGFunctionInfo &FI, ASTContext &Context,
+                          llvm::LLVMContext &VMContext) const {
+    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+                                            Context, VMContext);
+    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+         it != ie; ++it)
+      it->info = classifyArgumentType(it->type, Context, VMContext);
+  }
+
+  virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                 CodeGenFunction &CGF) const;
+};
+
+// Target hook container for SystemZ; ownership of the ABIInfo passes to
+// the TargetCodeGenInfo base class.
+class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  SystemZTargetCodeGenInfo():TargetCodeGenInfo(new SystemZABIInfo()) {}
+};
+
+}
+
+// isPromotableIntegerType - True for builtin integer types narrower than
+// 64 bits, which the SystemZ ABI requires to be sign/zero extended.
+bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
+  // SystemZ ABI requires all 8, 16 and 32 bit quantities to be extended.
+  const BuiltinType *BT = Ty->getAs<BuiltinType>();
+  if (!BT)
+    return false;
+
+  switch (BT->getKind()) {
+  case BuiltinType::Bool:
+  case BuiltinType::Char_S:
+  case BuiltinType::Char_U:
+  case BuiltinType::SChar:
+  case BuiltinType::UChar:
+  case BuiltinType::Short:
+  case BuiltinType::UShort:
+  case BuiltinType::Int:
+  case BuiltinType::UInt:
+    return true;
+  default:
+    return false;
+  }
+}
+
+// va_arg lowering for SystemZ is not implemented; returns a null Value.
+llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+                                       CodeGenFunction &CGF) const {
+  // FIXME: Implement
+  return 0;
+}
+
+
+// classifyReturnType - Void is ignored, aggregates are returned indirectly,
+// scalars directly (extended when promotable under the SystemZ rules).
+ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy,
+                                              ASTContext &Context,
+                                           llvm::LLVMContext &VMContext) const {
+  if (RetTy->isVoidType()) {
+    return ABIArgInfo::getIgnore();
+  } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+    return ABIArgInfo::getIndirect(0);
+  } else {
+    return (isPromotableIntegerType(RetTy) ?
+            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  }
+}
+
+// classifyArgumentType - Aggregates are passed indirectly; scalars directly
+// (extended when promotable under the SystemZ rules).
+ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty,
+                                                ASTContext &Context,
+                                           llvm::LLVMContext &VMContext) const {
+  if (CodeGenFunction::hasAggregateLLVMType(Ty)) {
+    return ABIArgInfo::getIndirect(0);
+  } else {
+    return (isPromotableIntegerType(Ty) ?
+            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+  }
+}
+
+// MSP430 ABI Implementation
+
+namespace {
+
+// MSP430 reuses the default ABI lowering but overrides attribute handling
+// to implement the 'interrupt' attribute (see SetTargetAttributes below).
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+  MSP430TargetCodeGenInfo():TargetCodeGenInfo(new DefaultABIInfo()) {}
+  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                           CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+// SetTargetAttributes - For functions carrying the MSP430 'interrupt'
+// attribute, install the ISR calling convention, force no-inline, and emit
+// a "vector_XXXX" alias so the linker can place the handler in the
+// interrupt vector table.
+void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+                                                  llvm::GlobalValue *GV,
+                                             CodeGen::CodeGenModule &M) const {
+  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+    if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
+      // Handle 'interrupt' attribute:
+      llvm::Function *F = cast<llvm::Function>(GV);
+
+      // Step 1: Set ISR calling convention.
+      F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+      // Step 2: Add attributes goodness.
+      F->addFnAttr(llvm::Attribute::NoInline);
+
+      // Step 3: Emit ISR vector alias.
+      // 0xffe0 is the base of the MSP430 interrupt vector region; the alias
+      // name encodes the absolute vector address in lowercase hex.
+      unsigned Num = attr->getNumber() + 0xffe0;
+      new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+                            "vector_" +
+                            llvm::LowercaseString(llvm::utohexstr(Num)),
+                            GV, &M.getModule());
+    }
+  }
+}
+
+// getTargetCodeGenInfo - Lazily construct and cache the TargetCodeGenInfo
+// matching the target triple's architecture (and, for x86/ARM, its OS/ABI
+// variant). Falls back to DefaultTargetCodeGenInfo for unknown targets.
+// NOTE(review): this const method writes TheTargetCodeGenInfo — presumably
+// the member is mutable; confirm against the class declaration.
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
+  if (TheTargetCodeGenInfo)
+    return *TheTargetCodeGenInfo;
+
+  // For now we just cache the TargetCodeGenInfo in CodeGenModule and don't
+  // free it.
+
+  const llvm::Triple &Triple(getContext().Target.getTriple());
+  switch (Triple.getArch()) {
+  default:
+    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo);
+
+  case llvm::Triple::arm:
+  case llvm::Triple::thumb:
+    // FIXME: We want to know the float calling convention as well.
+    if (strcmp(getContext().Target.getABI(), "apcs-gnu") == 0)
+      return *(TheTargetCodeGenInfo =
+               new ARMTargetCodeGenInfo(ARMABIInfo::APCS));
+
+    return *(TheTargetCodeGenInfo =
+             new ARMTargetCodeGenInfo(ARMABIInfo::AAPCS));
+
+  case llvm::Triple::pic16:
+    return *(TheTargetCodeGenInfo = new PIC16TargetCodeGenInfo());
+
+  case llvm::Triple::systemz:
+    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo());
+
+  case llvm::Triple::msp430:
+    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo());
+
+  case llvm::Triple::x86:
+    // The two bool arguments select Darwin vtable/field-alignment quirks and
+    // small-struct-in-register return, respectively (per X86_32TargetCodeGenInfo).
+    switch (Triple.getOS()) {
+    case llvm::Triple::Darwin:
+      return *(TheTargetCodeGenInfo =
+               new X86_32TargetCodeGenInfo(Context, true, true));
+    case llvm::Triple::Cygwin:
+    case llvm::Triple::MinGW32:
+    case llvm::Triple::MinGW64:
+    case llvm::Triple::AuroraUX:
+    case llvm::Triple::DragonFly:
+    case llvm::Triple::FreeBSD:
+    case llvm::Triple::OpenBSD:
+      return *(TheTargetCodeGenInfo =
+               new X86_32TargetCodeGenInfo(Context, false, true));
+
+    default:
+      return *(TheTargetCodeGenInfo =
+               new X86_32TargetCodeGenInfo(Context, false, false));
+    }
+
+  case llvm::Triple::x86_64:
+    return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo());
+  }
+}
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
new file mode 100644
index 0000000..58b7b79
--- /dev/null
+++ b/lib/CodeGen/TargetInfo.h
@@ -0,0 +1,50 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_TARGETINFO_H
+#define CLANG_CODEGEN_TARGETINFO_H
+
+namespace llvm {
+  class GlobalValue;
+}
+
+namespace clang {
+  class ABIInfo;
+  class Decl;
+
+  namespace CodeGen {
+    class CodeGenModule;
+  }
+
+  /// TargetCodeGenInfo - This class organizes various target-specific
+  /// codegeneration issues, like target-specific attributes, builtins and so
+  /// on.
+  /// TargetCodeGenInfo - This class organizes various target-specific
+  /// codegeneration issues, like target-specific attributes, builtins and so
+  /// on.
+  class TargetCodeGenInfo {
+    ABIInfo *Info;
+  public:
+    // WARNING: Acquires the ownership of ABIInfo.
+    TargetCodeGenInfo(ABIInfo *info = 0):Info(info) { }
+    // Out-of-line virtual destructor; this is the polymorphic base for all
+    // per-target subclasses.
+    virtual ~TargetCodeGenInfo();
+
+    /// getABIInfo() - Returns ABI info helper for the target.
+    const ABIInfo& getABIInfo() const { return *Info; }
+
+    /// SetTargetAttributes - Provides a convenient hook to handle extra
+    /// target-specific attributes for the given global.
+    virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+                                     CodeGen::CodeGenModule &M) const { }
+  };
+}
+
+#endif // CLANG_CODEGEN_TARGETINFO_H