Remove tabs and perform whitespace cleanups.
git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@81346 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 0c98c40..43b098e 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -60,14 +60,14 @@
llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
if (NSConcreteGlobalBlock == 0)
- NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
"_NSConcreteGlobalBlock");
return NSConcreteGlobalBlock;
}
llvm::Constant *BlockModule::getNSConcreteStackBlock() {
if (NSConcreteStackBlock == 0)
- NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
+ NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
"_NSConcreteStackBlock");
return NSConcreteStackBlock;
}
@@ -427,16 +427,16 @@
QualType ResultType = FnType->getAsFunctionType()->getResultType();
- const CGFunctionInfo &FnInfo =
+ const CGFunctionInfo &FnInfo =
CGM.getTypes().getFunctionInfo(ResultType, Args);
-
+
// Cast the function pointer to the right type.
- const llvm::Type *BlockFTy =
+ const llvm::Type *BlockFTy =
CGM.getTypes().GetFunctionType(FnInfo, false);
-
+
const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
-
+
// And call the block.
return EmitCall(FnInfo, Func, Args);
}
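For orientation, the hunk above is the generic block-call path: the invoke pointer is loaded out of the block literal, cast to the block's concrete function type, and called with the literal itself as the hidden first argument (built into Args earlier, not shown here). A minimal C++ sketch of that shape, assuming the usual blocks-ABI literal layout (field names illustrative):

    struct BlockLiteral {
      void *isa;                            // e.g. _NSConcreteStackBlock
      int flags;
      int reserved;
      void (*invoke)(BlockLiteral *, ...);  // what Func points at
      // descriptor and captured variables follow
    };

    int callBlock(BlockLiteral *b, int arg) {
      // Load invoke, cast it to the real signature (the CreateBitCast
      // above), and pass the literal as implicit argument 0.
      return ((int (*)(BlockLiteral *, int))b->invoke)(b, arg);
    }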
@@ -583,7 +583,7 @@
// Check if we should generate debug info for this block.
if (CGM.getDebugInfo())
DebugInfo = CGM.getDebugInfo();
-
+
// Arrange for local static and local extern declarations to appear
// to be local to this function as well, as they are directly referenced
// in a block.
@@ -591,7 +591,7 @@
i != ldm.end();
++i) {
const VarDecl *VD = dyn_cast<VarDecl>(i->first);
-
+
if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
LocalDeclMap[VD] = i->second;
}
@@ -609,7 +609,7 @@
const FunctionType *BlockFunctionType = BExpr->getFunctionType();
QualType ResultType;
bool IsVariadic;
- if (const FunctionProtoType *FTy =
+ if (const FunctionProtoType *FTy =
dyn_cast<FunctionProtoType>(BlockFunctionType)) {
ResultType = FTy->getResultType();
IsVariadic = FTy->isVariadic();
@@ -721,7 +721,7 @@
ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
@@ -803,7 +803,7 @@
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
@@ -887,7 +887,7 @@
ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
@@ -926,7 +926,7 @@
V = Builder.CreateStructGEP(V, 6, "x");
V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
llvm::Value *SrcObj = Builder.CreateLoad(V);
-
+
flag |= BLOCK_BYREF_CALLER;
llvm::Value *N = llvm::ConstantInt::get(
@@ -951,7 +951,7 @@
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(Src, Src->getType()));
-
+
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 61415ad..6309e1d 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -66,7 +66,7 @@
CodeGenTypes &Types;
CodeGenModule &CGM;
llvm::LLVMContext &VMContext;
-
+
ASTContext &getContext() const { return Context; }
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
@@ -89,7 +89,7 @@
/// NSConcreteStackBlock - Cached reference to the class pointer for stack
/// blocks.
llvm::Constant *NSConcreteStackBlock;
-
+
const llvm::Type *BlockDescriptorType;
const llvm::Type *GenericBlockLiteralType;
const llvm::Type *GenericExtendedBlockLiteralType;
@@ -157,11 +157,11 @@
/// ByCopyDeclRefs - Variables from parent scopes that have been imported
/// into this block.
llvm::SmallVector<const BlockDeclRefExpr *, 8> ByCopyDeclRefs;
-
- // ByRefDeclRefs - __block variables from parent scopes that have been
+
+ // ByRefDeclRefs - __block variables from parent scopes that have been
// imported into this block.
llvm::SmallVector<const BlockDeclRefExpr *, 8> ByRefDeclRefs;
-
+
BlockInfo(const llvm::Type *blt, const char *n)
: BlockLiteralTy(blt), Name(n) {
// Skip asm prefix, if any.
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index bfb6be8..77c408a 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -25,21 +25,21 @@
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
-static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
+static RValue EmitBinaryAtomic(CodeGenFunction& CGF,
Intrinsic::ID Id, const CallExpr *E) {
const llvm::Type *ResType[2];
ResType[0] = CGF.ConvertType(E->getType());
ResType[1] = CGF.ConvertType(E->getArg(0)->getType());
Value *AtomF = CGF.CGM.getIntrinsic(Id, ResType, 2);
- return RValue::get(CGF.Builder.CreateCall2(AtomF,
- CGF.EmitScalarExpr(E->getArg(0)),
+ return RValue::get(CGF.Builder.CreateCall2(AtomF,
+ CGF.EmitScalarExpr(E->getArg(0)),
CGF.EmitScalarExpr(E->getArg(1))));
}
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
-static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
+static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
Intrinsic::ID Id, const CallExpr *E,
Instruction::BinaryOps Op) {
const llvm::Type *ResType[2];
@@ -49,26 +49,26 @@
Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
Value *Operand = CGF.EmitScalarExpr(E->getArg(1));
Value *Result = CGF.Builder.CreateCall2(AtomF, Ptr, Operand);
-
+
if (Id == Intrinsic::atomic_load_nand)
Result = CGF.Builder.CreateNot(Result);
-
-
+
+
return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
}
-RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
if (E->Evaluate(Result, CGM.getContext())) {
if (Result.Val.isInt())
- return RValue::get(llvm::ConstantInt::get(VMContext,
+ return RValue::get(llvm::ConstantInt::get(VMContext,
Result.Val.getInt()));
else if (Result.Val.isFloat())
return RValue::get(ConstantFP::get(VMContext, Result.Val.getFloat()));
}
-
+
switch (BuiltinID) {
default: break; // Handle intrinsics and libm functions below.
case Builtin::BI__builtin___CFStringMakeConstantString:
@@ -77,13 +77,13 @@
case Builtin::BI__builtin_va_start:
case Builtin::BI__builtin_va_end: {
Value *ArgValue = EmitVAListRef(E->getArg(0));
- const llvm::Type *DestType =
+ const llvm::Type *DestType =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
if (ArgValue->getType() != DestType)
- ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
ArgValue->getName().data());
- Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
Intrinsic::vaend : Intrinsic::vastart;
return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
}
@@ -91,36 +91,36 @@
Value *DstPtr = EmitVAListRef(E->getArg(0));
Value *SrcPtr = EmitVAListRef(E->getArg(1));
- const llvm::Type *Type =
+ const llvm::Type *Type =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
- return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
DstPtr, SrcPtr));
}
case Builtin::BI__builtin_abs: {
- Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
- Value *CmpResult =
- Builder.CreateICmpSGE(ArgValue,
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue,
llvm::Constant::getNullValue(ArgValue->getType()),
"abscond");
- Value *Result =
+ Value *Result =
Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
-
+
return RValue::get(Result);
}
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, "cast");
@@ -130,11 +130,11 @@
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, &ArgType, 1);
- const llvm::Type *ResultType = ConvertType(E->getType());
+ const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, "cast");
@@ -145,12 +145,12 @@
case Builtin::BI__builtin_ffsll: {
// ffs(x) -> x ? cttz(x) + 1 : 0
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1);
-
+
const llvm::Type *ResultType = ConvertType(E->getType());
- Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall(F, ArgValue, "tmp"),
llvm::ConstantInt::get(ArgType, 1), "tmp");
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
@@ -164,13 +164,13 @@
case Builtin::BI__builtin_parityll: {
// parity(x) -> ctpop(x) & 1
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
-
+
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue, "tmp");
- Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1),
"tmp");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, "cast");
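Each of the bit-manipulation builtins in this stretch lowers to one LLVM intrinsic plus a little glue, exactly as the inline comments state. The two expansions spelled out above, as a small C++ sketch (helper names hypothetical):

    // ffs(x) -> x ? cttz(x) + 1 : 0   (the CreateSelect between arms)
    int ffs_sketch(unsigned x) {
      return x ? __builtin_ctz(x) + 1 : 0;
    }

    // parity(x) -> ctpop(x) & 1       (low bit of the population count)
    int parity_sketch(unsigned x) {
      return __builtin_popcount(x) & 1;
    }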
@@ -180,10 +180,10 @@
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
-
+
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, &ArgType, 1);
-
+
const llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue, "tmp");
if (Result->getType() != ResultType)
@@ -199,7 +199,7 @@
const llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1);
return RValue::get(Builder.CreateCall(F, ArgValue, "tmp"));
- }
+ }
case Builtin::BI__builtin_object_size: {
// FIXME: Implement. For now we just always fail and pretend we
// don't know the object size.
@@ -213,9 +213,9 @@
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should be of type 'int', yes?
- RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
- Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
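The fallback constants here match GCC's documented defaults for __builtin_prefetch: rw = 0 (prepare for a read) and locality = 3 (high temporal locality). So these two calls emit the same llvm.prefetch intrinsic:

    void touch(const char *p) {
      __builtin_prefetch(p);                           // defaults filled in
      __builtin_prefetch(p, /*rw=*/0, /*locality=*/3); // explicit form
    }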
@@ -243,9 +243,9 @@
case Builtin::BI__builtin_isunordered: {
// Ordered comparisons: we know the arguments to these are matching scalar
// floating point values.
- Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
-
+
switch (BuiltinID) {
default: assert(0 && "Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
@@ -263,7 +263,7 @@
case Builtin::BI__builtin_islessgreater:
LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
break;
- case Builtin::BI__builtin_isunordered:
+ case Builtin::BI__builtin_isunordered:
LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
break;
}
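The predicate choice is the whole point of these builtins: the is* comparisons use ordered fcmp predicates, which are false whenever either operand is NaN, while isunordered uses the unordered predicate, which is true exactly in the NaN cases, and all of them are quiet comparisons (no invalid-operation exception, unlike a raw <). A small usage sketch:

    // With a NaN on either side: islessgreater is false (fcmp one),
    // isunordered is true (fcmp uno).
    bool differOrUnordered(double a, double b) {
      return __builtin_islessgreater(a, b) || __builtin_isunordered(a, b);
    }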
@@ -352,7 +352,7 @@
case Builtin::BI__builtin_longjmp: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp, 0, 0);
Value *Buf = EmitScalarExpr(E->getArg(0));
- const llvm::Type *DestType =
+ const llvm::Type *DestType =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
Buf = Builder.CreateBitCast(Buf, DestType);
return RValue::get(Builder.CreateCall(F, Buf));
@@ -409,7 +409,7 @@
case Builtin::BI__sync_fetch_and_nand_8:
case Builtin::BI__sync_fetch_and_nand_16:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);
-
+
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_min, E);
@@ -425,7 +425,7 @@
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
+ return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_add, E,
llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
@@ -462,7 +462,7 @@
case Builtin::BI__sync_nand_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
llvm::Instruction::And);
-
+
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
case Builtin::BI__sync_val_compare_and_swap_4:
@@ -473,7 +473,7 @@
ResType[0]= ConvertType(E->getType());
ResType[1] = ConvertType(E->getArg(0)->getType());
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
- return RValue::get(Builder.CreateCall3(AtomF,
+ return RValue::get(Builder.CreateCall3(AtomF,
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2))));
@@ -490,7 +490,7 @@
ResType[1] = llvm::PointerType::getUnqual(ResType[0]);
Value *AtomF = CGM.getIntrinsic(Intrinsic::atomic_cmp_swap, ResType, 2);
Value *OldVal = EmitScalarExpr(E->getArg(1));
- Value *PrevVal = Builder.CreateCall3(AtomF,
+ Value *PrevVal = Builder.CreateCall3(AtomF,
EmitScalarExpr(E->getArg(0)),
OldVal,
EmitScalarExpr(E->getArg(2)));
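Both compare-and-swap families funnel into the same atomic_cmp_swap intrinsic; the val form returns the previous value directly, and the bool form above derives its result by comparing that previous value (PrevVal) against the expected one in the code that follows this hunk. A hedged sketch of the bool form in terms of the val form:

    bool bool_cas_sketch(int *p, int expected, int desired) {
      // The intrinsic yields the prior contents of *p; success means
      // it matched what we expected to find there.
      int prev = __sync_val_compare_and_swap(p, expected, desired);
      return prev == expected;
    }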
@@ -524,7 +524,7 @@
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::memory_barrier), C, C + 5);
return RValue::get(0);
}
-
+
// Library functions with special handling.
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
@@ -551,31 +551,31 @@
return RValue::get(Builder.CreateCall2(F, Base, Exponent, "tmp"));
}
}
-
+
// If this is an alias for a libm function (e.g. __builtin_sin) turn it into
// that function.
if (getContext().BuiltinInfo.isLibFunction(BuiltinID) ||
getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return EmitCall(CGM.getBuiltinLibFunction(BuiltinID),
+ return EmitCall(CGM.getBuiltinLibFunction(BuiltinID),
E->getCallee()->getType(), E->arg_begin(),
E->arg_end());
-
+
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
if (const char *Prefix =
- llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+ llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
-
+
if (IntrinsicID != Intrinsic::not_intrinsic) {
SmallVector<Value*, 16> Args;
-
+
Function *F = CGM.getIntrinsic(IntrinsicID);
const llvm::FunctionType *FTy = F->getFunctionType();
-
+
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue = EmitScalarExpr(E->getArg(i));
-
+
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
const llvm::Type *PTy = FTy->getParamType(i);
@@ -584,36 +584,36 @@
"Must be able to losslessly bit cast to param");
ArgValue = Builder.CreateBitCast(ArgValue, PTy);
}
-
+
Args.push_back(ArgValue);
}
-
+
Value *V = Builder.CreateCall(F, Args.data(), Args.data() + Args.size());
QualType BuiltinRetType = E->getType();
-
+
const llvm::Type *RetTy = llvm::Type::getVoidTy(VMContext);
if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType);
-
+
if (RetTy != V->getType()) {
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
V = Builder.CreateBitCast(V, RetTy);
}
-
+
return RValue::get(V);
}
-
+
// See if we have a target specific builtin that needs to be lowered.
if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
return RValue::get(V);
-
+
ErrorUnsupported(E, "builtin function");
-
+
// Unknown builtin, for now just dump it out and return undef.
if (hasAggregateLLVMType(E->getType()))
return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType())));
return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
-}
+}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
@@ -629,9 +629,9 @@
}
}
-Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
-
+
llvm::SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
@@ -639,9 +639,9 @@
switch (BuiltinID) {
default: return 0;
- case X86::BI__builtin_ia32_pslldi128:
+ case X86::BI__builtin_ia32_pslldi128:
case X86::BI__builtin_ia32_psllqi128:
- case X86::BI__builtin_ia32_psllwi128:
+ case X86::BI__builtin_ia32_psllwi128:
case X86::BI__builtin_ia32_psradi128:
case X86::BI__builtin_ia32_psrawi128:
case X86::BI__builtin_ia32_psrldi128:
@@ -655,7 +655,7 @@
Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
+
switch (BuiltinID) {
default: assert(0 && "Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi128:
@@ -692,11 +692,11 @@
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
}
- case X86::BI__builtin_ia32_pslldi:
+ case X86::BI__builtin_ia32_pslldi:
case X86::BI__builtin_ia32_psllqi:
- case X86::BI__builtin_ia32_psllwi:
+ case X86::BI__builtin_ia32_psllwi:
case X86::BI__builtin_ia32_psradi:
case X86::BI__builtin_ia32_psrawi:
case X86::BI__builtin_ia32_psrldi:
@@ -707,7 +707,7 @@
Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
const char *name = 0;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
-
+
switch (BuiltinID) {
default: assert(0 && "Unsupported shift intrinsic!");
case X86::BI__builtin_ia32_pslldi:
@@ -744,7 +744,7 @@
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
- return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+ return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
}
case X86::BI__builtin_ia32_cmpps: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
@@ -783,10 +783,10 @@
const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
-
+
// cast val v2i64
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
-
+
// extract (0, 1)
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
@@ -799,9 +799,9 @@
}
}
-Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
default: return 0;
}
-}
+}
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index f610de8..e311912 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-// We might split this into multiple files if it gets too unwieldy
+// We might split this into multiple files if it gets too unwieldy
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@@ -25,54 +25,54 @@
using namespace clang;
using namespace CodeGen;
-void
+void
CodeGenFunction::EmitCXXGlobalDtorRegistration(const CXXDestructorDecl *Dtor,
llvm::Constant *DeclPtr) {
// FIXME: This is ABI dependent and we use the Itanium ABI.
-
- const llvm::Type *Int8PtrTy =
+
+ const llvm::Type *Int8PtrTy =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
-
+
std::vector<const llvm::Type *> Params;
Params.push_back(Int8PtrTy);
-
+
// Get the destructor function type
- const llvm::Type *DtorFnTy =
+ const llvm::Type *DtorFnTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
DtorFnTy = llvm::PointerType::getUnqual(DtorFnTy);
-
+
Params.clear();
Params.push_back(DtorFnTy);
Params.push_back(Int8PtrTy);
Params.push_back(Int8PtrTy);
-
+
// Get the __cxa_atexit function type
// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
- const llvm::FunctionType *AtExitFnTy =
+ const llvm::FunctionType *AtExitFnTy =
llvm::FunctionType::get(ConvertType(getContext().IntTy), Params, false);
-
+
llvm::Constant *AtExitFn = CGM.CreateRuntimeFunction(AtExitFnTy,
"__cxa_atexit");
-
+
llvm::Constant *Handle = CGM.CreateRuntimeVariable(Int8PtrTy,
"__dso_handle");
-
+
llvm::Constant *DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
-
+
llvm::Value *Args[3] = { llvm::ConstantExpr::getBitCast(DtorFn, DtorFnTy),
llvm::ConstantExpr::getBitCast(DeclPtr, Int8PtrTy),
llvm::ConstantExpr::getBitCast(Handle, Int8PtrTy) };
Builder.CreateCall(AtExitFn, &Args[0], llvm::array_endof(Args));
}
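For context, this function implements the Itanium C++ ABI's destructor registration: right after a global's constructor runs, the emitted code calls __cxa_atexit(dtor, &object, &__dso_handle). A source-level sketch (the code above bitcasts the destructor itself; the helper below is illustrative and just makes the void* signature explicit):

    extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
    extern "C" void *__dso_handle;

    struct Widget { ~Widget(); };
    Widget g;

    static void run_g_dtor(void *p) {       // illustrative helper
      static_cast<Widget *>(p)->~Widget();
    }
    // After g's constructor runs, the compiler emits, in effect:
    //   __cxa_atexit(&run_g_dtor, &g, &__dso_handle);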
-void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
llvm::Constant *DeclPtr) {
assert(D.hasGlobalStorage() &&
"VarDecl must have global storage!");
-
+
const Expr *Init = D.getInit();
QualType T = D.getType();
-
+
if (T->isReferenceType()) {
ErrorUnsupported(Init, "global variable that binds to a reference");
} else if (!hasAggregateLLVMType(T)) {
@@ -82,7 +82,7 @@
EmitComplexExprIntoAddr(Init, DeclPtr, T.isVolatileQualified());
} else {
EmitAggExpr(Init, DeclPtr, T.isVolatileQualified());
-
+
if (const RecordType *RT = T->getAs<RecordType>()) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor())
@@ -95,16 +95,16 @@
CodeGenModule::EmitCXXGlobalInitFunc() {
if (CXXGlobalInits.empty())
return;
-
+
const llvm::FunctionType *FTy = llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
false);
-
+
// Create our global initialization function.
// FIXME: Should this be tweakable by targets?
- llvm::Function *Fn =
+ llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
"__cxx_global_initialization", &TheModule);
-
+
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
&CXXGlobalInits[0],
CXXGlobalInits.size());
@@ -114,20 +114,20 @@
void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
const VarDecl **Decls,
unsigned NumDecls) {
- StartFunction(0, getContext().VoidTy, Fn, FunctionArgList(),
+ StartFunction(0, getContext().VoidTy, Fn, FunctionArgList(),
SourceLocation());
-
+
for (unsigned i = 0; i != NumDecls; ++i) {
const VarDecl *D = Decls[i];
-
+
llvm::Constant *DeclPtr = CGM.GetAddrOfGlobalVar(D);
EmitCXXGlobalVarDeclInit(*D, DeclPtr);
}
FinishFunction();
}
-void
-CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
+void
+CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
llvm::GlobalVariable *GV) {
// FIXME: This should use __cxa_guard_{acquire,release}?
@@ -137,36 +137,36 @@
llvm::SmallString<256> GuardVName;
llvm::raw_svector_ostream GuardVOut(GuardVName);
mangleGuardVariable(&D, getContext(), GuardVOut);
-
+
// Create the guard variable.
- llvm::GlobalValue *GuardV =
+ llvm::GlobalValue *GuardV =
new llvm::GlobalVariable(CGM.getModule(), llvm::Type::getInt64Ty(VMContext), false,
GV->getLinkage(),
llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext)),
GuardVName.str());
-
+
// Load the first byte of the guard variable.
const llvm::Type *PtrTy = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
- llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
+ llvm::Value *V = Builder.CreateLoad(Builder.CreateBitCast(GuardV, PtrTy),
"tmp");
-
+
// Compare it against 0.
llvm::Value *nullValue = llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext));
llvm::Value *ICmp = Builder.CreateICmpEQ(V, nullValue , "tobool");
-
+
llvm::BasicBlock *InitBlock = createBasicBlock("init");
llvm::BasicBlock *EndBlock = createBasicBlock("init.end");
// If the guard variable is 0, jump to the initializer code.
Builder.CreateCondBr(ICmp, InitBlock, EndBlock);
-
+
EmitBlock(InitBlock);
EmitCXXGlobalVarDeclInit(D, GV);
Builder.CreateStore(llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1),
Builder.CreateBitCast(GuardV, PtrTy));
-
+
EmitBlock(EndBlock);
}
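What this emits, at the source level: a 64-bit guard variable whose first byte records whether the static has been constructed, with no locking (hence the FIXME about __cxa_guard_acquire/release). A hedged sketch, guard name illustrative:

    static long long guard_obj;            // zero-initialized
    void lazyInit() {
      if (*(char *)&guard_obj == 0) {      // load first byte, compare to 0
        /* run the static's initializer */ // EmitCXXGlobalVarDeclInit
        *(char *)&guard_obj = 1;           // store 1 through the same cast
      }                                    // fall through to init.end
    }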
@@ -175,25 +175,25 @@
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd) {
- assert(MD->isInstance() &&
+ assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
// A call to a trivial destructor requires no code generation.
if (const CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(MD))
if (Destructor->isTrivial())
return RValue::get(0);
-
+
const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
-
+
CallArgList Args;
-
+
// Push the this ptr.
Args.push_back(std::make_pair(RValue::get(This),
MD->getThisType(getContext())));
-
+
// And the rest of the call args
EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
-
+
QualType ResultType = MD->getType()->getAsFunctionType()->getResultType();
return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args),
Callee, Args, MD);
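The argument list built here is the member-function calling convention in miniature: the object address goes first as the implicit 'this' (the make_pair above), followed by the written arguments. Conceptually:

    struct S { int v; int f(int x); };
    int S::f(int x) { return v + x; }

    // s.f(42) is emitted as a call whose argument list is (&s, 42).
    int call(S &s) { return s.f(42); }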
@@ -205,11 +205,11 @@
const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
llvm::Value *This;
-
+
if (ME->isArrow())
This = EmitScalarExpr(ME->getBase());
else {
@@ -218,27 +218,27 @@
}
// C++ [class.virtual]p12:
- // Explicit qualification with the scope operator (5.1) suppresses the
+ // Explicit qualification with the scope operator (5.1) suppresses the
// virtual call mechanism.
llvm::Value *Callee;
if (MD->isVirtual() && !ME->hasQualifier())
Callee = BuildVirtualCall(MD, This, Ty);
- else if (const CXXDestructorDecl *Destructor
+ else if (const CXXDestructorDecl *Destructor
= dyn_cast<CXXDestructorDecl>(MD))
Callee = CGM.GetAddrOfFunction(GlobalDecl(Destructor, Dtor_Complete), Ty);
else
Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
-
- return EmitCXXMemberCall(MD, Callee, This,
+
+ return EmitCXXMemberCall(MD, Callee, This,
CE->arg_begin(), CE->arg_end());
}
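The [class.virtual]p12 rule being applied: a qualified member name bypasses the vtable, so only the unqualified call goes through BuildVirtualCall. For example:

    struct B { virtual void f(); };
    struct D : B { virtual void f(); };

    void call(D &d) {
      d.f();      // unqualified: virtual dispatch (BuildVirtualCall)
      d.B::f();   // qualified: direct call to B::f, dispatch suppressed
    }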
-RValue
+RValue
CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD) {
- assert(MD->isInstance() &&
+ assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
-
+
if (MD->isCopyAssignment()) {
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
if (ClassDecl->hasTrivialCopyAssignment()) {
@@ -251,15 +251,15 @@
return RValue::get(This);
}
}
-
+
const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
-
+
llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
-
+
return EmitCXXMemberCall(MD, Callee, This,
E->arg_begin() + 1, E->arg_end());
}
@@ -268,30 +268,30 @@
CodeGenFunction::EmitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *E) {
assert((E->getCastKind() == CastExpr::CK_UserDefinedConversion) &&
"EmitCXXFunctionalCastExpr - called with wrong cast");
-
+
CXXMethodDecl *MD = E->getTypeConversionMethod();
assert(MD && "EmitCXXFunctionalCastExpr - null conversion method");
assert(isa<CXXConversionDecl>(MD) && "EmitCXXFunctionalCastExpr - not"
" method decl");
const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
-
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), Ty);
llvm::Value *This = EmitLValue(E->getSubExpr()).getAddress();
RValue RV = EmitCXXMemberCall(MD, Callee, This, 0, 0);
if (RV.isAggregate())
- RV = RValue::get(RV.getAggregateAddr());
+ RV = RValue::get(RV.getAggregateAddr());
return RV;
}
llvm::Value *CodeGenFunction::LoadCXXThis() {
- assert(isa<CXXMethodDecl>(CurFuncDecl) &&
+ assert(isa<CXXMethodDecl>(CurFuncDecl) &&
"Must be in a C++ member function decl to load 'this'");
assert(cast<CXXMethodDecl>(CurFuncDecl)->isInstance() &&
"Must be in a C++ member function decl to load 'this'");
-
+
// FIXME: What if we're inside a block?
// ans: See how CodeGenFunction::LoadObjCSelf() uses
// CodeGenFunction::BlockForwardSelf() for how to do this.
@@ -306,7 +306,7 @@
e = ClassDecl->bases_end(); i != e; ++i) {
if (i->isVirtual())
continue;
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
if (Base == BaseClassDecl) {
NestedBasePaths.push_back(BaseClassDecl);
@@ -318,7 +318,7 @@
e = ClassDecl->bases_end(); i != e; ++i) {
if (i->isVirtual())
continue;
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
if (GetNestedPaths(NestedBasePaths, Base, BaseClassDecl)) {
NestedBasePaths.push_back(Base);
@@ -329,34 +329,34 @@
}
llvm::Value *CodeGenFunction::AddressCXXOfBaseClass(llvm::Value *BaseValue,
- const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) {
if (ClassDecl == BaseClassDecl)
return BaseValue;
-
+
llvm::Type *I8Ptr = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
llvm::SmallVector<const CXXRecordDecl *, 16> NestedBasePaths;
GetNestedPaths(NestedBasePaths, ClassDecl, BaseClassDecl);
- assert(NestedBasePaths.size() > 0 &&
+ assert(NestedBasePaths.size() > 0 &&
"AddressCXXOfBaseClass - inheritance path failed");
NestedBasePaths.push_back(ClassDecl);
uint64_t Offset = 0;
-
+
// Accessing a member of the base class. Must add delta to
// the load of 'this'.
for (unsigned i = NestedBasePaths.size()-1; i > 0; i--) {
const CXXRecordDecl *DerivedClass = NestedBasePaths[i];
const CXXRecordDecl *BaseClass = NestedBasePaths[i-1];
- const ASTRecordLayout &Layout =
+ const ASTRecordLayout &Layout =
getContext().getASTRecordLayout(DerivedClass);
Offset += Layout.getBaseClassOffset(BaseClass) / 8;
}
- llvm::Value *OffsetVal =
+ llvm::Value *OffsetVal =
llvm::ConstantInt::get(
CGM.getTypes().ConvertType(CGM.getContext().LongTy), Offset);
BaseValue = Builder.CreateBitCast(BaseValue, I8Ptr);
BaseValue = Builder.CreateGEP(BaseValue, OffsetVal, "add.ptr");
- QualType BTy =
+ QualType BTy =
getContext().getCanonicalType(
getContext().getTypeDeclType(const_cast<CXXRecordDecl*>(BaseClassDecl)));
const llvm::Type *BasePtr = ConvertType(BTy);
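The arithmetic in this function mirrors an ordinary derived-to-base pointer conversion under non-virtual multiple inheritance: sum the per-level base-class offsets along the inheritance path (dividing by 8 to turn the record layout's bit offsets into bytes), then adjust the pointer. For instance:

    struct A { int a; };
    struct B { int b; };
    struct C : A, B { };

    B *toBase(C *c) {
      // The implicit conversion adds B's nonzero offset within C --
      // the same value the bitcast + GEP + bitcast sequence computes.
      return c;
    }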
@@ -377,52 +377,52 @@
llvm::Value *This) {
const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
assert(CA && "Do we support VLA for construction?");
-
+
// Create a temporary for the loop index and initialize it with 0.
llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
"loop.index");
- llvm::Value* zeroConstant =
+ llvm::Value* zeroConstant =
llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
Builder.CreateStore(zeroConstant, IndexPtr, false);
-
+
// Start the loop with a block that tests the condition.
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
+
EmitBlock(CondBlock);
-
+
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
+
// Generate: if (loop-index < number-of-elements) fall to the loop body,
// otherwise, go to the block after the for-loop.
uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
- llvm::Value * NumElementsPtr =
+ llvm::Value * NumElementsPtr =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
"isless");
// If the condition is true, execute the body.
Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
+
EmitBlock(ForBody);
-
+
llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
// Inside the loop body, emit the constructor call on the array element.
Counter = Builder.CreateLoad(IndexPtr);
llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
EmitCXXConstructorCall(D, Ctor_Complete, Address, 0, 0);
-
+
EmitBlock(ContinueBlock);
-
+
// Emit the increment of the loop counter.
llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
Counter = Builder.CreateLoad(IndexPtr);
NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
Builder.CreateStore(NextVal, IndexPtr, false);
-
+
// Finally, branch back up to the condition for the next iteration.
EmitBranch(CondBlock);
-
+
// Emit the fall-through block.
EmitBlock(AfterFor, true);
}
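The emitted control flow is just a counted loop constructing the elements in increasing index order; a source-level equivalent (sketch, hypothetical helper):

    #include <new>

    template <class T>
    void constructArray(T *a, unsigned long long n) {
      for (unsigned long long i = 0; i != n; ++i)  // for.cond / for.body
        new (&a[i]) T();    // one EmitCXXConstructorCall per element
    }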
@@ -435,7 +435,7 @@
llvm::Value *This) {
const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
assert(CA && "Do we support VLA for destruction?");
- llvm::Value *One = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::Value *One = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
1);
uint64_t ElementCount = getContext().getConstantArrayElementCount(CA);
// Create a temporary for the loop index and initialize it with count of
@@ -443,54 +443,54 @@
llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
"loop.index");
// Index = ElementCount;
- llvm::Value* UpperCount =
+ llvm::Value* UpperCount =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), ElementCount);
Builder.CreateStore(UpperCount, IndexPtr, false);
-
+
// Start the loop with a block that tests the condition.
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
+
EmitBlock(CondBlock);
-
+
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
+
// Generate: if (loop-index != 0) fall to the loop body,
// otherwise, go to the block after the for-loop.
- llvm::Value* zeroConstant =
+ llvm::Value* zeroConstant =
llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
llvm::Value *IsNE = Builder.CreateICmpNE(Counter, zeroConstant,
"isne");
// If the condition is true, execute the body.
Builder.CreateCondBr(IsNE, ForBody, AfterFor);
-
+
EmitBlock(ForBody);
-
+
llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
// Inside the loop body, emit the destructor call on the array element.
Counter = Builder.CreateLoad(IndexPtr);
Counter = Builder.CreateSub(Counter, One);
llvm::Value *Address = Builder.CreateInBoundsGEP(This, Counter, "arrayidx");
EmitCXXDestructorCall(D, Dtor_Complete, Address);
-
+
EmitBlock(ContinueBlock);
-
+
// Emit the decrement of the loop counter.
Counter = Builder.CreateLoad(IndexPtr);
Counter = Builder.CreateSub(Counter, One, "dec");
Builder.CreateStore(Counter, IndexPtr, false);
-
+
// Finally, branch back up to the condition for the next iteration.
EmitBranch(CondBlock);
-
+
// Emit the fall-through block.
EmitBlock(AfterFor, true);
}
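This loop runs the opposite direction: the counter starts at the element count and each iteration destroys element counter - 1, so elements are destroyed in reverse construction order, as C++ requires. Sketch (hypothetical helper):

    template <class T>
    void destroyArray(T *a, unsigned long long n) {
      for (unsigned long long i = n; i != 0; --i)  // CreateSub, then test
        a[i - 1].~T();      // one EmitCXXDestructorCall per element
    }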
void
-CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
- CXXCtorType Type,
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd) {
@@ -506,31 +506,31 @@
return;
}
}
-
+
llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
EmitCXXMemberCall(D, Callee, This, ArgBeg, ArgEnd);
}
-void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D,
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *D,
CXXDtorType Type,
llvm::Value *This) {
llvm::Value *Callee = CGM.GetAddrOfCXXDestructor(D, Type);
-
+
EmitCXXMemberCall(D, Callee, This, 0, 0);
}
-void
-CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
+void
+CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
const CXXConstructExpr *E) {
assert(Dest && "Must have a destination!");
-
- const CXXRecordDecl *RD =
+
+ const CXXRecordDecl *RD =
cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
if (RD->hasTrivialConstructor())
return;
- // Code gen optimization to eliminate copy constructor and return
+ // Code gen optimization to eliminate copy constructor and return
// its first argument instead.
if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
CXXConstructExpr::const_arg_iterator i = E->arg_begin();
@@ -538,7 +538,7 @@
return;
}
// Call the constructor.
- EmitCXXConstructorCall(E->getConstructor(), Ctor_Complete, Dest,
+ EmitCXXConstructorCall(E->getConstructor(), Ctor_Complete, Dest,
E->arg_begin(), E->arg_end());
}
@@ -547,21 +547,21 @@
ErrorUnsupported(E, "new[] expression");
return llvm::UndefValue::get(ConvertType(E->getType()));
}
-
+
QualType AllocType = E->getAllocatedType();
FunctionDecl *NewFD = E->getOperatorNew();
const FunctionProtoType *NewFTy = NewFD->getType()->getAsFunctionProtoType();
-
+
CallArgList NewArgs;
// The allocation size is the first argument.
QualType SizeTy = getContext().getSizeType();
- llvm::Value *AllocSize =
- llvm::ConstantInt::get(ConvertType(SizeTy),
+ llvm::Value *AllocSize =
+ llvm::ConstantInt::get(ConvertType(SizeTy),
getContext().getTypeSize(AllocType) / 8);
NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
-
+
// Emit the rest of the arguments.
// FIXME: Ideally, this should just use EmitCallArgs.
CXXNewExpr::const_arg_iterator NewArg = E->placement_arg_begin();
@@ -571,24 +571,24 @@
// has already been emitted.
for (unsigned i = 1, e = NewFTy->getNumArgs(); i != e; ++i, ++NewArg) {
QualType ArgType = NewFTy->getArgType(i);
-
+
assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
- getTypePtr() ==
- getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
+ getTypePtr() ==
+ getContext().getCanonicalType(NewArg->getType()).getTypePtr() &&
"type mismatch in call argument!");
-
- NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
+
+ NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
ArgType));
-
+
}
-
- // Either we've emitted all the call args, or we have a call to a
+
+ // Either we've emitted all the call args, or we have a call to a
// variadic function.
- assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
+ assert((NewArg == E->placement_arg_end() || NewFTy->isVariadic()) &&
"Extra arguments in non-variadic function!");
-
+
// If we still have any arguments, emit them using the type of the argument.
- for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
+ for (CXXNewExpr::const_arg_iterator NewArgEnd = E->placement_arg_end();
NewArg != NewArgEnd; ++NewArg) {
QualType ArgType = NewArg->getType();
NewArgs.push_back(std::make_pair(EmitCallArg(*NewArg, ArgType),
@@ -596,7 +596,7 @@
}
// Emit the call to new.
- RValue RV =
+ RValue RV =
EmitCall(CGM.getTypes().getFunctionInfo(NewFTy->getResultType(), NewArgs),
CGM.GetAddrOfFunction(GlobalDecl(NewFD)),
NewArgs, NewFD);
@@ -618,26 +618,26 @@
NewNull = createBasicBlock("new.null");
NewNotNull = createBasicBlock("new.notnull");
NewEnd = createBasicBlock("new.end");
-
- llvm::Value *IsNull =
- Builder.CreateICmpEQ(NewPtr,
+
+ llvm::Value *IsNull =
+ Builder.CreateICmpEQ(NewPtr,
llvm::Constant::getNullValue(NewPtr->getType()),
"isnull");
-
+
Builder.CreateCondBr(IsNull, NewNull, NewNotNull);
EmitBlock(NewNotNull);
}
-
+
NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
-
+
if (AllocType->isPODType()) {
if (E->getNumConstructorArgs() > 0) {
- assert(E->getNumConstructorArgs() == 1 &&
+ assert(E->getNumConstructorArgs() == 1 &&
"Can only have one argument to initializer of POD type.");
const Expr *Init = E->getConstructorArg(0);
-
- if (!hasAggregateLLVMType(AllocType))
+
+ if (!hasAggregateLLVMType(AllocType))
Builder.CreateStore(EmitScalarExpr(Init), NewPtr);
else if (AllocType->isAnyComplexType())
EmitComplexExprIntoAddr(Init, NewPtr, AllocType.isVolatileQualified());
@@ -645,11 +645,11 @@
EmitAggExpr(Init, NewPtr, AllocType.isVolatileQualified());
}
} else {
- // Call the constructor.
+ // Call the constructor.
CXXConstructorDecl *Ctor = E->getConstructor();
-
- EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
- E->constructor_arg_begin(),
+
+ EmitCXXConstructorCall(Ctor, Ctor_Complete, NewPtr,
+ E->constructor_arg_begin(),
E->constructor_arg_end());
}
@@ -658,15 +658,15 @@
EmitBlock(NewNull);
Builder.CreateBr(NewEnd);
EmitBlock(NewEnd);
-
+
llvm::PHINode *PHI = Builder.CreatePHI(NewPtr->getType());
PHI->reserveOperandSpace(2);
PHI->addIncoming(NewPtr, NewNotNull);
PHI->addIncoming(llvm::Constant::getNullValue(NewPtr->getType()), NewNull);
-
+
NewPtr = PHI;
}
-
+
return NewPtr;
}
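Putting the pieces of EmitCXXNewExpr together: size the allocation, call operator new, null-check when the chosen operator new can return null, construct, and merge the two paths in a PHI. A sketch using the nothrow form, where the null check is observable (helper name hypothetical):

    #include <new>

    template <class T>
    T *newSketch() {
      void *raw = operator new(sizeof(T), std::nothrow); // AllocSize arg
      T *p = static_cast<T *>(raw);     // the bitcast of NewPtr
      if (p)                            // new.notnull
        new (p) T();                    // run the constructor
      return p;                         // the PHI of p and null
    }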
@@ -676,22 +676,22 @@
return;
}
- QualType DeleteTy =
+ QualType DeleteTy =
E->getArgument()->getType()->getAs<PointerType>()->getPointeeType();
-
+
llvm::Value *Ptr = EmitScalarExpr(E->getArgument());
-
+
// Null check the pointer.
llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
- llvm::Value *IsNull =
+ llvm::Value *IsNull =
Builder.CreateICmpEQ(Ptr, llvm::Constant::getNullValue(Ptr->getType()),
"isnull");
-
+
Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
EmitBlock(DeleteNotNull);
-
+
// Call the destructor if necessary.
if (const RecordType *RT = DeleteTy->getAs<RecordType>()) {
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
@@ -701,29 +701,29 @@
ErrorUnsupported(E, "delete expression with virtual destructor");
return;
}
-
+
EmitCXXDestructorCall(Dtor, Dtor_Complete, Ptr);
}
}
}
-
+
// Call delete.
FunctionDecl *DeleteFD = E->getOperatorDelete();
- const FunctionProtoType *DeleteFTy =
+ const FunctionProtoType *DeleteFTy =
DeleteFD->getType()->getAsFunctionProtoType();
-
+
CallArgList DeleteArgs;
QualType ArgTy = DeleteFTy->getArgType(0);
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
DeleteArgs.push_back(std::make_pair(RValue::get(DeletePtr), ArgTy));
-
+
// Emit the call to delete.
- EmitCall(CGM.getTypes().getFunctionInfo(DeleteFTy->getResultType(),
+ EmitCall(CGM.getTypes().getFunctionInfo(DeleteFTy->getResultType(),
DeleteArgs),
CGM.GetAddrOfFunction(GlobalDecl(DeleteFD)),
DeleteArgs, DeleteFD);
-
+
EmitBlock(DeleteEnd);
}
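And the mirror image for delete: everything is skipped when the pointer is null; otherwise the destructor (if any) runs, then the deallocation function is called. Sketch (helper name hypothetical):

    template <class T>
    void deleteSketch(T *p) {
      if (p != 0) {          // the isnull branch to delete.end
        p->~T();             // EmitCXXDestructorCall when nontrivial
        operator delete(p);  // the emitted call to operator delete
      }
    }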
@@ -732,34 +732,34 @@
EmitGlobal(GlobalDecl(D, Ctor_Base));
}
-void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
-
+
llvm::Function *Fn = GetAddrOfCXXConstructor(D, Type);
-
+
CodeGenFunction(*this).GenerateCode(D, Fn);
-
+
SetFunctionDefinitionAttributes(D, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
}
llvm::Function *
-CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
-
+
const char *Name = getMangledCXXCtorName(D, Type);
return cast<llvm::Function>(
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
}
-const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
+const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
CXXCtorType Type) {
llvm::SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
mangleCXXCtor(D, Type, Context, Out);
-
+
Name += '\0';
return UniqueMangledName(Name.begin(), Name.end());
}
@@ -769,33 +769,33 @@
EmitCXXDestructor(D, Dtor_Base);
}
-void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
llvm::Function *Fn = GetAddrOfCXXDestructor(D, Type);
-
+
CodeGenFunction(*this).GenerateCode(D, Fn);
-
+
SetFunctionDefinitionAttributes(D, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
}
llvm::Function *
-CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D), false);
-
+
const char *Name = getMangledCXXDtorName(D, Type);
return cast<llvm::Function>(
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
}
-const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D,
+const char *CodeGenModule::getMangledCXXDtorName(const CXXDestructorDecl *D,
CXXDtorType Type) {
llvm::SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
mangleCXXDtor(D, Type, Context, Out);
-
+
Name += '\0';
return UniqueMangledName(Name.begin(), Name.end());
}
@@ -891,7 +891,7 @@
const CXXRecordDecl *RD, uint64_t Offset) {
for (CXXRecordDecl::base_class_const_iterator i =RD->bases_begin(),
e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
if (i->isVirtual() && !SeenVBase.count(Base)) {
SeenVBase.insert(Base);
@@ -998,7 +998,7 @@
// If we can find a previously allocated slot for this, reuse it.
if (OverrideMethod(MD, m, MorallyVirtual, Offset))
return;
-
+
// else allocate a new slot.
Index[MD] = submethods.size();
submethods.push_back(m);
@@ -1029,7 +1029,7 @@
e = RD->bases_end(); i != e; ++i) {
if (i->isVirtual())
continue;
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
if (Base != PrimaryBase || PrimaryBaseWasVirtual) {
uint64_t o = Offset + Layout.getBaseClassOffset(Base);
@@ -1090,7 +1090,7 @@
return;
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
// vtables are composed from the chain of primaries.
@@ -1113,7 +1113,7 @@
return 0;
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
std::vector<llvm::Constant *> offsets;
@@ -1154,7 +1154,7 @@
Path->push_back(std::make_pair(RD, Offset));
for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *Base =
cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
if (i->isVirtual() && !IndirectPrimary.count(Base)) {
// Mark it so we don't output it twice.
@@ -1306,7 +1306,7 @@
CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *&This,
const llvm::Type *Ty) {
// FIXME: If we know the dynamic type, we don't have to do a virtual dispatch.
-
+
// FIXME: move to Context
if (vtableinfo == 0)
vtableinfo = new VtableInfo(CGM);
@@ -1328,39 +1328,39 @@
/// array of objects from SrcValue to DestValue. Copying can be either a bitwise
/// copy or via a copy constructor call.
// FIXME. Consolidate this with EmitCXXAggrConstructorCall.
-void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
+void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
llvm::Value *Src,
const ArrayType *Array,
- const CXXRecordDecl *BaseClassDecl,
+ const CXXRecordDecl *BaseClassDecl,
QualType Ty) {
const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
assert(CA && "VLA cannot be copied over");
bool BitwiseCopy = BaseClassDecl->hasTrivialCopyConstructor();
-
+
// Create a temporary for the loop index and initialize it with 0.
llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
"loop.index");
- llvm::Value* zeroConstant =
+ llvm::Value* zeroConstant =
llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
Builder.CreateStore(zeroConstant, IndexPtr, false);
// Start the loop with a block that tests the condition.
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
+
EmitBlock(CondBlock);
-
+
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
// Generate: if (loop-index < number-of-elements) fall to the loop body,
// otherwise, go to the block after the for-loop.
uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
- llvm::Value * NumElementsPtr =
+ llvm::Value * NumElementsPtr =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
"isless");
// If the condition is true, execute the body.
Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
+
EmitBlock(ForBody);
llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
// Inside the loop body, emit the copy constructor call on the array element.
@@ -1369,75 +1369,75 @@
Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
if (BitwiseCopy)
EmitAggregateCopy(Dest, Src, Ty);
- else if (CXXConstructorDecl *BaseCopyCtor =
+ else if (CXXConstructorDecl *BaseCopyCtor =
BaseClassDecl->getCopyConstructor(getContext(), 0)) {
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
Ctor_Complete);
CallArgList CallArgs;
// Push the this (Dest) ptr.
CallArgs.push_back(std::make_pair(RValue::get(Dest),
BaseCopyCtor->getThisType(getContext())));
-
+
// Push the Src ptr.
CallArgs.push_back(std::make_pair(RValue::get(Src),
BaseCopyCtor->getParamDecl(0)->getType()));
- QualType ResultType =
+ QualType ResultType =
BaseCopyCtor->getType()->getAsFunctionType()->getResultType();
EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
Callee, CallArgs, BaseCopyCtor);
}
EmitBlock(ContinueBlock);
-
+
// Emit the increment of the loop counter.
llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
Counter = Builder.CreateLoad(IndexPtr);
NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
Builder.CreateStore(NextVal, IndexPtr, false);
-
+
// Finally, branch back up to the condition for the next iteration.
EmitBranch(CondBlock);
-
+
// Emit the fall-through block.
EmitBlock(AfterFor, true);
}
/// EmitClassAggrCopyAssignment - This routine generates code to assign a class
-/// array of objects from SrcValue to DestValue. Assignment can be either a
+/// array of objects from SrcValue to DestValue. Assignment can be either a
/// bitwise assignment or via a copy assignment operator function call.
/// FIXME. This can be consolidated with EmitClassAggrMemberwiseCopy
-void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
+void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
llvm::Value *Src,
const ArrayType *Array,
- const CXXRecordDecl *BaseClassDecl,
+ const CXXRecordDecl *BaseClassDecl,
QualType Ty) {
const ConstantArrayType *CA = dyn_cast<ConstantArrayType>(Array);
assert(CA && "VLA cannot be assigned");
bool BitwiseAssign = BaseClassDecl->hasTrivialCopyAssignment();
-
+
// Create a temporary for the loop index and initialize it with 0.
llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
"loop.index");
- llvm::Value* zeroConstant =
+ llvm::Value* zeroConstant =
llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
Builder.CreateStore(zeroConstant, IndexPtr, false);
// Start the loop with a block that tests the condition.
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
+
EmitBlock(CondBlock);
-
+
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
// Generate: if (loop-index < number-of-elements) fall to the loop body,
// otherwise, go to the block after the for-loop.
uint64_t NumElements = getContext().getConstantArrayElementCount(CA);
- llvm::Value * NumElementsPtr =
+ llvm::Value * NumElementsPtr =
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
+ llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
"isless");
// If the condition is true, execute the body.
Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
+
EmitBlock(ForBody);
llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
// Inside the loop body, emit the assignment operator call on the array element.
@@ -1457,12 +1457,12 @@
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), LTy);
-
+
CallArgList CallArgs;
// Push the this (Dest) ptr.
CallArgs.push_back(std::make_pair(RValue::get(Dest),
MD->getThisType(getContext())));
-
+
// Push the Src ptr.
CallArgs.push_back(std::make_pair(RValue::get(Src),
MD->getParamDecl(0)->getType()));
@@ -1471,16 +1471,16 @@
Callee, CallArgs, MD);
}
EmitBlock(ContinueBlock);
-
+
// Emit the increment of the loop counter.
llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
Counter = Builder.CreateLoad(IndexPtr);
NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
Builder.CreateStore(NextVal, IndexPtr, false);
-
+
// Finally, branch back up to the condition for the next iteration.
EmitBranch(CondBlock);
-
+
// Emit the fall-through block.
EmitBlock(AfterFor, true);
}
@@ -1490,7 +1490,7 @@
/// or via a copy constructor call.
void CodeGenFunction::EmitClassMemberwiseCopy(
llvm::Value *Dest, llvm::Value *Src,
- const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl, QualType Ty) {
if (ClassDecl) {
Dest = AddressCXXOfBaseClass(Dest, ClassDecl, BaseClassDecl);
@@ -1500,20 +1500,20 @@
EmitAggregateCopy(Dest, Src, Ty);
return;
}
-
- if (CXXConstructorDecl *BaseCopyCtor =
+
+ if (CXXConstructorDecl *BaseCopyCtor =
BaseClassDecl->getCopyConstructor(getContext(), 0)) {
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
Ctor_Complete);
CallArgList CallArgs;
// Push the this (Dest) ptr.
CallArgs.push_back(std::make_pair(RValue::get(Dest),
BaseCopyCtor->getThisType(getContext())));
-
+
// Push the Src ptr.
CallArgs.push_back(std::make_pair(RValue::get(Src),
BaseCopyCtor->getParamDecl(0)->getType()));
- QualType ResultType =
+ QualType ResultType =
BaseCopyCtor->getType()->getAsFunctionType()->getResultType();
EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
Callee, CallArgs, BaseCopyCtor);
@@ -1521,13 +1521,13 @@
}
/// EmitClassCopyAssignment - This routine generates code to copy assign a class
-/// object from SrcValue to DestValue. Assignment can be either a bitwise
+/// object from SrcValue to DestValue. Assignment can be either a bitwise
/// assignment or via an assignment operator call.
// FIXME. Consolidate this with EmitClassMemberwiseCopy as they share a lot.
void CodeGenFunction::EmitClassCopyAssignment(
llvm::Value *Dest, llvm::Value *Src,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl,
QualType Ty) {
if (ClassDecl) {
Dest = AddressCXXOfBaseClass(Dest, ClassDecl, BaseClassDecl);
@@ -1537,35 +1537,35 @@
EmitAggregateCopy(Dest, Src, Ty);
return;
}
-
+
const CXXMethodDecl *MD = 0;
- bool ConstCopyAssignOp = BaseClassDecl->hasConstCopyAssignment(getContext(),
+ bool ConstCopyAssignOp = BaseClassDecl->hasConstCopyAssignment(getContext(),
MD);
assert(ConstCopyAssignOp && "EmitClassCopyAssignment - missing copy assign");
(void)ConstCopyAssignOp;
const FunctionProtoType *FPT = MD->getType()->getAsFunctionProtoType();
- const llvm::Type *LTy =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ const llvm::Type *LTy =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
llvm::Constant *Callee = CGM.GetAddrOfFunction(GlobalDecl(MD), LTy);
-
+
CallArgList CallArgs;
// Push the this (Dest) ptr.
CallArgs.push_back(std::make_pair(RValue::get(Dest),
MD->getThisType(getContext())));
-
+
// Push the Src ptr.
CallArgs.push_back(std::make_pair(RValue::get(Src),
MD->getParamDecl(0)->getType()));
- QualType ResultType =
+ QualType ResultType =
MD->getType()->getAsFunctionType()->getResultType();
EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs),
Callee, CallArgs, MD);
}
/// SynthesizeDefaultConstructor - synthesize a default constructor
-void
+void
CodeGenFunction::SynthesizeDefaultConstructor(const CXXConstructorDecl *CD,
const FunctionDecl *FD,
llvm::Function *Fn,
@@ -1577,18 +1577,18 @@
/// SynthesizeCXXCopyConstructor - This routine implicitly defines the body of a
/// copy constructor, in accordance with section 12.8 (p7 and p8) of C++03.
-/// The implicitly-defined copy constructor for class X performs a memberwise
-/// copy of its subobjects. The order of copying is the same as the order
+/// The implicitly-defined copy constructor for class X performs a memberwise
+/// copy of its subobjects. The order of copying is the same as the order
/// of initialization of bases and members in a user-defined constructor
/// Each subobject is copied in the manner appropriate to its type:
-/// if the subobject is of class type, the copy constructor for the class is
+/// if the subobject is of class type, the copy constructor for the class is
/// used;
-/// if the subobject is an array, each element is copied, in the manner
+/// if the subobject is an array, each element is copied, in the manner
/// appropriate to the element type;
-/// if the subobject is of scalar type, the built-in assignment operator is
+/// if the subobject is of scalar type, the built-in assignment operator is
/// used.
-/// Virtual base class subobjects shall be copied only once by the
-/// implicitly-defined copy constructor
+/// Virtual base class subobjects shall be copied only once by the
+/// implicitly-defined copy constructor
void CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *CD,
const FunctionDecl *FD,
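To make the 12.8 wording above concrete: the synthesized copy constructor behaves as if written out by hand. A sketch with hypothetical types, not code from this patch:

  struct Base { Base() {} Base(const Base &) {} };

  struct X {
    Base B;    // class type: copied with Base's copy constructor
    int A[4];  // array: each element copied individually
    double D;  // scalar: copied with the built-in assignment
  };

  // The implicitly-defined X(const X &) acts as if declared:
  //   X::X(const X &O) : B(O.B), D(O.D) {
  //     for (int i = 0; i != 4; ++i) A[i] = O.A[i];
  //   }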
@@ -1598,7 +1598,7 @@
assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
"SynthesizeCXXCopyConstructor - copy constructor has definition already");
StartFunction(FD, FD->getResultType(), Fn, Args, SourceLocation());
-
+
FunctionArgList::const_iterator i = Args.begin();
const VarDecl *ThisArg = i->first;
llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
@@ -1606,28 +1606,28 @@
const VarDecl *SrcArg = (i+1)->first;
llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);
-
+
for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
Base != ClassDecl->bases_end(); ++Base) {
// FIXME. copy construction of virtual base NYI
if (Base->isVirtual())
continue;
-
+
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
EmitClassMemberwiseCopy(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
Base->getType());
}
-
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
QualType FieldType = getContext().getCanonicalType((*Field)->getType());
- const ConstantArrayType *Array =
+ const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
if (Array)
FieldType = getContext().getBaseElementType(FieldType);
-
+
if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
CXXRecordDecl *FieldClassDecl
= cast<CXXRecordDecl>(FieldClassType->getDecl());
@@ -1636,15 +1636,15 @@
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *DestBaseAddrPtr =
+ llvm::Value *DestBaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- llvm::Value *SrcBaseAddrPtr =
+ llvm::Value *SrcBaseAddrPtr =
Builder.CreateBitCast(RHS.getAddress(), BasePtr);
EmitClassAggrMemberwiseCopy(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
FieldClassDecl, FieldType);
}
- else
- EmitClassMemberwiseCopy(LHS.getAddress(), RHS.getAddress(),
+ else
+ EmitClassMemberwiseCopy(LHS.getAddress(), RHS.getAddress(),
0 /*ClassDecl*/, FieldClassDecl, FieldType);
continue;
}
@@ -1655,27 +1655,27 @@
EmitStoreThroughLValue(RVRHS, LHS, FieldType);
}
FinishFunction();
-}
+}
/// SynthesizeCXXCopyAssignment - Implicitly define copy assignment operator.
-/// Before the implicitly-declared copy assignment operator for a class is
-/// implicitly defined, all implicitly-declared copy assignment operators for
-/// its direct base classes and its nonstatic data members shall have been
+/// Before the implicitly-declared copy assignment operator for a class is
+/// implicitly defined, all implicitly-declared copy assignment operators for
+/// its direct base classes and its nonstatic data members shall have been
/// implicitly defined. [12.8-p12]
-/// The implicitly-defined copy assignment operator for class X performs
-/// memberwise assignment of its subobjects. The direct base classes of X are
-/// assigned first, in the order of their declaration in
-/// the base-specifier-list, and then the immediate nonstatic data members of X
-/// are assigned, in the order in which they were declared in the class
+/// The implicitly-defined copy assignment operator for class X performs
+/// memberwise assignment of its subobjects. The direct base classes of X are
+/// assigned first, in the order of their declaration in
+/// the base-specifier-list, and then the immediate nonstatic data members of X
+/// are assigned, in the order in which they were declared in the class
/// definition. Each subobject is assigned in the manner appropriate to its type:
-/// if the subobject is of class type, the copy assignment operator for the
-/// class is used (as if by explicit qualification; that is, ignoring any
+/// if the subobject is of class type, the copy assignment operator for the
+/// class is used (as if by explicit qualification; that is, ignoring any
/// possible virtual overriding functions in more derived classes);
///
-/// if the subobject is an array, each element is assigned, in the manner
+/// if the subobject is an array, each element is assigned, in the manner
/// appropriate to the element type;
///
-/// if the subobject is of scalar type, the built-in assignment operator is
+/// if the subobject is of scalar type, the built-in assignment operator is
/// used.
void CodeGenFunction::SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
const FunctionDecl *FD,
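Likewise, the rules quoted above make the synthesized operator= assign direct bases first, then members, and return *this; a sketch with hypothetical types:

  struct B1 { B1 &operator=(const B1 &) { return *this; } };

  struct X2 : B1 {
    int M;
    // The implicitly-defined assignment acts as if declared:
    //   X2 &X2::operator=(const X2 &O) {
    //     B1::operator=(O); // direct bases first, in declaration order
    //     M = O.M;          // then non-static members, in declaration order
    //     return *this;     // matches the "return *this" store below
    //   }
  };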
@@ -1686,7 +1686,7 @@
assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
"SynthesizeCXXCopyAssignment - copy assignment has user declaration");
StartFunction(FD, FD->getResultType(), Fn, Args, SourceLocation());
-
+
FunctionArgList::const_iterator i = Args.begin();
const VarDecl *ThisArg = i->first;
llvm::Value *ThisObj = GetAddrOfLocalVar(ThisArg);
@@ -1694,28 +1694,28 @@
const VarDecl *SrcArg = (i+1)->first;
llvm::Value *SrcObj = GetAddrOfLocalVar(SrcArg);
llvm::Value *LoadOfSrc = Builder.CreateLoad(SrcObj);
-
+
for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
Base != ClassDecl->bases_end(); ++Base) {
// FIXME. copy assignment of virtual base NYI
if (Base->isVirtual())
continue;
-
+
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
EmitClassCopyAssignment(LoadOfThis, LoadOfSrc, ClassDecl, BaseClassDecl,
Base->getType());
}
-
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
QualType FieldType = getContext().getCanonicalType((*Field)->getType());
- const ConstantArrayType *Array =
+ const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
if (Array)
FieldType = getContext().getBaseElementType(FieldType);
-
+
if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
CXXRecordDecl *FieldClassDecl
= cast<CXXRecordDecl>(FieldClassType->getDecl());
@@ -1732,7 +1732,7 @@
FieldClassDecl, FieldType);
}
else
- EmitClassCopyAssignment(LHS.getAddress(), RHS.getAddress(),
+ EmitClassCopyAssignment(LHS.getAddress(), RHS.getAddress(),
0 /*ClassDecl*/, FieldClassDecl, FieldType);
continue;
}
@@ -1742,12 +1742,12 @@
RValue RVRHS = EmitLoadOfLValue(RHS, FieldType);
EmitStoreThroughLValue(RVRHS, LHS, FieldType);
}
-
+
// return *this;
Builder.CreateStore(LoadOfThis, ReturnValue);
-
+
FinishFunction();
-}
+}
/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
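Initialization order in the emitted prologue is fixed by the class definition, not by the mem-initializer list; a hypothetical example:

  struct BaseY { BaseY() {} };
  struct Mem1 { Mem1(int) {} };

  struct Y : BaseY {
    Mem1 Mem;
    int K;
    // The prologue runs BaseY(), then Mem(1), then K(0): bases before
    // members, members in declaration order, regardless of the order
    // written in the initializer list below.
    Y() : K(0), Mem(1) {}
  };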
@@ -1756,7 +1756,7 @@
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
// FIXME: Add vbase initialization
llvm::Value *LoadOfThis = 0;
-
+
for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
E = CD->init_end();
B != E; ++B) {
@@ -1764,23 +1764,23 @@
if (Member->isBaseInitializer()) {
LoadOfThis = LoadCXXThis();
Type *BaseType = Member->getBaseClass();
- CXXRecordDecl *BaseClassDecl =
+ CXXRecordDecl *BaseClassDecl =
cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
- llvm::Value *V = AddressCXXOfBaseClass(LoadOfThis, ClassDecl,
+ llvm::Value *V = AddressCXXOfBaseClass(LoadOfThis, ClassDecl,
BaseClassDecl);
EmitCXXConstructorCall(Member->getConstructor(),
Ctor_Complete, V,
- Member->const_arg_begin(),
+ Member->const_arg_begin(),
Member->const_arg_end());
} else {
// non-static data member initializers.
FieldDecl *Field = Member->getMember();
QualType FieldType = getContext().getCanonicalType((Field)->getType());
- const ConstantArrayType *Array =
+ const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
if (Array)
FieldType = getContext().getBaseElementType(FieldType);
-
+
LoadOfThis = LoadCXXThis();
LValue LHS;
if (FieldType->isReferenceType()) {
@@ -1794,32 +1794,32 @@
}
if (FieldType->getAs<RecordType>()) {
if (!Field->isAnonymousStructOrUnion()) {
- assert(Member->getConstructor() &&
+ assert(Member->getConstructor() &&
"EmitCtorPrologue - no constructor to initialize member");
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
+ llvm::Value *BaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- EmitCXXAggrConstructorCall(Member->getConstructor(),
+ EmitCXXAggrConstructorCall(Member->getConstructor(),
Array, BaseAddrPtr);
}
else
EmitCXXConstructorCall(Member->getConstructor(),
Ctor_Complete, LHS.getAddress(),
- Member->const_arg_begin(),
+ Member->const_arg_begin(),
Member->const_arg_end());
continue;
}
else {
// Initializing an anonymous union data member.
FieldDecl *anonMember = Member->getAnonUnionMember();
- LHS = EmitLValueForField(LHS.getAddress(), anonMember,
+ LHS = EmitLValueForField(LHS.getAddress(), anonMember,
/*IsUnion=*/true, 0);
FieldType = anonMember->getType();
}
}
-
+
assert(Member->getNumArgs() == 1 && "Initializer count must be 1 only");
Expr *RhsExpr = *Member->arg_begin();
RValue RHS;
@@ -1834,20 +1834,20 @@
if (!CD->getNumBaseOrMemberInitializers() && !CD->isTrivial()) {
// Nontrivial default constructor with no initializer list. It may still
- // have base classes and/or contain non-static data members which require
+ // have base classes and/or contain non-static data members which require
// construction.
- for (CXXRecordDecl::base_class_const_iterator Base =
+ for (CXXRecordDecl::base_class_const_iterator Base =
ClassDecl->bases_begin();
Base != ClassDecl->bases_end(); ++Base) {
// FIXME. construction of virtual base NYI
if (Base->isVirtual())
continue;
-
+
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (BaseClassDecl->hasTrivialConstructor())
continue;
- if (CXXConstructorDecl *BaseCX =
+ if (CXXConstructorDecl *BaseCX =
BaseClassDecl->getDefaultConstructor(getContext())) {
LoadOfThis = LoadCXXThis();
llvm::Value *V = AddressCXXOfBaseClass(LoadOfThis, ClassDecl,
@@ -1855,40 +1855,40 @@
EmitCXXConstructorCall(BaseCX, Ctor_Complete, V, 0, 0);
}
}
-
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
QualType FieldType = getContext().getCanonicalType((*Field)->getType());
- const ConstantArrayType *Array =
+ const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
if (Array)
FieldType = getContext().getBaseElementType(FieldType);
if (!FieldType->getAs<RecordType>() || Field->isAnonymousStructOrUnion())
continue;
const RecordType *ClassRec = FieldType->getAs<RecordType>();
- CXXRecordDecl *MemberClassDecl =
+ CXXRecordDecl *MemberClassDecl =
dyn_cast<CXXRecordDecl>(ClassRec->getDecl());
if (!MemberClassDecl || MemberClassDecl->hasTrivialConstructor())
continue;
- if (CXXConstructorDecl *MamberCX =
+ if (CXXConstructorDecl *MamberCX =
MemberClassDecl->getDefaultConstructor(getContext())) {
LoadOfThis = LoadCXXThis();
LValue LHS = EmitLValueForField(LoadOfThis, *Field, false, 0);
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
+ llvm::Value *BaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
EmitCXXAggrConstructorCall(MamberCX, Array, BaseAddrPtr);
}
else
- EmitCXXConstructorCall(MamberCX, Ctor_Complete, LHS.getAddress(),
+ EmitCXXConstructorCall(MamberCX, Ctor_Complete, LHS.getAddress(),
0, 0);
}
}
}
-
+
// Initialize the vtable pointer
if (ClassDecl->isDynamicClass()) {
if (!LoadOfThis)
@@ -1904,7 +1904,7 @@
}
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
-/// destructor. This is to call destructors on members and base classes
+/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
/// FIXME: This needs to take a CXXDtorType.
void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD) {
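Reverse-of-construction order, concretely (hypothetical classes):

  struct M1 { ~M1() {} };
  struct M2 { ~M2() {} };
  struct BaseZ { ~BaseZ() {} };

  struct Z : BaseZ {
    M1 A;
    M2 B;
    // Z's destructor epilogue destroys B, then A, then the BaseZ
    // subobject: the exact reverse of construction order.
  };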
@@ -1912,14 +1912,14 @@
assert(!ClassDecl->getNumVBases() &&
"FIXME: Destruction of virtual bases not supported");
(void)ClassDecl; // prevent warning.
-
+
for (CXXDestructorDecl::destr_const_iterator *B = DD->destr_begin(),
*E = DD->destr_end(); B != E; ++B) {
uintptr_t BaseOrMember = (*B);
if (DD->isMemberToDestroy(BaseOrMember)) {
FieldDecl *FD = DD->getMemberToDestroy(BaseOrMember);
QualType FieldType = getContext().getCanonicalType((FD)->getType());
- const ConstantArrayType *Array =
+ const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
if (Array)
FieldType = getContext().getBaseElementType(FieldType);
@@ -1932,9 +1932,9 @@
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
+ llvm::Value *BaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
Array, BaseAddrPtr);
}
else
@@ -1946,7 +1946,7 @@
CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
if (BaseClassDecl->hasTrivialDestructor())
continue;
- llvm::Value *V = AddressCXXOfBaseClass(LoadCXXThis(),
+ llvm::Value *V = AddressCXXOfBaseClass(LoadCXXThis(),
ClassDecl,BaseClassDecl);
EmitCXXDestructorCall(BaseClassDecl->getDestructor(getContext()),
Dtor_Complete, V);
@@ -1955,10 +1955,10 @@
if (DD->getNumBaseOrMemberDestructions() || DD->isTrivial())
return;
// Case of destructor synthesis with fields and base classes
- // which have non-trivial destructors. They must be destructed in
+ // which have non-trivial destructors. They must be destructed in
// reverse order of their construction.
llvm::SmallVector<FieldDecl *, 16> DestructedFields;
-
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
@@ -1976,7 +1976,7 @@
for (int i = DestructedFields.size() -1; i >= 0; --i) {
FieldDecl *Field = DestructedFields[i];
QualType FieldType = Field->getType();
- const ConstantArrayType *Array =
+ const ConstantArrayType *Array =
getContext().getAsConstantArrayType(FieldType);
if (Array)
FieldType = getContext().getBaseElementType(FieldType);
@@ -1987,23 +1987,23 @@
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
+ llvm::Value *BaseAddrPtr =
Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
+ EmitCXXAggrDestructorCall(FieldClassDecl->getDestructor(getContext()),
Array, BaseAddrPtr);
}
else
EmitCXXDestructorCall(FieldClassDecl->getDestructor(getContext()),
Dtor_Complete, LHS.getAddress());
}
-
+
llvm::SmallVector<CXXRecordDecl*, 4> DestructedBases;
for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin();
Base != ClassDecl->bases_end(); ++Base) {
// FIXME. destruction of virtual base NYI
if (Base->isVirtual())
continue;
-
+
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
if (BaseClassDecl->hasTrivialDestructor())
@@ -2014,7 +2014,7 @@
return;
for (int i = DestructedBases.size() -1; i >= 0; --i) {
CXXRecordDecl *BaseClassDecl = DestructedBases[i];
- llvm::Value *V = AddressCXXOfBaseClass(LoadCXXThis(),
+ llvm::Value *V = AddressCXXOfBaseClass(LoadCXXThis(),
ClassDecl,BaseClassDecl);
EmitCXXDestructorCall(BaseClassDecl->getDestructor(getContext()),
Dtor_Complete, V);
@@ -2025,13 +2025,13 @@
const FunctionDecl *FD,
llvm::Function *Fn,
const FunctionArgList &Args) {
-
+
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
assert(!ClassDecl->hasUserDeclaredDestructor() &&
"SynthesizeDefaultDestructor - destructor has user declaration");
(void) ClassDecl;
-
+
StartFunction(FD, FD->getResultType(), Fn, Args, SourceLocation());
EmitDtorEpilogue(CD);
FinishFunction();
-}
+}
diff --git a/lib/CodeGen/CGCXX.h b/lib/CodeGen/CGCXX.h
index 6051d91..1e6adb0 100644
--- a/lib/CodeGen/CGCXX.h
+++ b/lib/CodeGen/CGCXX.h
@@ -30,7 +30,7 @@
Dtor_Complete, // Complete object dtor
Dtor_Base // Base object dtor
};
-
+
} // end namespace clang
#endif // CLANG_CODEGEN_CGCXX_H
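The complete/base split above matters once virtual bases are involved; a sketch of why two variants exist (hypothetical classes):

  struct V {};
  struct A : virtual V {};
  struct D : A {};

  // A complete A object (Ctor_Complete) constructs its own virtual V
  // subobject. When A is constructed as a base of D, only A's base
  // variant (Ctor_Base) runs, because D's complete constructor owns
  // the single shared V. Dtor_Complete/Dtor_Base mirror this split.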
diff --git a/lib/CodeGen/CGCXXTemp.cpp b/lib/CodeGen/CGCXXTemp.cpp
index 30de111..fbb2b9f 100644
--- a/lib/CodeGen/CGCXXTemp.cpp
+++ b/lib/CodeGen/CGCXXTemp.cpp
@@ -15,20 +15,20 @@
using namespace clang;
using namespace CodeGen;
-void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
+void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
llvm::Value *Ptr) {
llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
-
+
llvm::Value *CondPtr = 0;
-
- // Check if temporaries need to be conditional. If so, we'll create a
- // condition boolean, initialize it to false, and set it to true when
- // the temporary is constructed.
+
+ // Check if temporaries need to be conditional. If so, we'll create a
+ // condition boolean, initialize it to false, and set it to true when
+ // the temporary is constructed.
if (!ConditionalTempDestructionStack.empty()) {
CondPtr = CreateTempAlloca(llvm::Type::getInt1Ty(VMContext), "cond");
-
+
// Initialize it to false. This initialization takes place right after
// the alloca insert point.
- llvm::StoreInst *SI =
+ llvm::StoreInst *SI =
new llvm::StoreInst(llvm::ConstantInt::getFalse(VMContext), CondPtr);
llvm::BasicBlock *Block = AllocaInsertPt->getParent();
Block->getInstList().insertAfter((llvm::Instruction *)AllocaInsertPt, SI);
@@ -36,8 +36,8 @@
// Now set it to true.
Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
}
-
- LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
+
+ LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
CondPtr));
PushCleanupBlock(DtorBlock);
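A conditional temporary, for reference: the destructor for T() below may only run when Cond is true, which is what the condition flag above guards (hypothetical example):

  struct T { T() {} ~T() {} };
  bool consume(const T &) { return true; }

  void run(bool Cond) {
    // T() is constructed only if Cond is true; the flag starts false
    // and is stored true at the point of construction, so the cleanup
    // can test it before calling ~T().
    bool R = Cond && consume(T());
    (void)R;
  }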
@@ -45,15 +45,15 @@
void CodeGenFunction::PopCXXTemporary() {
const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
-
+
CleanupBlockInfo CleanupInfo = PopCleanupBlock();
- assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
+ assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
"Cleanup block mismatch!");
- assert(!CleanupInfo.SwitchBlock &&
+ assert(!CleanupInfo.SwitchBlock &&
"Should not have a switch block for temporary cleanup!");
- assert(!CleanupInfo.EndBlock &&
+ assert(!CleanupInfo.EndBlock &&
"Should not have an end block for temporary cleanup!");
-
+
EmitBlock(Info.DtorBlock);
llvm::BasicBlock *CondEnd = 0;
@@ -63,12 +63,12 @@
if (Info.CondPtr) {
llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
CondEnd = createBasicBlock("cond.dtor.end");
-
+
llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
Builder.CreateCondBr(Cond, CondBlock, CondEnd);
EmitBlock(CondBlock);
}
-
+
EmitCXXDestructorCall(Info.Temporary->getDestructor(),
Dtor_Complete, Info.ThisPtr);
@@ -77,7 +77,7 @@
Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
EmitBlock(CondEnd);
}
-
+
LiveTemporaries.pop_back();
}
@@ -89,7 +89,7 @@
// If we shouldn't destroy the temporaries, just emit the
// child expression.
if (!E->shouldDestroyTemporaries())
- return EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+ return EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
/*IgnoreResult=*/false, IsInitializer);
// Keep track of the current cleanup stack depth.
@@ -97,21 +97,21 @@
(void) CleanupStackDepth;
unsigned OldNumLiveTemporaries = LiveTemporaries.size();
-
- RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+
+ RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
/*IgnoreResult=*/false, IsInitializer);
-
+
// Pop temporaries.
while (LiveTemporaries.size() > OldNumLiveTemporaries)
PopCXXTemporary();
-
+
assert(CleanupEntries.size() == CleanupStackDepth &&
"Cleanup size mismatch!");
-
+
return RV;
}
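The bookkeeping above implements ordinary full-expression temporary lifetime; a hypothetical example:

  struct Tmp {
    Tmp() {}
    ~Tmp() {}
    int value() const { return 1; }
  };

  int useTemporaries() {
    // Both temporaries are pushed while the subexpression is emitted
    // and popped -- their destructors run -- at the end of the full
    // expression, matching the LiveTemporaries counting above.
    return Tmp().value() + Tmp().value();
  }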
-void
+void
CodeGenFunction::PushConditionalTempDestruction() {
// Store the current number of live temporaries.
ConditionalTempDestructionStack.push_back(LiveTemporaries.size());
@@ -120,13 +120,13 @@
void CodeGenFunction::PopConditionalTempDestruction() {
size_t NumLiveTemporaries = ConditionalTempDestructionStack.back();
ConditionalTempDestructionStack.pop_back();
-
+
// Pop temporaries.
while (LiveTemporaries.size() > NumLiveTemporaries) {
- assert(LiveTemporaries.back().CondPtr &&
+ assert(LiveTemporaries.back().CondPtr &&
"Conditional temporary must have a cond ptr!");
PopCXXTemporary();
- }
+ }
}
-
+
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 0a187fc..2da16a1 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -33,13 +33,13 @@
// FIXME: Use iterator and sidestep silly type array creation.
-const
+const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
- return getFunctionInfo(FTNP->getResultType(),
+ return getFunctionInfo(FTNP->getResultType(),
llvm::SmallVector<QualType, 16>());
}
-const
+const
CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
llvm::SmallVector<QualType, 16> ArgTys;
// FIXME: Kill copy.
@@ -53,7 +53,7 @@
// Add the 'this' pointer unless this is a static method.
if (MD->isInstance())
ArgTys.push_back(MD->getThisType(Context));
-
+
const FunctionProtoType *FTP = MD->getType()->getAsFunctionProtoType();
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
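The effect of prepending 'this' above: an instance method is lowered as if it took the object pointer as an explicit first parameter. A sketch, not the actual lowering code:

  struct W {
    int f(int X);        // instance method
    static int g(int X); // static method
  };

  // getFunctionInfo models W::f roughly as the free function
  //   int f(W *This, int X);
  // while the static W::g keeps the signature int g(int X).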
@@ -64,7 +64,7 @@
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance())
return getFunctionInfo(MD);
-
+
const FunctionType *FTy = FD->getType()->getAsFunctionType();
if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FTy))
return getFunctionInfo(FTP);
@@ -82,21 +82,21 @@
return getFunctionInfo(MD->getResultType(), ArgTys);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const CallArgList &Args) {
// FIXME: Kill copy.
llvm::SmallVector<QualType, 16> ArgTys;
- for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
+ for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(i->second);
return getFunctionInfo(ResTy, ArgTys);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const FunctionArgList &Args) {
// FIXME: Kill copy.
llvm::SmallVector<QualType, 16> ArgTys;
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(i->second);
return getFunctionInfo(ResTy, ArgTys);
@@ -123,7 +123,7 @@
return *FI;
}
-CGFunctionInfo::CGFunctionInfo(QualType ResTy,
+CGFunctionInfo::CGFunctionInfo(QualType ResTy,
const llvm::SmallVector<QualType, 16> &ArgTys) {
NumArgs = ArgTys.size();
Args = new ArgInfo[1 + NumArgs];
@@ -134,20 +134,20 @@
/***/
-void CodeGenTypes::GetExpandedTypes(QualType Ty,
+void CodeGenTypes::GetExpandedTypes(QualType Ty,
std::vector<const llvm::Type*> &ArgTys) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
const RecordDecl *RD = RT->getDecl();
- assert(!RD->hasFlexibleArrayMember() &&
+ assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
-
+
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
const FieldDecl *FD = *i;
- assert(!FD->isBitField() &&
+ assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
-
+
QualType FT = FD->getType();
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
GetExpandedTypes(FT, ArgTys);
@@ -157,19 +157,19 @@
}
}
-llvm::Function::arg_iterator
+llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
llvm::Function::arg_iterator AI) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
RecordDecl *RD = RT->getDecl();
- assert(LV.isSimple() &&
- "Unexpected non-simple lvalue during struct expansion.");
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
llvm::Value *Addr = LV.getAddress();
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- FieldDecl *FD = *i;
+ FieldDecl *FD = *i;
QualType FT = FD->getType();
// FIXME: What are the right qualifiers here?
@@ -185,8 +185,8 @@
return AI;
}
-void
-CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+void
+CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::SmallVector<llvm::Value*, 16> &Args) {
const RecordType *RT = Ty->getAsStructureType();
assert(RT && "Can only expand structure types.");
@@ -196,16 +196,16 @@
llvm::Value *Addr = RV.getAggregateAddr();
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
- FieldDecl *FD = *i;
+ FieldDecl *FD = *i;
QualType FT = FD->getType();
-
+
// FIXME: What are the right qualifiers here?
LValue LV = EmitLValueForField(Addr, FD, false, 0);
if (CodeGenFunction::hasAggregateLLVMType(FT)) {
ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
} else {
RValue RV = EmitLoadOfLValue(LV, FT);
- assert(RV.isScalar() &&
+ assert(RV.isScalar() &&
"Unexpected non-scalar rvalue during struct expansion.");
Args.push_back(RV.getScalarVal());
}
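What expansion looks like from the source level; hypothetical function and struct:

  struct Pair { int A; float B; };

  // Under ABIArgInfo::Expand, a Pair argument is flattened into its
  // scalar fields, as if the callee were declared
  //   void takePair(int A, float B);
  // ExpandTypeFromArgs rebuilds the Pair in the callee's prologue and
  // ExpandTypeToArgs performs the inverse flattening at call sites.
  void takePair(Pair P);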
@@ -221,7 +221,7 @@
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
const llvm::Type *Ty,
CodeGenFunction &CGF) {
- const llvm::Type *SrcTy =
+ const llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
@@ -244,9 +244,9 @@
// Otherwise do coercion through memory. This is stupid, but
// simple.
llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
- llvm::Value *Casted =
+ llvm::Value *Casted =
CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
- llvm::StoreInst *Store =
+ llvm::StoreInst *Store =
CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
// FIXME: Use better alignment / avoid requiring aligned store.
Store->setAlignment(1);
@@ -263,7 +263,7 @@
llvm::Value *DstPtr,
CodeGenFunction &CGF) {
const llvm::Type *SrcTy = Src->getType();
- const llvm::Type *DstTy =
+ const llvm::Type *DstTy =
cast<llvm::PointerType>(DstPtr->getType())->getElementType();
uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -287,7 +287,7 @@
// to that information.
llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
CGF.Builder.CreateStore(Src, Tmp);
- llvm::Value *Casted =
+ llvm::Value *Casted =
CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
// FIXME: Use better alignment / avoid requiring aligned load.
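The memory round-trip in CreateCoercedLoad/CreateCoercedStore is, morally, a size-clamped memcpy between differently typed slots; a sketch for trivially copyable types only (the real code operates on allocas and LLVM values):

  #include <cstring>

  template <typename DstT, typename SrcT>
  DstT coercedLoad(const SrcT &Src) {
    DstT Dst = DstT();
    std::memcpy(&Dst, &Src,
                sizeof(SrcT) < sizeof(DstT) ? sizeof(SrcT)
                                            : sizeof(DstT));
    return Dst;
  }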
@@ -335,11 +335,11 @@
ResultType = RetAI.getCoerceToType();
break;
}
-
- for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
const ABIArgInfo &AI = it->info;
-
+
switch (AI.getKind()) {
case ABIArgInfo::Ignore:
break;
@@ -359,7 +359,7 @@
case ABIArgInfo::Direct:
ArgTys.push_back(ConvertType(it->type));
break;
-
+
case ABIArgInfo::Expand:
GetExpandedTypes(it->type, ArgTys);
break;
@@ -414,7 +414,7 @@
break;
case ABIArgInfo::Indirect:
- PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
llvm::Attribute::StructRet |
llvm::Attribute::NoAlias));
++Index;
@@ -428,7 +428,7 @@
break;
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ assert(0 && "Invalid ABI kind for return argument");
}
if (RetAttrs)
@@ -439,12 +439,12 @@
// register variable.
signed RegParm = 0;
if (TargetDecl)
- if (const RegparmAttr *RegParmAttr
+ if (const RegparmAttr *RegParmAttr
= TargetDecl->getAttr<RegparmAttr>())
RegParm = RegParmAttr->getNumParams();
unsigned PointerWidth = getContext().Target.getPointerWidth(0);
- for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
ie = FI.arg_end(); it != ie; ++it) {
QualType ParamType = it->type;
const ABIArgInfo &AI = it->info;
@@ -483,10 +483,10 @@
case ABIArgInfo::Ignore:
// Skip increment, no matching LLVM parameter.
- continue;
+ continue;
case ABIArgInfo::Expand: {
- std::vector<const llvm::Type*> Tys;
+ std::vector<const llvm::Type*> Tys;
// FIXME: This is rather inefficient. Do we ever actually need to do
// anything here? The result should be just reconstructed on the other
// side, so extension should be a non-issue.
@@ -495,7 +495,7 @@
continue;
}
}
-
+
if (Attributes)
PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
++Index;
@@ -525,13 +525,13 @@
// Emit allocs for param decls. Give the LLVM Argument nodes names.
llvm::Function::arg_iterator AI = Fn->arg_begin();
-
+
// Name the struct return argument.
if (CGM.ReturnTypeUsesSret(FI)) {
AI->setName("agg.result");
++AI;
}
-
+
assert(FI.arg_size() == Args.size() &&
"Mismatch between function signature & arguments.");
CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
@@ -556,7 +556,7 @@
V = EmitScalarConversion(V, Ty, Arg->getType());
}
}
- EmitParmDecl(*Arg, V);
+ EmitParmDecl(*Arg, V);
break;
}
@@ -580,17 +580,17 @@
EmitParmDecl(*Arg, V);
break;
}
-
+
case ABIArgInfo::Expand: {
// If this structure was expanded into multiple arguments then
// we need to create a temporary and reconstruct it from the
// arguments.
std::string Name = Arg->getNameAsString();
- llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
+ llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(Ty),
(Name + ".addr").c_str());
// FIXME: What are the right qualifiers here?
- llvm::Function::arg_iterator End =
- ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
+ llvm::Function::arg_iterator End =
+ ExpandTypeFromArgs(Ty, LValue::MakeAddr(Temp,0), AI);
EmitParmDecl(*Arg, Temp);
// Name the arguments used in expansion and increment AI.
@@ -602,14 +602,14 @@
case ABIArgInfo::Ignore:
// Initialize the local variable appropriately.
- if (hasAggregateLLVMType(Ty)) {
+ if (hasAggregateLLVMType(Ty)) {
EmitParmDecl(*Arg, CreateTempAlloca(ConvertTypeForMem(Ty)));
} else {
EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())));
}
-
+
// Skip increment, no matching LLVM parameter.
- continue;
+ continue;
case ABIArgInfo::Coerce: {
assert(AI != Fn->arg_end() && "Argument mismatch!");
@@ -668,16 +668,16 @@
case ABIArgInfo::Ignore:
break;
-
+
case ABIArgInfo::Coerce:
RV = CreateCoercedLoad(ReturnValue, RetAI.getCoerceToType(), *this);
break;
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ assert(0 && "Invalid ABI kind for return argument");
}
}
-
+
if (RV) {
Builder.CreateRet(RV);
} else {
@@ -688,12 +688,12 @@
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
if (ArgType->isReferenceType())
return EmitReferenceBindingToExpr(E, ArgType);
-
+
return EmitAnyExprToTemp(E);
}
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
- llvm::Value *Callee,
+ llvm::Value *Callee,
const CallArgList &CallArgs,
const Decl *TargetDecl) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
@@ -703,17 +703,17 @@
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
-
-
+
+
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result.
if (CGM.ReturnTypeUsesSret(CallInfo))
Args.push_back(CreateTempAlloca(ConvertTypeForMem(RetTy)));
-
+
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
- for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
I != E; ++I, ++info_it) {
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->first;
@@ -726,7 +726,7 @@
if (RV.isScalar())
EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false, I->second);
else
- StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
} else {
Args.push_back(RV.getAggregateAddr());
}
@@ -745,7 +745,7 @@
Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
}
break;
-
+
case ABIArgInfo::Ignore:
break;
@@ -758,9 +758,9 @@
} else if (RV.isComplex()) {
SrcPtr = CreateTempAlloca(ConvertTypeForMem(I->second), "coerce");
StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
- } else
+ } else
SrcPtr = RV.getAggregateAddr();
- Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
*this));
break;
}
@@ -770,7 +770,7 @@
break;
}
}
-
+
// If the callee is a bitcast of a function to a varargs pointer to function
// type, check to see if we can remove the bitcast. This handles some cases
// with unprototyped functions.
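The case being handled comes from C, where a call through an unprototyped declaration is first emitted via a bitcast to a varargs function pointer; a C fragment (shown here since C++ has no unprototyped functions):

  void f();               /* unprototyped declaration */
  void g(void) { f(42); } /* call goes through a varargs bitcast */

Stripping the cast when the types line up lets such calls be inlined at -O0 when f is marked always_inline, as the comment below notes.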
@@ -780,7 +780,7 @@
const llvm::FunctionType *CurFT =
cast<llvm::FunctionType>(CurPT->getElementType());
const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
-
+
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
ActualFT->getNumParams() == CurFT->getNumParams() &&
@@ -791,7 +791,7 @@
ArgsMatch = false;
break;
}
-
+
// Strip the cast if we can get away with it. This is a nice cleanup,
// but also allows us to inline the function at -O0 if it is marked
// always_inline.
@@ -799,20 +799,20 @@
Callee = CalleeF;
}
}
-
+
llvm::BasicBlock *InvokeDest = getInvokeDest();
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList);
llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
AttributeList.end());
-
+
llvm::CallSite CS;
if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
Args.data(), Args.data()+Args.size());
EmitBlock(Cont);
}
@@ -828,15 +828,15 @@
if (CS.doesNotReturn()) {
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
-
+
// FIXME: For now, emit a dummy basic block because expr emitters in
// generally are not ready to handle emitting expressions at unreachable
// points.
EnsureInsertPoint();
-
+
// Return a reasonable RValue.
return GetUndefRValue(RetTy);
- }
+ }
llvm::Instruction *CI = CS.getInstruction();
if (Builder.isNamePreserving() &&
@@ -882,7 +882,7 @@
}
case ABIArgInfo::Expand:
- assert(0 && "Invalid ABI kind for return argument");
+ assert(0 && "Invalid ABI kind for return argument");
}
assert(0 && "Unhandled ABIArgInfo::Kind");
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index daf6f00..2c1048d 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -49,9 +49,9 @@
/// FunctionArgList - Type for representing both the decl and type
/// of parameters to a function. The decl must be either a
/// ParmVarDecl or ImplicitParamDecl.
- typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
+ typedef llvm::SmallVector<std::pair<const VarDecl*, QualType>,
16> FunctionArgList;
-
+
/// CGFunctionInfo - Class to encapsulate the information about a
/// function definition.
class CGFunctionInfo : public llvm::FoldingSetNode {
@@ -67,7 +67,7 @@
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
- CGFunctionInfo(QualType ResTy,
+ CGFunctionInfo(QualType ResTy,
const llvm::SmallVector<QualType, 16> &ArgTys);
~CGFunctionInfo() { delete[] Args; }
@@ -89,7 +89,7 @@
it->type.Profile(ID);
}
template<class Iterator>
- static void Profile(llvm::FoldingSetNodeID &ID,
+ static void Profile(llvm::FoldingSetNodeID &ID,
QualType ResTy,
Iterator begin,
Iterator end) {
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index e53f1fa..aaaa4d4 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -59,7 +59,7 @@
FileName = PLoc.getFilename();
FID = PLoc.getIncludeLoc().getRawEncoding();
}
-
+
// See if this compile unit has been used before.
llvm::DICompileUnit &Unit = CompileUnitCache[FID];
if (!Unit.isNull()) return Unit;
@@ -112,10 +112,10 @@
unsigned RuntimeVers = 0;
if (LO.ObjC1)
RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
-
+
// Create new compile unit.
return Unit = DebugFactory.CreateCompileUnit(LangTag, AbsFileName.getLast(),
- AbsFileName.getDirname(),
+ AbsFileName.getDirname(),
Producer, isMain, isOptimized,
Flags, RuntimeVers);
}
@@ -144,13 +144,13 @@
case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
case BuiltinType::Float:
case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
- }
+ }
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(BT);
uint64_t Align = M->getContext().getTypeAlign(BT);
uint64_t Offset = 0;
-
- return DebugFactory.CreateBasicType(Unit,
+
+ return DebugFactory.CreateBasicType(Unit,
BT->getName(M->getContext().getLangOptions()),
Unit, 0, Size, Align,
Offset, /*flags*/ 0, Encoding);
@@ -162,17 +162,17 @@
unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
if (Ty->isComplexIntegerType())
Encoding = llvm::dwarf::DW_ATE_lo_user;
-
+
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
uint64_t Offset = 0;
-
+
return DebugFactory.CreateBasicType(Unit, "complex",
Unit, 0, Size, Align,
Offset, /*flags*/ 0, Encoding);
}
-/// getOrCreateCVRType - Get the CVR qualified type from the cache or create
+/// getOrCreateCVRType - Get the CVR qualified type from the cache or create
/// a new one if necessary.
llvm::DIType CGDebugInfo::CreateCVRType(QualType Ty, llvm::DICompileUnit Unit) {
// We will create one Derived type for one qualifier and recurse to handle any
@@ -181,19 +181,19 @@
unsigned Tag;
if (Ty.isConstQualified()) {
Tag = llvm::dwarf::DW_TAG_const_type;
- Ty.removeConst();
+ Ty.removeConst();
FromTy = getOrCreateType(Ty, Unit);
} else if (Ty.isVolatileQualified()) {
Tag = llvm::dwarf::DW_TAG_volatile_type;
- Ty.removeVolatile();
+ Ty.removeVolatile();
FromTy = getOrCreateType(Ty, Unit);
} else {
assert(Ty.isRestrictQualified() && "Unknown type qualifier for debug info");
Tag = llvm::dwarf::DW_TAG_restrict_type;
- Ty.removeRestrict();
+ Ty.removeRestrict();
FromTy = getOrCreateType(Ty, Unit);
}
-
+
// No need to fill in the Name, Line, Size, Alignment, Offset in case of
// CVR derived types.
return DebugFactory.CreateDerivedType(Tag, Unit, "", llvm::DICompileUnit(),
@@ -203,11 +203,11 @@
llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
llvm::DICompileUnit Unit) {
llvm::DIType EltTy = getOrCreateType(Ty->getPointeeType(), Unit);
-
+
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
+
return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
"", llvm::DICompileUnit(),
0, Size, Align, 0, 0, EltTy);
@@ -216,11 +216,11 @@
llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
llvm::DICompileUnit Unit) {
llvm::DIType EltTy = getOrCreateType(Ty->getPointeeType(), Unit);
-
+
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
+
return DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
"", llvm::DICompileUnit(),
0, Size, Align, 0, 0, EltTy);
@@ -274,11 +274,11 @@
EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
DefUnit, 0, FieldOffset, 0, 0, 0,
llvm::DIType(), Elements);
-
+
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
+
DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
Unit, "", llvm::DICompileUnit(),
0, Size, Align, 0, 0, EltTy);
@@ -344,7 +344,7 @@
EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
DefUnit, 0, FieldOffset, 0, 0, 0,
llvm::DIType(), Elements);
-
+
BlockLiteralGenericSet = true;
BlockLiteralGeneric
= DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
@@ -358,7 +358,7 @@
// Typedefs are derived from some other type. If we have a typedef of a
// typedef, make sure to emit the whole chain.
llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
-
+
// We don't set size information, but do specify where the typedef was
// declared.
std::string TyName = Ty->getDecl()->getNameAsString();
@@ -379,7 +379,7 @@
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
-
+
// Set up remainder of arguments if there is a prototype.
// FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
@@ -391,7 +391,7 @@
llvm::DIArray EltTypeArray =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
-
+
return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
Unit, "", llvm::DICompileUnit(),
0, 0, 0, 0, 0,
@@ -402,7 +402,7 @@
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
llvm::DICompileUnit Unit) {
RecordDecl *Decl = Ty->getDecl();
-
+
unsigned Tag;
if (Decl->isStruct())
Tag = llvm::dwarf::DW_TAG_structure_type;
@@ -425,7 +425,7 @@
DefUnit = getOrCreateCompileUnit(Decl->getLocation());
Line = PLoc.getLine();
}
-
+
// Records and classes and unions can all be recursive. To handle them, we
// first generate a debug descriptor for the struct as a forward declaration.
// Then (if it is a definition) we go through and get debug info for all of
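A self-referential record is the motivating case for the forward declaration; hypothetical type:

  // Node's debug descriptor must exist (as a forward declaration)
  // before its Next field, which points back at Node, can be described.
  struct Node {
    Node *Next;
    int Value;
  };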
@@ -435,7 +435,7 @@
llvm::DICompositeType FwdDecl =
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
llvm::DIType(), llvm::DIArray());
-
+
// If this is just a forward declaration, return it.
if (!Decl->getDefinition(M->getContext()))
return FwdDecl;
@@ -451,7 +451,7 @@
unsigned FieldNo = 0;
for (RecordDecl::field_iterator I = Decl->field_begin(),
- E = Decl->field_end();
+ E = Decl->field_end();
I != E; ++I, ++FieldNo) {
FieldDecl *Field = *I;
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
@@ -467,7 +467,7 @@
PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
llvm::DICompileUnit FieldDefUnit;
unsigned FieldLine = 0;
-
+
if (!PLoc.isInvalid()) {
FieldDefUnit = getOrCreateCompileUnit(FieldDefLoc);
FieldLine = PLoc.getLine();
@@ -477,18 +477,18 @@
uint64_t FieldSize = 0;
unsigned FieldAlign = 0;
if (!FType->isIncompleteArrayType()) {
-
+
// Bit size, align and offset of the type.
FieldSize = M->getContext().getTypeSize(FType);
Expr *BitWidth = Field->getBitWidth();
if (BitWidth)
FieldSize = BitWidth->EvaluateAsInt(M->getContext()).getZExtValue();
-
+
FieldAlign = M->getContext().getTypeAlign(FType);
}
- uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
-
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
// Create a DW_TAG_member node to remember the offset of this field in the
// struct. FIXME: This is an absolutely insane way to capture this
// information. When we gut debug info, this should be fixed.
@@ -498,14 +498,14 @@
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
}
-
+
llvm::DIArray Elements =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
+
llvm::DICompositeType RealDecl =
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
Align, 0, 0, llvm::DIType(), Elements);
@@ -515,7 +515,7 @@
FwdDecl.replaceAllUsesWith(RealDecl);
// Update TypeCache.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
return RealDecl;
}
@@ -523,7 +523,7 @@
llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DICompileUnit Unit) {
ObjCInterfaceDecl *Decl = Ty->getDecl();
-
+
unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
SourceManager &SM = M->getContext().getSourceManager();
@@ -534,7 +534,7 @@
PresumedLoc PLoc = SM.getPresumedLoc(Decl->getLocation());
unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
+
unsigned RuntimeLang = DefUnit.getLanguage();
// To handle recursive interface, we
@@ -547,7 +547,7 @@
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, 0, 0, 0, 0,
llvm::DIType(), llvm::DIArray(),
RuntimeLang);
-
+
// If this is just a forward declaration, return it.
if (Decl->isForwardDecl())
return FwdDecl;
@@ -561,9 +561,9 @@
ObjCInterfaceDecl *SClass = Decl->getSuperClass();
if (SClass) {
- llvm::DIType SClassTy =
+ llvm::DIType SClassTy =
getOrCreateType(M->getContext().getObjCInterfaceType(SClass), Unit);
- llvm::DIType InhTag =
+ llvm::DIType InhTag =
DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_inheritance,
Unit, "", llvm::DICompileUnit(), 0, 0, 0,
0 /* offset */, 0, SClassTy);
@@ -590,13 +590,13 @@
PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
unsigned FieldLine = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
+
QualType FType = Field->getType();
uint64_t FieldSize = 0;
unsigned FieldAlign = 0;
if (!FType->isIncompleteArrayType()) {
-
+
// Bit size, align and offset of the type.
FieldSize = M->getContext().getTypeSize(FType);
Expr *BitWidth = Field->getBitWidth();
@@ -606,14 +606,14 @@
FieldAlign = M->getContext().getTypeAlign(FType);
}
- uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
-
+ uint64_t FieldOffset = RL.getFieldOffset(FieldNo);
+
unsigned Flags = 0;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
Flags = llvm::DIType::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIType::FlagPrivate;
-
+
// Create a DW_TAG_member node to remember the offset of this field in the
// struct. FIXME: This is an absolutely insane way to capture this
// information. When we gut debug info, this should be fixed.
@@ -623,14 +623,14 @@
FieldOffset, Flags, FieldTy);
EltTys.push_back(FieldTy);
}
-
+
llvm::DIArray Elements =
DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
// Bit size, align and offset of the type.
uint64_t Size = M->getContext().getTypeSize(Ty);
uint64_t Align = M->getContext().getTypeAlign(Ty);
-
+
llvm::DICompositeType RealDecl =
DebugFactory.CreateCompositeType(Tag, Unit, Name, DefUnit, Line, Size,
Align, 0, 0, llvm::DIType(), Elements,
@@ -641,7 +641,7 @@
FwdDecl.replaceAllUsesWith(RealDecl);
// Update TypeCache.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
return RealDecl;
}
@@ -652,13 +652,13 @@
llvm::SmallVector<llvm::DIDescriptor, 32> Enumerators;
// Create DIEnumerator elements for each enumerator.
- for (EnumDecl::enumerator_iterator
+ for (EnumDecl::enumerator_iterator
Enum = Decl->enumerator_begin(), EnumEnd = Decl->enumerator_end();
Enum != EnumEnd; ++Enum) {
Enumerators.push_back(DebugFactory.CreateEnumerator(Enum->getNameAsString(),
Enum->getInitVal().getZExtValue()));
}
-
+
// Return a CompositeType for the enum itself.
llvm::DIArray EltArray =
DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
@@ -670,7 +670,7 @@
PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
+
// Size and align of the type.
uint64_t Size = 0;
unsigned Align = 0;
@@ -678,7 +678,7 @@
Size = M->getContext().getTypeSize(Ty);
Align = M->getContext().getTypeAlign(Ty);
}
-
+
return DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_enumeration_type,
Unit, EnumName, DefUnit, Line,
Size, Align, 0, 0,
@@ -691,7 +691,7 @@
return CreateType(RT, Unit);
else if (const EnumType *ET = dyn_cast<EnumType>(Ty))
return CreateType(ET, Unit);
-
+
return llvm::DIType();
}
@@ -699,8 +699,8 @@
llvm::DICompileUnit Unit) {
uint64_t Size;
uint64_t Align;
-
-
+
+
// FIXME: make getTypeAlign() aware of VLAs and incomplete array types
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
@@ -714,7 +714,7 @@
Size = M->getContext().getTypeSize(Ty);
Align = M->getContext().getTypeAlign(Ty);
}
-
+
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
// interior arrays, do we care? Why aren't nested arrays represented the
// obvious/recursive way?
@@ -722,14 +722,14 @@
QualType EltTy(Ty, 0);
while ((Ty = dyn_cast<ArrayType>(EltTy))) {
uint64_t Upper = 0;
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
if (CAT->getSize().getZExtValue())
- Upper = CAT->getSize().getZExtValue() - 1;
+ Upper = CAT->getSize().getZExtValue() - 1;
// FIXME: Verify this is right for VLAs.
Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, Upper));
EltTy = Ty->getElementType();
}
-
+
llvm::DIArray SubscriptArray =
DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
@@ -747,7 +747,7 @@
llvm::DICompileUnit Unit) {
if (Ty.isNull())
return llvm::DIType();
-
+
// Check TypeCache first.
llvm::DIType &Slot = TypeCache[Ty.getAsOpaquePtr()];
if (!Slot.isNull()) return Slot;
@@ -778,7 +778,7 @@
return llvm::DIType();
case Type::ObjCObjectPointer:
return Slot = CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
- case Type::ObjCInterface:
+ case Type::ObjCInterface:
return Slot = CreateType(cast<ObjCInterfaceType>(Ty), Unit);
case Type::Builtin: return Slot = CreateType(cast<BuiltinType>(Ty), Unit);
case Type::Complex: return Slot = CreateType(cast<ComplexType>(Ty), Unit);
@@ -788,14 +788,14 @@
case Type::Typedef: return Slot = CreateType(cast<TypedefType>(Ty), Unit);
case Type::Record:
case Type::Enum:
- return Slot = CreateType(cast<TagType>(Ty), Unit);
+ return Slot = CreateType(cast<TagType>(Ty), Unit);
case Type::FunctionProto:
case Type::FunctionNoProto:
return Slot = CreateType(cast<FunctionType>(Ty), Unit);
case Type::Elaborated:
return Slot = getOrCreateType(cast<ElaboratedType>(Ty)->getUnderlyingType(),
Unit);
-
+
case Type::ConstantArray:
case Type::ConstantArrayWithExpr:
case Type::ConstantArrayWithoutExpr:
@@ -812,7 +812,7 @@
return Slot = getOrCreateType(cast<DecltypeType>(Ty)->getUnderlyingType(),
Unit);
}
-
+
return Slot;
}
@@ -822,25 +822,25 @@
llvm::Function *Fn,
CGBuilderTy &Builder) {
const char *LinkageName = Name;
-
+
// Skip the asm prefix if it exists.
//
// FIXME: This should probably be the unmangled name?
if (Name[0] == '\01')
++Name;
-
+
// FIXME: Why is this using CurLoc???
llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
SourceManager &SM = M->getContext().getSourceManager();
unsigned LineNo = SM.getPresumedLoc(CurLoc).getLine();
-
+
llvm::DISubprogram SP =
DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
getOrCreateType(ReturnType, Unit),
Fn->hasInternalLinkage(), true/*definition*/);
-
+
DebugFactory.InsertSubprogramStart(SP, Builder.GetInsertBlock());
-
+
// Push function on region stack.
RegionStack.push_back(SP);
}
@@ -848,10 +848,10 @@
void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
-
+
// Don't bother if things are the same as last time.
SourceManager &SM = M->getContext().getSourceManager();
- if (CurLoc == PrevLoc
+ if (CurLoc == PrevLoc
|| (SM.getInstantiationLineNumber(CurLoc) ==
SM.getInstantiationLineNumber(PrevLoc)
&& SM.isFromSameFile(CurLoc, PrevLoc)))
@@ -864,7 +864,7 @@
llvm::DICompileUnit Unit = getOrCreateCompileUnit(CurLoc);
PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
DebugFactory.InsertStopPoint(Unit, PLoc.getLine(), PLoc.getColumn(),
- Builder.GetInsertBlock());
+ Builder.GetInsertBlock());
}
/// EmitRegionStart - Constructs the debug code for entering a declarative
@@ -885,7 +885,7 @@
// Provide a region stop point.
EmitStopPoint(Fn, Builder);
-
+
DebugFactory.InsertRegionEnd(RegionStack.back(), Builder.GetInsertBlock());
RegionStack.pop_back();
}
@@ -914,9 +914,9 @@
else
Unit = llvm::DICompileUnit();
-
+
// Create the descriptor for the variable.
- llvm::DIVariable D =
+ llvm::DIVariable D =
DebugFactory.CreateVariable(Tag, RegionStack.back(),Decl->getNameAsString(),
Unit, Line, Ty);
// Insert an llvm.dbg.declare into the current block.
@@ -939,7 +939,7 @@
/// EmitGlobalVariable - Emit information about a global variable.
-void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *Decl) {
// Do not emit variable debug information while generating optimized code.
@@ -959,14 +959,14 @@
QualType T = Decl->getType();
if (T->isIncompleteArrayType()) {
-
+
// CodeGen turns int[] into int[1] so we'll do the same here.
llvm::APSInt ConstVal(32);
-
+
ConstVal = 1;
QualType ET = M->getContext().getAsArrayType(T)->getElementType();
-
- T = M->getContext().getConstantArrayType(ET, ConstVal,
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
ArrayType::Normal, 0);
}
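The int[] case arises from C tentative definitions, which C++ rejects, so it is shown as a C fragment:

  int Data[];   /* tentative definition, incomplete array type */

Debug info, like CodeGen, describes Data as if it had been declared int Data[1].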
@@ -977,7 +977,7 @@
}
/// EmitGlobalVariable - Emit information about an objective-c interface.
-void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *Decl) {
// Create global variable debug descriptor.
llvm::DICompileUnit Unit = getOrCreateCompileUnit(Decl->getLocation());
@@ -989,14 +989,14 @@
QualType T = M->getContext().getObjCInterfaceType(Decl);
if (T->isIncompleteArrayType()) {
-
+
// CodeGen turns int[] into int[1] so we'll do the same here.
llvm::APSInt ConstVal(32);
-
+
ConstVal = 1;
QualType ET = M->getContext().getAsArrayType(T)->getElementType();
-
- T = M->getContext().getConstantArrayType(ET, ConstVal,
+
+ T = M->getContext().getConstantArrayType(ET, ConstVal,
ArrayType::Normal, 0);
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index ac28e5b..682f7ae 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This is the source level debug info generator for llvm translation.
+// This is the source level debug info generator for llvm translation.
//
//===----------------------------------------------------------------------===//
@@ -29,14 +29,14 @@
namespace CodeGen {
class CodeGenModule;
-/// CGDebugInfo - This class gathers all debug information during compilation
-/// and is responsible for emitting to llvm globals or pass directly to
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to llvm globals or passing it directly to
/// the backend.
class CGDebugInfo {
CodeGenModule *M;
bool isMainCompileUnitCreated;
llvm::DIFactory DebugFactory;
-
+
SourceLocation CurLoc, PrevLoc;
/// CompileUnitCache - Cache of previously constructed CompileUnits.
@@ -45,7 +45,7 @@
/// TypeCache - Cache of previously constructed Types.
// FIXME: Eliminate this map. Be careful of iterator invalidation.
std::map<void *, llvm::DIType> TypeCache;
-
+
bool BlockLiteralGenericSet;
llvm::DIType BlockLiteralGeneric;
@@ -56,7 +56,7 @@
llvm::DIType CreateType(const ComplexType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateCVRType(QualType Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const TypedefType *Ty, llvm::DICompileUnit U);
- llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
llvm::DICompileUnit Unit);
llvm::DIType CreateType(const PointerType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DICompileUnit U);
@@ -83,12 +83,12 @@
/// start of a new function.
void EmitFunctionStart(const char *Name, QualType ReturnType,
llvm::Function *Fn, CGBuilderTy &Builder);
-
+
/// EmitRegionStart - Emit a call to llvm.dbg.region.start to indicate start
- /// of a new block.
+ /// of a new block.
void EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder);
-
- /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
+
+ /// EmitRegionEnd - Emit call to llvm.dbg.region.end to indicate end of a
/// block.
void EmitRegionEnd(llvm::Function *Fn, CGBuilderTy &Builder);
@@ -101,19 +101,19 @@
/// variable declaration.
void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
CGBuilderTy &Builder);
-
+
/// EmitGlobalVariable - Emit information about a global variable.
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
/// EmitGlobalVariable - Emit information about an objective-c interface.
void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
-
+
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
CGBuilderTy &Builder);
-
-
+
+
/// getOrCreateCompileUnit - Get the compile unit from the cache or create a
/// new one if necessary.
llvm::DICompileUnit getOrCreateCompileUnit(SourceLocation Loc);
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 2b8348e..e637d40 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -35,22 +35,22 @@
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
- case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::EnumConstant: // enum ? { X = ? }
case Decl::CXXRecord: // struct/union/class X; [C++]
// None of these decls require codegen support.
return;
-
+
case Decl::Var: {
const VarDecl &VD = cast<VarDecl>(D);
- assert(VD.isBlockVarDecl() &&
+ assert(VD.isBlockVarDecl() &&
"Should not see file-scope variables inside a function!");
return EmitBlockVarDecl(VD);
}
-
+
case Decl::Typedef: { // typedef int X;
const TypedefDecl &TD = cast<TypedefDecl>(D);
QualType Ty = TD.getUnderlyingType();
-
+
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
}
@@ -62,7 +62,7 @@
void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
if (D.hasAttr<AsmLabelAttr>())
CGM.ErrorUnsupported(&D, "__asm__");
-
+
switch (D.getStorageClass()) {
case VarDecl::None:
case VarDecl::Auto:
@@ -98,22 +98,22 @@
ContextName = CurFn->getName();
else
assert(0 && "Unknown context for block var decl");
-
+
Name = ContextName + Separator + D.getNameAsString();
}
const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
return new llvm::GlobalVariable(CGM.getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
- CGM.EmitNullConstant(D.getType()), Name, 0,
+ CGM.EmitNullConstant(D.getType()), Name, 0,
D.isThreadSpecified(), Ty.getAddressSpace());
}
-void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
+void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
llvm::Value *&DMEntry = LocalDeclMap[&D];
assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
-
- llvm::GlobalVariable *GV =
+
+ llvm::GlobalVariable *GV =
CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);
// Store into LocalDeclMap before generating initializer to handle
@@ -143,7 +143,7 @@
// in the LLVM type system.)
if (GV->getType() != Init->getType()) {
llvm::GlobalVariable *OldGV = GV;
-
+
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
OldGV->isConstant(),
OldGV->getLinkage(), Init, "",
@@ -154,13 +154,13 @@
GV->takeName(OldGV);
// Replace all uses of the old global with the new global
- llvm::Constant *NewPtrForOldDecl =
+ llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
OldGV->replaceAllUsesWith(NewPtrForOldDecl);
// Erase the old global, since it is no longer used.
OldGV->eraseFromParent();
- }
+ }
GV->setInitializer(Init);
}
@@ -170,14 +170,14 @@
if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
SourceManager &SM = CGM.getContext().getSourceManager();
llvm::Constant *Ann =
- CGM.EmitAnnotateAttr(GV, AA,
+ CGM.EmitAnnotateAttr(GV, AA,
SM.getInstantiationLineNumber(D.getLocation()));
CGM.AddAnnotation(Ann);
}
if (const SectionAttr *SA = D.getAttr<SectionAttr>())
GV->setSection(SA->getName());
-
+
if (D.hasAttr<UsedAttr>())
CGM.AddUsedGlobal(GV);
@@ -198,7 +198,7 @@
DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
}
}
-
+
/// BuildByRefType - This routine changes a __block variable declared as T x
/// into:
///
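As a sketch of the record BuildByRefType produces for '__block T x' (inferred from the 'needsCopyDispose*2+5' field count in the hunk below; field names are descriptive stand-ins, not clang's):

typedef double T;                     /* stand-in for the variable's type */
struct byref_x {
  void *isa;                          /* 0 for a stack-allocated byref */
  struct byref_x *forwarding;         /* points at itself until moved to the heap */
  int flags;
  int size;
  void (*byref_keep)(void *, void *); /* present only when T needs copy/dispose */
  void (*byref_dispose)(void *);      /* present only when T needs copy/dispose */
  T x;                                /* the variable, padded to its alignment */
};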
@@ -216,7 +216,7 @@
const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
QualType Ty = D->getType();
uint64_t Align = getContext().getDeclAlignInBytes(D);
-
+
const llvm::Type *LTy = ConvertType(Ty);
bool needsCopyDispose = BlockRequiresCopying(Ty);
std::vector<const llvm::Type *> Types(needsCopyDispose*2+5);
@@ -256,7 +256,7 @@
LTy = BuildByRefType(&D);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameAsString().c_str());
-
+
if (isByRef)
Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
Alloc->setAlignment(Align);
@@ -265,11 +265,11 @@
// Targets that don't support recursion emit locals as globals.
const char *Class =
D.getStorageClass() == VarDecl::Register ? ".reg." : ".auto.";
- DeclPtr = CreateStaticBlockVarDecl(D, Class,
+ DeclPtr = CreateStaticBlockVarDecl(D, Class,
llvm::GlobalValue
::InternalLinkage);
}
-
+
// FIXME: Can this happen?
if (Ty->isVariablyModifiedType())
EmitVLASize(Ty);
@@ -281,26 +281,26 @@
const llvm::Type *LTy =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
llvm::Value *Stack = CreateTempAlloca(LTy, "saved_stack");
-
+
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
llvm::Value *V = Builder.CreateCall(F);
-
+
Builder.CreateStore(V, Stack);
DidCallStackSave = true;
-
+
{
// Push a cleanup block and restore the stack there.
CleanupScope scope(*this);
-
+
V = Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
Builder.CreateCall(F, V);
}
}
-
+
// Get the element type.
- const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
+ const llvm::Type *LElemTy = ConvertTypeForMem(Ty);
const llvm::Type *LElemPtrTy =
llvm::PointerType::get(LElemTy, D.getType().getAddressSpace());
@@ -309,7 +309,7 @@
// Downcast the VLA size expression
VLASize = Builder.CreateIntCast(VLASize, llvm::Type::getInt32Ty(VMContext),
false, "tmp");
-
+
// Allocate memory for the array.
llvm::Value *VLA = Builder.CreateAlloca(llvm::Type::getInt8Ty(VMContext),
VLASize, "vla");
@@ -323,16 +323,16 @@
// Emit debug info for local var declaration.
if (CGDebugInfo *DI = getDebugInfo()) {
assert(HaveInsertPoint() && "Unexpected unreachable point!");
-
+
DI->setLocation(D.getLocation());
if (Target.useGlobalsForAutomaticVariables()) {
DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
} else if (isByRef) {
- // FIXME: This code is broken and will not emit debug info for the
+ // FIXME: This code is broken and will not emit debug info for the
// variable. The right way to do this would be to tell LLVM that this is a
// byref pointer, and what the offset is. Unfortunately, right now it's
// not possible unless we create a DIType that corresponds to the byref
- // struct.
+ // struct.
/*
llvm::Value *Loc;
bool needsCopyDispose = BlockRequiresCopying(Ty);
@@ -369,7 +369,7 @@
EmitStoreOfScalar(RV.getScalarVal(), Loc, false, Ty);
} else if (!hasAggregateLLVMType(Init->getType())) {
llvm::Value *V = EmitScalarExpr(Init);
- EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
+ EmitStoreOfScalar(V, Loc, D.getType().isVolatileQualified(),
D.getType());
} else if (Init->getType()->isAnyComplexType()) {
EmitComplexExprIntoAddr(Init, Loc, D.getType().isVolatileQualified());
@@ -377,7 +377,7 @@
EmitAggExpr(Init, Loc, D.getType().isVolatileQualified());
}
}
-
+
if (isByRef) {
const llvm::PointerType *PtrToInt8Ty
= llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
@@ -448,19 +448,19 @@
const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
assert(D && "EmitLocalBlockVarDecl - destructor is null");
assert(!Ty->getAs<ArrayType>() && "FIXME - destruction of arrays NYI");
-
+
CleanupScope scope(*this);
EmitCXXDestructorCall(D, Dtor_Complete, DeclPtr);
}
}
-
+
// Handle the cleanup attribute
if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
const FunctionDecl *FD = CA->getFunctionDecl();
-
+
llvm::Constant* F = CGM.GetAddrOfFunction(GlobalDecl(FD));
assert(F && "Could not find function!");
-
+
CleanupScope scope(*this);
const CGFunctionInfo &Info = CGM.getTypes().getFunctionInfo(FD);
@@ -469,15 +469,15 @@
// the type of the pointer. An example of this is
// void f(void* arg);
// __attribute__((cleanup(f))) void *g;
- //
+ //
// To fix this we insert a bitcast here.
QualType ArgTy = Info.arg_begin()->type;
DeclPtr = Builder.CreateBitCast(DeclPtr, ConvertType(ArgTy));
-
+
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(DeclPtr),
+ Args.push_back(std::make_pair(RValue::get(DeclPtr),
getContext().getPointerType(D.getType())));
-
+
EmitCall(Info, F, Args);
}
@@ -489,14 +489,14 @@
}
}
-/// Emit an alloca (or GlobalValue depending on target)
+/// Emit an alloca (or GlobalValue depending on target)
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg) {
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
QualType Ty = D.getType();
-
+
llvm::Value *DeclPtr;
if (!Ty->isConstantSizeType()) {
// Variable sized values always are passed by-reference.
@@ -510,7 +510,7 @@
Name += ".addr";
DeclPtr = CreateTempAlloca(LTy);
DeclPtr->setName(Name.c_str());
-
+
// Store the initial value into the alloca.
EmitStoreOfScalar(Arg, DeclPtr, Ty.isVolatileQualified(), Ty);
} else {
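The stacksave/stackrestore hunk above is triggered by variably modified types; a minimal C99 example:

/* 'row' is a VLA: the code above emits llvm.stacksave, an i8 alloca of
   n*sizeof(int) bytes, and a cleanup scope that calls llvm.stackrestore
   when the block exits. */
void fill_row(int n) {
  int row[n];
  for (int i = 0; i < n; ++i)
    row[i] = i;
}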
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 4a04bd3..969b789 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -847,7 +847,7 @@
}
std::string FunctionName =
- PredefinedExpr::ComputeName(getContext(), (PredefinedExpr::IdentType)Type,
+ PredefinedExpr::ComputeName(getContext(), (PredefinedExpr::IdentType)Type,
CurCodeDecl);
GlobalVarName += FunctionName;
@@ -1073,8 +1073,7 @@
LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
FieldDecl* Field,
bool isUnion,
- unsigned CVRQualifiers)
-{
+ unsigned CVRQualifiers) {
if (Field->isBitField())
return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index f8e9c56..8bda0f3 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -62,7 +62,7 @@
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
-
+
void VisitStmt(Stmt *S) {
CGF.ErrorUnsupported(S, "aggregate expression");
}
@@ -75,18 +75,18 @@
void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
void VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- EmitAggLoadOfLValue(E);
+ EmitAggLoadOfLValue(E);
}
void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
EmitAggLoadOfLValue(E);
}
void VisitBlockDeclRefExpr(const BlockDeclRefExpr *E) {
- EmitAggLoadOfLValue(E);
+ EmitAggLoadOfLValue(E);
}
void VisitPredefinedExpr(const PredefinedExpr *E) {
- EmitAggLoadOfLValue(E);
+ EmitAggLoadOfLValue(E);
}
-
+
// Operators.
void VisitCastExpr(CastExpr *E);
void VisitCallExpr(const CallExpr *E);
@@ -101,7 +101,7 @@
}
void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E);
void VisitObjCImplicitSetterGetterRefExpr(ObjCImplicitSetterGetterRefExpr *E);
-
+
void VisitConditionalOperator(const ConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
@@ -185,18 +185,18 @@
return;
}
if (E->getCastKind() == CastExpr::CK_UserDefinedConversion) {
- if (const CXXFunctionalCastExpr *CXXFExpr =
+ if (const CXXFunctionalCastExpr *CXXFExpr =
dyn_cast<CXXFunctionalCastExpr>(E))
CGF.EmitCXXFunctionalCastExpr(CXXFExpr);
- else
+ else
if (isa<CStyleCastExpr>(E))
Visit(E->getSubExpr());
return;
}
-
+
// FIXME: Remove the CK_Unknown check here.
- assert((E->getCastKind() == CastExpr::CK_NoOp ||
- E->getCastKind() == CastExpr::CK_Unknown) &&
+ assert((E->getCastKind() == CastExpr::CK_NoOp ||
+ E->getCastKind() == CastExpr::CK_Unknown) &&
"Only no-op casts allowed!");
assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
E->getType()) &&
@@ -209,7 +209,7 @@
EmitAggLoadOfLValue(E);
return;
}
-
+
RValue RV = CGF.EmitCallExpr(E);
EmitFinalDestCopy(E, RV);
}
@@ -259,21 +259,21 @@
if (!AggLoc)
AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
+ CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(),
RValue::getAggregate(AggLoc, VolatileDest));
} else if (LHS.isKVCRef()) {
llvm::Value *AggLoc = DestPtr;
if (!AggLoc)
AggLoc = CGF.CreateTempAlloca(CGF.ConvertType(E->getRHS()->getType()));
CGF.EmitAggExpr(E->getRHS(), AggLoc, VolatileDest);
- CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
+ CGF.EmitObjCPropertySet(LHS.getKVCRefExpr(),
RValue::getAggregate(AggLoc, VolatileDest));
} else {
bool RequiresGCollection = false;
if (CGF.getContext().getLangOptions().NeXTRuntime) {
QualType LHSTy = E->getLHS()->getType();
if (const RecordType *FDTTy = LHSTy.getTypePtr()->getAs<RecordType>())
- RequiresGCollection = FDTTy->getDecl()->hasObjectMember();
+ RequiresGCollection = FDTTy->getDecl()->hasObjectMember();
}
// Codegen the RHS so that it stores directly into the LHS.
CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
@@ -286,27 +286,27 @@
llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
-
+
llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond());
Builder.CreateCondBr(Cond, LHSBlock, RHSBlock);
-
+
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(LHSBlock);
-
+
// Handle the GNU extension for missing LHS.
assert(E->getLHS() && "Must have LHS for aggregate value");
Visit(E->getLHS());
CGF.PopConditionalTempDestruction();
CGF.EmitBranch(ContBlock);
-
+
CGF.PushConditionalTempDestruction();
CGF.EmitBlock(RHSBlock);
-
+
Visit(E->getRHS());
CGF.PopConditionalTempDestruction();
CGF.EmitBranch(ContBlock);
-
+
CGF.EmitBlock(ContBlock);
}
@@ -328,16 +328,16 @@
void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
llvm::Value *Val = DestPtr;
-
+
if (!Val) {
// Create a temporary variable.
Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
// FIXME: volatile
CGF.EmitAggExpr(E->getSubExpr(), Val, false);
- } else
+ } else
Visit(E->getSubExpr());
-
+
// Don't make this a live temporary if we're emitting an initializer expr.
if (!IsInitializer)
CGF.PushCXXTemporary(E->getTemporary(), Val);
@@ -346,7 +346,7 @@
void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
llvm::Value *Val = DestPtr;
-
+
if (!Val) {
// Create a temporary variable.
Val = CGF.CreateTempAlloca(CGF.ConvertTypeForMem(E->getType()), "tmp");
@@ -392,7 +392,7 @@
void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
- // FIXME: Disabled while we figure out what to do about
+ // FIXME: Disabled while we figure out what to do about
// test/CodeGen/bitfield.c
//
// If we can, prefer a copy from a global; this is a lot less code for long
@@ -420,7 +420,7 @@
cast<llvm::PointerType>(DestPtr->getType());
const llvm::ArrayType *AType =
cast<llvm::ArrayType>(APType->getElementType());
-
+
uint64_t NumInitElements = E->getNumInits();
if (E->getNumInits() > 0) {
@@ -435,7 +435,7 @@
uint64_t NumArrayElements = AType->getNumElements();
QualType ElementType = CGF.getContext().getCanonicalType(E->getType());
ElementType = CGF.getContext().getAsArrayType(ElementType)->getElementType();
-
+
unsigned CVRqualifier = ElementType.getCVRQualifiers();
for (uint64_t i = 0; i != NumArrayElements; ++i) {
@@ -449,9 +449,9 @@
}
return;
}
-
+
assert(E->getType()->isRecordType() && "Only support structs/unions here!");
-
+
// Do struct initialization; this code just sets each individual member
// to the appropriate value. This makes bitfield support automatic;
// the disadvantage is that the generated code is more difficult for
@@ -465,7 +465,7 @@
// specified by the initializer list.
if (!E->getInitializedFieldInUnion()) {
// Empty union; we have nothing to do.
-
+
#ifndef NDEBUG
// Make sure that it's really an empty and not a failure of
// semantic analysis.
@@ -491,7 +491,7 @@
return;
}
-
+
// Here we iterate over the fields; this makes it simpler to both
// default-initialize fields and skip over unnamed fields.
for (RecordDecl::field_iterator Field = SD->field_begin(),
@@ -528,13 +528,13 @@
/// true, DestPtr cannot be 0.
void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr,
bool VolatileDest, bool IgnoreResult,
- bool IsInitializer,
+ bool IsInitializer,
bool RequiresGCollection) {
assert(E && hasAggregateLLVMType(E->getType()) &&
"Invalid aggregate expression to emit");
assert ((DestPtr != 0 || VolatileDest == false)
&& "volatile aggregate can't be 0");
-
+
AggExprEmitter(*this, DestPtr, VolatileDest, IgnoreResult, IsInitializer,
RequiresGCollection)
.Visit(const_cast<Expr*>(E));
@@ -550,7 +550,7 @@
llvm::Value *SrcPtr, QualType Ty,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
-
+
// Aggregate assignment turns into llvm.memcpy. This is almost valid per
// C99 6.5.16.1p3, which states "If the value being stored in an object is
// read from another object that overlaps in any way the storage of the first
@@ -567,14 +567,14 @@
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
if (SrcPtr->getType() != BP)
SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp");
-
+
// Get size and alignment info for this aggregate.
std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
-
+
// FIXME: Handle variable sized types.
const llvm::Type *IntPtr =
llvm::IntegerType::get(VMContext, LLVMPointerWidth);
-
+
// FIXME: If we have a volatile struct, the optimizer can remove what might
// appear to be `extra' memory ops:
//
@@ -591,6 +591,6 @@
DestPtr, SrcPtr,
// TypeInfo.first describes size in bits.
llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
TypeInfo.second/8));
}
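A minimal illustration of the EmitAggregateCopy hunk above; the byte count comes from TypeInfo.first/8 and the alignment from TypeInfo.second/8:

struct S { int a, b, c, d; };             /* 16 bytes, 4-byte alignment */
void copy_s(struct S *dst, const struct S *src) {
  *dst = *src;   /* lowered, roughly, to llvm.memcpy(dst, src, 16, align 4) */
}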
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 593406f..bea6d80 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -33,63 +33,63 @@
CodeGenModule &CGM;
CodeGenFunction *CGF;
- bool Packed;
+ bool Packed;
unsigned NextFieldOffsetInBytes;
-
+
std::vector<llvm::Constant *> Elements;
ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
: CGM(CGM), CGF(CGF), Packed(false), NextFieldOffsetInBytes(0) { }
- bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ bool AppendField(const FieldDecl *Field, uint64_t FieldOffset,
const Expr *InitExpr) {
uint64_t FieldOffsetInBytes = FieldOffset / 8;
-
- assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
+
+ assert(NextFieldOffsetInBytes <= FieldOffsetInBytes
&& "Field offset mismatch!");
-
+
// Emit the field.
llvm::Constant *C = CGM.EmitConstantExpr(InitExpr, Field->getType(), CGF);
if (!C)
return false;
unsigned FieldAlignment = getAlignment(C);
-
+
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
+ uint64_t AlignedNextFieldOffsetInBytes =
llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
-
+
if (AlignedNextFieldOffsetInBytes > FieldOffsetInBytes) {
std::vector<llvm::Constant *> PackedElements;
-
+
assert(!Packed && "Alignment is wrong even with a packed struct!");
-
+
// Convert the struct to a packed struct.
uint64_t ElementOffsetInBytes = 0;
-
+
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
llvm::Constant *C = Elements[i];
-
- unsigned ElementAlign =
+
+ unsigned ElementAlign =
CGM.getTargetData().getABITypeAlignment(C->getType());
- uint64_t AlignedElementOffsetInBytes =
+ uint64_t AlignedElementOffsetInBytes =
llvm::RoundUpToAlignment(ElementOffsetInBytes, ElementAlign);
-
+
if (AlignedElementOffsetInBytes > ElementOffsetInBytes) {
// We need some padding.
- uint64_t NumBytes =
+ uint64_t NumBytes =
AlignedElementOffsetInBytes - ElementOffsetInBytes;
-
+
const llvm::Type *Ty = llvm::Type::getInt8Ty(CGF->getLLVMContext());
- if (NumBytes > 1)
+ if (NumBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumBytes);
-
+
llvm::Constant *Padding = llvm::Constant::getNullValue(Ty);
PackedElements.push_back(Padding);
ElementOffsetInBytes += getSizeInBytes(Padding);
}
-
+
PackedElements.push_back(C);
ElementOffsetInBytes += getSizeInBytes(C);
}
@@ -105,51 +105,51 @@
if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
// We need to append padding.
AppendPadding(FieldOffsetInBytes - NextFieldOffsetInBytes);
-
+
assert(NextFieldOffsetInBytes == FieldOffsetInBytes &&
"Did not add enough padding!");
-
+
AlignedNextFieldOffsetInBytes = NextFieldOffsetInBytes;
}
-
+
// Add the field.
Elements.push_back(C);
NextFieldOffsetInBytes = AlignedNextFieldOffsetInBytes + getSizeInBytes(C);
return true;
}
-
- bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+
+ bool AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
const Expr *InitExpr) {
- llvm::ConstantInt *CI =
- cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
- Field->getType(),
+ llvm::ConstantInt *CI =
+ cast_or_null<llvm::ConstantInt>(CGM.EmitConstantExpr(InitExpr,
+ Field->getType(),
CGF));
// FIXME: Can this ever happen?
if (!CI)
return false;
-
+
if (FieldOffset > NextFieldOffsetInBytes * 8) {
// We need to add padding.
- uint64_t NumBytes =
- llvm::RoundUpToAlignment(FieldOffset -
+ uint64_t NumBytes =
+ llvm::RoundUpToAlignment(FieldOffset -
NextFieldOffsetInBytes * 8, 8) / 8;
-
+
AppendPadding(NumBytes);
}
- uint64_t FieldSize =
+ uint64_t FieldSize =
Field->getBitWidth()->EvaluateAsInt(CGM.getContext()).getZExtValue();
llvm::APInt FieldValue = CI->getValue();
-
+
// Promote the size of FieldValue if necessary
// FIXME: This should never occur, but currently it can because initializer
// constants are cast to bool, and because clang is not enforcing bitfield
// width limits.
if (FieldSize > FieldValue.getBitWidth())
FieldValue.zext(FieldSize);
-
+
// Truncate the size of FieldValue to the bit field size.
if (FieldSize < FieldValue.getBitWidth())
FieldValue.trunc(FieldSize);
@@ -158,18 +158,18 @@
// Either part of the field or the entire field can go into the previous
// byte.
assert(!Elements.empty() && "Elements can't be empty!");
-
- unsigned BitsInPreviousByte =
+
+ unsigned BitsInPreviousByte =
NextFieldOffsetInBytes * 8 - FieldOffset;
-
- bool FitsCompletelyInPreviousByte =
+
+ bool FitsCompletelyInPreviousByte =
BitsInPreviousByte >= FieldValue.getBitWidth();
-
+
llvm::APInt Tmp = FieldValue;
-
+
if (!FitsCompletelyInPreviousByte) {
unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
-
+
if (CGM.getTargetData().isBigEndian()) {
Tmp = Tmp.lshr(NewFieldWidth);
Tmp.trunc(BitsInPreviousByte);
@@ -184,7 +184,7 @@
FieldValue.trunc(NewFieldWidth);
}
}
-
+
Tmp.zext(8);
if (CGM.getTargetData().isBigEndian()) {
if (FitsCompletelyInPreviousByte)
@@ -196,14 +196,14 @@
// Or in the bits that go into the previous byte.
Tmp |= cast<llvm::ConstantInt>(Elements.back())->getValue();
Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
-
+
if (FitsCompletelyInPreviousByte)
return true;
}
-
+
while (FieldValue.getBitWidth() > 8) {
llvm::APInt Tmp;
-
+
if (CGM.getTargetData().isBigEndian()) {
// We want the high bits.
Tmp = FieldValue;
@@ -213,13 +213,13 @@
// We want the low bits.
Tmp = FieldValue;
Tmp.trunc(8);
-
+
FieldValue = FieldValue.lshr(8);
}
-
+
Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
NextFieldOffsetInBytes++;
-
+
FieldValue.trunc(FieldValue.getBitWidth() - 8);
}
@@ -231,10 +231,10 @@
if (FieldValue.getBitWidth() < 8) {
if (CGM.getTargetData().isBigEndian()) {
unsigned BitWidth = FieldValue.getBitWidth();
-
+
FieldValue.zext(8);
FieldValue = FieldValue << (8 - BitWidth);
- } else
+ } else
FieldValue.zext(8);
}
@@ -244,19 +244,19 @@
NextFieldOffsetInBytes++;
return true;
}
-
+
void AppendPadding(uint64_t NumBytes) {
if (!NumBytes)
return;
const llvm::Type *Ty = llvm::Type::getInt8Ty(CGM.getLLVMContext());
- if (NumBytes > 1)
+ if (NumBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumBytes);
llvm::Constant *C = llvm::Constant::getNullValue(Ty);
Elements.push_back(C);
assert(getAlignment(C) == 1 && "Padding must have 1 byte alignment!");
-
+
NextFieldOffsetInBytes += getSizeInBytes(C);
}
@@ -265,19 +265,19 @@
uint64_t RecordSizeInBytes = RecordSize / 8;
assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
-
+
unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
AppendPadding(NumPadBytes);
}
-
+
bool Build(InitListExpr *ILE) {
RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
-
+
unsigned FieldNo = 0;
unsigned ElementNo = 0;
- for (RecordDecl::field_iterator Field = RD->field_begin(),
- FieldEnd = RD->field_end();
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
ElementNo < ILE->getNumInits() && Field != FieldEnd;
++Field, ++FieldNo) {
if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
@@ -286,7 +286,7 @@
if (Field->isBitField()) {
if (!Field->getIdentifier())
continue;
-
+
if (!AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
ILE->getInit(ElementNo)))
return false;
@@ -295,63 +295,63 @@
ILE->getInit(ElementNo)))
return false;
}
-
+
ElementNo++;
}
-
+
uint64_t LayoutSizeInBytes = Layout.getSize() / 8;
-
+
if (NextFieldOffsetInBytes > LayoutSizeInBytes) {
// If the struct is bigger than the size of the record type,
// we must have a flexible array member at the end.
assert(RD->hasFlexibleArrayMember() &&
"Must have flexible array member if struct is bigger than type!");
-
+
// No tail padding is necessary.
return true;
}
-
+
// Append tail padding if necessary.
AppendTailPadding(Layout.getSize());
-
- assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
+
+ assert(Layout.getSize() / 8 == NextFieldOffsetInBytes &&
"Tail padding mismatch!");
-
+
return true;
}
-
+
unsigned getAlignment(const llvm::Constant *C) const {
if (Packed)
return 1;
-
+
return CGM.getTargetData().getABITypeAlignment(C->getType());
}
-
+
uint64_t getSizeInBytes(const llvm::Constant *C) const {
return CGM.getTargetData().getTypeAllocSize(C->getType());
}
-
+
public:
static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
InitListExpr *ILE) {
ConstStructBuilder Builder(CGM, CGF);
-
+
if (!Builder.Build(ILE))
return 0;
-
- llvm::Constant *Result =
+
+ llvm::Constant *Result =
llvm::ConstantStruct::get(CGM.getLLVMContext(),
Builder.Elements, Builder.Packed);
assert(llvm::RoundUpToAlignment(Builder.NextFieldOffsetInBytes,
- Builder.getAlignment(Result)) ==
+ Builder.getAlignment(Result)) ==
Builder.getSizeInBytes(Result) && "Size mismatch!");
return Result;
}
};
-
-class VISIBILITY_HIDDEN ConstExprEmitter :
+
+class VISIBILITY_HIDDEN ConstExprEmitter :
public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
CodeGenModule &CGM;
CodeGenFunction *CGF;
@@ -360,23 +360,23 @@
ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
: CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
}
-
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
-
+
llvm::Constant *VisitStmt(Stmt *S) {
return 0;
}
-
- llvm::Constant *VisitParenExpr(ParenExpr *PE) {
- return Visit(PE->getSubExpr());
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
}
-
+
llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return Visit(E->getInitializer());
}
-
+
llvm::Constant *VisitCastExpr(CastExpr* E) {
switch (E->getCastKind()) {
case CastExpr::CK_ToUnion: {
@@ -386,11 +386,11 @@
const llvm::Type *Ty = ConvertType(E->getType());
Expr *SubExpr = E->getSubExpr();
- llvm::Constant *C =
+ llvm::Constant *C =
CGM.EmitConstantExpr(SubExpr, SubExpr->getType(), CGF);
if (!C)
return 0;
-
+
// Build a struct with the union sub-element as the first member,
// and padded to the appropriate size
std::vector<llvm::Constant*> Elts;
@@ -399,7 +399,7 @@
Types.push_back(C->getType());
unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(Ty);
-
+
assert(CurSize <= TotalSize && "Union size mismatch!");
if (unsigned NumPadBytes = TotalSize - CurSize) {
const llvm::Type *Ty = llvm::Type::getInt8Ty(VMContext);
@@ -409,7 +409,7 @@
Elts.push_back(llvm::Constant::getNullValue(Ty));
Types.push_back(Ty);
}
-
+
llvm::StructType* STy =
llvm::StructType::get(C->getType()->getContext(), Types, false);
return llvm::ConstantStruct::get(STy, Elts);
@@ -438,7 +438,7 @@
unsigned NumInitElements = ILE->getNumInits();
// FIXME: Check for wide strings
// FIXME: Check for NumInitElements exactly equal to 1??
- if (NumInitElements > 0 &&
+ if (NumInitElements > 0 &&
(isa<StringLiteral>(ILE->getInit(0)) ||
isa<ObjCEncodeExpr>(ILE->getInit(0))) &&
ILE->getType()->getArrayElementTypeNoTypeQual()->isCharType())
@@ -446,7 +446,7 @@
const llvm::Type *ElemTy = AType->getElementType();
unsigned NumElements = AType->getNumElements();
- // Initialising an array requires us to automatically
+ // Initialising an array requires us to automatically
// initialise any elements that have not been initialised explicitly
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
@@ -472,18 +472,18 @@
std::vector<const llvm::Type*> Types;
for (unsigned i = 0; i < Elts.size(); ++i)
Types.push_back(Elts[i]->getType());
- const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ const llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
Types, true);
return llvm::ConstantStruct::get(SType, Elts);
}
- return llvm::ConstantArray::get(AType, Elts);
+ return llvm::ConstantArray::get(AType, Elts);
}
llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
}
-
+
llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
}
@@ -511,13 +511,13 @@
for (; i < NumElements; ++i)
Elts.push_back(llvm::Constant::getNullValue(ElemTy));
- return llvm::ConstantVector::get(VType, Elts);
+ return llvm::ConstantVector::get(VType, Elts);
}
-
+
llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
return CGM.EmitNullConstant(E->getType());
}
-
+
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
if (ILE->getType()->isScalarType()) {
// We have a scalar in braces. Just use the first element.
@@ -527,7 +527,7 @@
}
return CGM.EmitNullConstant(ILE->getType());
}
-
+
if (ILE->getType()->isArrayType())
return EmitArrayInitialization(ILE);
@@ -548,7 +548,7 @@
llvm::Constant *VisitStringLiteral(StringLiteral *E) {
assert(!E->getType()->isPointerType() && "Strings are always arrays");
-
+
// This must be a string initializing an array in a static initializer.
// Don't emit it as the address of the string, emit the string data itself
// as an inline array.
@@ -563,13 +563,13 @@
std::string Str;
CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
-
+
// Resize the string to the right size, adding zeros at the end, or
// truncating as needed.
Str.resize(CAT->getSize().getZExtValue(), '\0');
return llvm::ConstantArray::get(VMContext, Str, false);
}
-
+
llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
@@ -597,14 +597,14 @@
E->getType().getAddressSpace());
return C;
}
- case Expr::DeclRefExprClass:
+ case Expr::DeclRefExprClass:
case Expr::QualifiedDeclRefExprClass: {
NamedDecl *Decl = cast<DeclRefExpr>(E)->getDecl();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
return CGM.GetAddrOfFunction(GlobalDecl(FD));
if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
// We can never refer to a variable with local storage.
- if (!VD->hasLocalStorage()) {
+ if (!VD->hasLocalStorage()) {
if (VD->isFileVarDecl() || VD->hasExternalStorage())
return CGM.GetAddrOfGlobalVar(VD);
else if (VD->isBlockVarDecl()) {
@@ -627,10 +627,10 @@
case Expr::PredefinedExprClass: {
// __func__/__FUNCTION__ -> "". __PRETTY_FUNCTION__ -> "top level".
std::string Str;
- if (cast<PredefinedExpr>(E)->getIdentType() ==
+ if (cast<PredefinedExpr>(E)->getIdentType() ==
PredefinedExpr::PrettyFunction)
Str = "top level";
-
+
return CGM.GetAddrOfConstantCString(Str, ".tmp");
}
case Expr::AddrLabelExprClass: {
@@ -643,7 +643,7 @@
}
case Expr::CallExprClass: {
CallExpr* CE = cast<CallExpr>(E);
- if (CE->isBuiltinCall(CGM.getContext()) !=
+ if (CE->isBuiltinCall(CGM.getContext()) !=
Builtin::BI__builtin___CFStringMakeConstantString)
break;
const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
@@ -665,23 +665,23 @@
return 0;
}
};
-
+
} // end anonymous namespace.
llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
QualType DestType,
CodeGenFunction *CGF) {
Expr::EvalResult Result;
-
+
bool Success = false;
-
+
if (DestType->isReferenceType())
Success = E->EvaluateAsLValue(Result, Context);
- else
+ else
Success = E->Evaluate(Result, Context);
-
+
if (Success) {
- assert(!Result.HasSideEffects &&
+ assert(!Result.HasSideEffects &&
"Constant expr should not have any side effects!");
switch (Result.Val.getKind()) {
case APValue::Uninitialized:
@@ -689,17 +689,17 @@
return 0;
case APValue::LValue: {
const llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
- llvm::Constant *Offset =
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
Result.Val.getLValueOffset());
-
+
llvm::Constant *C;
if (const Expr *LVBase = Result.Val.getLValueBase()) {
C = ConstExprEmitter(*this, CGF).EmitLValue(const_cast<Expr*>(LVBase));
// Apply offset if necessary.
if (!Offset->isNullValue()) {
- const llvm::Type *Type =
+ const llvm::Type *Type =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Type);
Casted = llvm::ConstantExpr::getGetElementPtr(Casted, &Offset, 1);
@@ -728,9 +728,9 @@
}
}
case APValue::Int: {
- llvm::Constant *C = llvm::ConstantInt::get(VMContext,
+ llvm::Constant *C = llvm::ConstantInt::get(VMContext,
Result.Val.getInt());
-
+
if (C->getType() == llvm::Type::getInt1Ty(VMContext)) {
const llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
C = llvm::ConstantExpr::getZExt(C, BoolTy);
@@ -739,30 +739,30 @@
}
case APValue::ComplexInt: {
llvm::Constant *Complex[2];
-
+
Complex[0] = llvm::ConstantInt::get(VMContext,
Result.Val.getComplexIntReal());
- Complex[1] = llvm::ConstantInt::get(VMContext,
+ Complex[1] = llvm::ConstantInt::get(VMContext,
Result.Val.getComplexIntImag());
-
+
return llvm::ConstantStruct::get(VMContext, Complex, 2);
}
case APValue::Float:
return llvm::ConstantFP::get(VMContext, Result.Val.getFloat());
case APValue::ComplexFloat: {
llvm::Constant *Complex[2];
-
- Complex[0] = llvm::ConstantFP::get(VMContext,
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
Result.Val.getComplexFloatReal());
Complex[1] = llvm::ConstantFP::get(VMContext,
Result.Val.getComplexFloatImag());
-
+
return llvm::ConstantStruct::get(VMContext, Complex, 2);
}
case APValue::Vector: {
llvm::SmallVector<llvm::Constant *, 4> Inits;
unsigned NumElts = Result.Val.getVectorLength();
-
+
for (unsigned i = 0; i != NumElts; ++i) {
APValue &Elt = Result.Val.getVectorElt(i);
if (Elt.isInt())
@@ -787,9 +787,9 @@
// No need to check for member pointers when not compiling C++.
if (!getContext().getLangOptions().CPlusPlus)
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
-
+
if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
-
+
QualType ElementTy = CAT->getElementType();
// FIXME: Handle arrays of structs that contain member pointers.
@@ -799,8 +799,8 @@
std::vector<llvm::Constant *> Array(NumElements);
for (uint64_t i = 0; i != NumElements; ++i)
Array[i] = Element;
-
- const llvm::ArrayType *ATy =
+
+ const llvm::ArrayType *ATy =
cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
return llvm::ConstantArray::get(ATy, Array);
}
@@ -808,19 +808,19 @@
if (const RecordType *RT = T->getAs<RecordType>()) {
const RecordDecl *RD = RT->getDecl();
- // FIXME: It would be better if there was a way to explicitly compute the
+ // FIXME: It would be better if there was a way to explicitly compute the
// record layout instead of converting to a type.
Types.ConvertTagDeclType(RD);
-
+
const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
if (Layout.containsMemberPointer()) {
assert(0 && "FIXME: No support for structs with member pointers yet!");
}
}
-
+
// FIXME: Handle structs that contain member pointers.
- if (T->isMemberPointerType())
+ if (T->isMemberPointerType())
return llvm::Constant::getAllOnesValue(getTypes().ConvertTypeForMem(T));
-
+
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
}
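Illustrative static initializers that exercise the ConstStructBuilder paths above (AppendField, AppendBitField, and the CK_ToUnion padding case):

struct Bits { unsigned a : 3; unsigned b : 11; };
static struct Bits bits = { 5, 100 };   /* bit-fields are packed byte by byte
                                           via AppendBitField */
union U { char c; double d; };
static union U u = { 'x' };             /* CK_ToUnion: the member first, then
                                           7 bytes of zero padding */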
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 3dc9590..b4ce838 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -889,8 +889,7 @@
return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
-Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E)
-{
+Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) {
Value* ResultAsPtr = EmitLValue(E->getSubExpr()).getAddress();
const llvm::Type* ResultType = ConvertType(E->getType());
return Builder.CreatePtrToInt(ResultAsPtr, ResultType, "offsetof");
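VisitUnaryOffsetOf above takes the member's address (conceptually off a null base) and converts it with ptrtoint; for example:

struct Packet { char tag; double payload; };
/* An integer constant expression; typically 8 given double's alignment. */
static unsigned long off = __builtin_offsetof(struct Packet, payload);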
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 5899912..d437df4 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -24,7 +24,7 @@
using namespace CodeGen;
/// Emits an instance of NSConstantString representing the object.
-llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
llvm::Constant *C = CGM.getObjCRuntime().GenerateConstantString(E);
// FIXME: This bitcast should just be made an invariant on the Runtime.
@@ -50,7 +50,7 @@
// Only the lookup mechanism and first two arguments of the method
// implementation vary between runtimes. We can get the receiver and
// arguments in generic code.
-
+
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
const Expr *ReceiverExpr = E->getReceiver();
bool isSuperMessage = false;
@@ -70,7 +70,7 @@
} else {
Receiver = Runtime.GetClass(Builder, OID);
}
-
+
isClassMessage = true;
} else if (isa<ObjCSuperExpr>(E->getReceiver())) {
isSuperMessage = true;
@@ -81,7 +81,7 @@
CallArgList Args;
EmitCallArgs(Args, E->getMethodDecl(), E->arg_begin(), E->arg_end());
-
+
if (isSuperMessage) {
// super is only valid in an Objective-C method
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
@@ -94,7 +94,7 @@
isClassMessage,
Args);
}
- return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
+ return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
Receiver, isClassMessage, Args,
E->getMethodDecl());
}
@@ -110,7 +110,7 @@
const CGFunctionInfo &FI = CGM.getTypes().getFunctionInfo(OMD);
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
- Args.push_back(std::make_pair(OMD->getSelfDecl(),
+ Args.push_back(std::make_pair(OMD->getSelfDecl(),
OMD->getSelfDecl()->getType()));
Args.push_back(std::make_pair(OMD->getCmdDecl(),
OMD->getCmdDecl()->getType()));
@@ -123,7 +123,7 @@
}
/// Generate an Objective-C method. An Objective-C method is a C function with
-/// its pointer, name, and types registered in the class struture.
+/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
// Check if we should generate debug info for this method.
if (CGM.getDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
@@ -159,9 +159,9 @@
!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
(PD->getSetterKind() == ObjCPropertyDecl::Copy ||
PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
- llvm::Value *GetPropertyFn =
+ llvm::Value *GetPropertyFn =
CGM.getObjCRuntime().GetPropertyGetFunction();
-
+
if (!GetPropertyFn) {
CGM.ErrorUnsupported(PID, "Obj-C getter requiring atomic copy");
FinishFunction();
@@ -175,7 +175,7 @@
ValueDecl *Cmd = OMD->getCmdDecl();
llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
QualType IdTy = getContext().getObjCIdType();
- llvm::Value *SelfAsId =
+ llvm::Value *SelfAsId =
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
llvm::Value *True =
@@ -187,12 +187,12 @@
Args.push_back(std::make_pair(RValue::get(True), getContext().BoolTy));
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
- RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args),
+ RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args),
GetPropertyFn, Args);
// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
// aggregates.
- RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
} else {
@@ -203,7 +203,7 @@
CodeGenTypes &Types = CGM.getTypes();
RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
- Types.ConvertType(PD->getType())));
+ Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
}
}
@@ -226,7 +226,7 @@
StartObjCMethod(OMD, IMP->getClassInterface());
bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
- bool IsAtomic =
+ bool IsAtomic =
!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
// Determine if we should use an objc_setProperty call for
@@ -236,16 +236,16 @@
if (IsCopy ||
(CGM.getLangOptions().getGCMode() != LangOptions::GCOnly &&
PD->getSetterKind() == ObjCPropertyDecl::Retain)) {
- llvm::Value *SetPropertyFn =
+ llvm::Value *SetPropertyFn =
CGM.getObjCRuntime().GetPropertySetFunction();
-
+
if (!SetPropertyFn) {
CGM.ErrorUnsupported(PID, "Obj-C setter requiring atomic copy");
FinishFunction();
return;
}
-
- // Emit objc_setProperty((id) self, _cmd, offset, arg,
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
// <is-atomic>, <is-copy>).
// FIXME: Can't this be simpler? This might even be worse than the
// corresponding gcc code.
@@ -253,11 +253,11 @@
ValueDecl *Cmd = OMD->getCmdDecl();
llvm::Value *CmdVal = Builder.CreateLoad(LocalDeclMap[Cmd], "cmd");
QualType IdTy = getContext().getObjCIdType();
- llvm::Value *SelfAsId =
+ llvm::Value *SelfAsId =
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
llvm::Value *Offset = EmitIvarOffset(IMP->getClassInterface(), Ivar);
llvm::Value *Arg = LocalDeclMap[*OMD->param_begin()];
- llvm::Value *ArgAsId =
+ llvm::Value *ArgAsId =
Builder.CreateBitCast(Builder.CreateLoad(Arg, "arg"),
Types.ConvertType(IdTy));
llvm::Value *True =
@@ -269,13 +269,13 @@
Args.push_back(std::make_pair(RValue::get(CmdVal), Cmd->getType()));
Args.push_back(std::make_pair(RValue::get(Offset), getContext().LongTy));
Args.push_back(std::make_pair(RValue::get(ArgAsId), IdTy));
- Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
+ Args.push_back(std::make_pair(RValue::get(IsAtomic ? True : False),
getContext().BoolTy));
- Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
+ Args.push_back(std::make_pair(RValue::get(IsCopy ? True : False),
getContext().BoolTy));
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
- EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args),
+ EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args),
SetPropertyFn, Args);
} else {
SourceLocation Loc = PD->getLocation();
@@ -309,13 +309,13 @@
return PTy->getPointeeType();
}
-RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
+RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
const Selector &S) {
llvm::Value *Receiver = LoadObjCSelf();
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isClassMessage = OMD->isClassMethod();
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
Exp->getType(),
S,
OMD->getClassInterface(),
@@ -323,7 +323,7 @@
Receiver,
isClassMessage,
CallArgList());
-
+
}
RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
@@ -334,11 +334,11 @@
if (isa<ObjCSuperExpr>(E->getBase()))
return EmitObjCSuperPropertyGet(E, S);
return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Exp->getType(), S,
- EmitScalarExpr(E->getBase()),
+ GenerateMessageSend(*this, Exp->getType(), S,
+ EmitScalarExpr(E->getBase()),
false, CallArgList());
} else {
- const ObjCImplicitSetterGetterRefExpr *KE =
+ const ObjCImplicitSetterGetterRefExpr *KE =
cast<ObjCImplicitSetterGetterRefExpr>(Exp);
Selector S = KE->getGetterMethod()->getSelector();
llvm::Value *Receiver;
@@ -347,11 +347,11 @@
Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
} else if (isa<ObjCSuperExpr>(KE->getBase()))
return EmitObjCSuperPropertyGet(KE, S);
- else
+ else
Receiver = EmitScalarExpr(KE->getBase());
return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Exp->getType(), S,
- Receiver,
+ GenerateMessageSend(*this, Exp->getType(), S,
+ Receiver,
KE->getInterfaceDecl() != 0, CallArgList());
}
}
@@ -365,7 +365,7 @@
bool isClassMessage = OMD->isClassMethod();
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
Args.push_back(std::make_pair(Src, Exp->getType()));
- CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
Exp->getType(),
S,
OMD->getClassInterface(),
@@ -384,13 +384,13 @@
if (isa<ObjCSuperExpr>(E->getBase())) {
EmitObjCSuperPropertySet(E, S, Src);
return;
- }
+ }
CallArgList Args;
Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
- EmitScalarExpr(E->getBase()),
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ EmitScalarExpr(E->getBase()),
false, Args);
- } else if (const ObjCImplicitSetterGetterRefExpr *E =
+ } else if (const ObjCImplicitSetterGetterRefExpr *E =
dyn_cast<ObjCImplicitSetterGetterRefExpr>(Exp)) {
Selector S = E->getSetterMethod()->getSelector();
CallArgList Args;
@@ -404,19 +404,19 @@
} else
Receiver = EmitScalarExpr(E->getBase());
Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
- Receiver,
+ CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ Receiver,
E->getInterfaceDecl() != 0, Args);
} else
assert (0 && "bad expression node in EmitObjCPropertySet");
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
- llvm::Constant *EnumerationMutationFn =
+ llvm::Constant *EnumerationMutationFn =
CGM.getObjCRuntime().EnumerationMutationFunction();
llvm::Value *DeclAddress;
QualType ElementTy;
-
+
if (!EnumerationMutationFn) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
@@ -427,62 +427,62 @@
assert(HaveInsertPoint() && "DeclStmt destroyed insert point!");
const Decl* D = SD->getSingleDecl();
ElementTy = cast<ValueDecl>(D)->getType();
- DeclAddress = LocalDeclMap[D];
+ DeclAddress = LocalDeclMap[D];
} else {
ElementTy = cast<Expr>(S.getElement())->getType();
DeclAddress = 0;
}
-
+
// Fast enumeration state.
QualType StateTy = getContext().getObjCFastEnumerationStateType();
- llvm::AllocaInst *StatePtr = CreateTempAlloca(ConvertType(StateTy),
+ llvm::AllocaInst *StatePtr = CreateTempAlloca(ConvertType(StateTy),
"state.ptr");
- StatePtr->setAlignment(getContext().getTypeAlign(StateTy) >> 3);
+ StatePtr->setAlignment(getContext().getTypeAlign(StateTy) >> 3);
EmitMemSetToZero(StatePtr, StateTy);
-
+
// Number of elements in the items array.
static const unsigned NumItems = 16;
-
+
// Get selector
llvm::SmallVector<IdentifierInfo*, 3> II;
II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState"));
II.push_back(&CGM.getContext().Idents.get("objects"));
II.push_back(&CGM.getContext().Idents.get("count"));
- Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
+ Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
&II[0]);
QualType ItemsTy =
getContext().getConstantArrayType(getContext().getObjCIdType(),
- llvm::APInt(32, NumItems),
+ llvm::APInt(32, NumItems),
ArrayType::Normal, 0);
llvm::Value *ItemsPtr = CreateTempAlloca(ConvertType(ItemsTy), "items.ptr");
-
+
llvm::Value *Collection = EmitScalarExpr(S.getCollection());
-
+
CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(StatePtr),
+ Args.push_back(std::make_pair(RValue::get(StatePtr),
getContext().getPointerType(StateTy)));
-
- Args.push_back(std::make_pair(RValue::get(ItemsPtr),
+
+ Args.push_back(std::make_pair(RValue::get(ItemsPtr),
getContext().getPointerType(ItemsTy)));
-
+
const llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
- Args.push_back(std::make_pair(RValue::get(Count),
+ Args.push_back(std::make_pair(RValue::get(Count),
getContext().UnsignedLongTy));
-
- RValue CountRV =
- CGM.getObjCRuntime().GenerateMessageSend(*this,
+
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
getContext().UnsignedLongTy,
FastEnumSel,
Collection, false, Args);
llvm::Value *LimitPtr = CreateTempAlloca(UnsignedLongLTy, "limit.ptr");
Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
-
+
llvm::BasicBlock *NoElements = createBasicBlock("noelements");
llvm::BasicBlock *SetStartMutations = createBasicBlock("setstartmutations");
-
+
llvm::Value *Limit = Builder.CreateLoad(LimitPtr);
llvm::Value *Zero = llvm::Constant::getNullValue(UnsignedLongLTy);
@@ -490,60 +490,60 @@
Builder.CreateCondBr(IsZero, NoElements, SetStartMutations);
EmitBlock(SetStartMutations);
-
- llvm::Value *StartMutationsPtr =
+
+ llvm::Value *StartMutationsPtr =
CreateTempAlloca(UnsignedLongLTy);
-
- llvm::Value *StateMutationsPtrPtr =
+
+ llvm::Value *StateMutationsPtrPtr =
Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
- llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
"mutationsptr");
-
- llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
+
+ llvm::Value *StateMutations = Builder.CreateLoad(StateMutationsPtr,
"mutations");
-
+
Builder.CreateStore(StateMutations, StartMutationsPtr);
-
+
llvm::BasicBlock *LoopStart = createBasicBlock("loopstart");
EmitBlock(LoopStart);
llvm::Value *CounterPtr = CreateTempAlloca(UnsignedLongLTy, "counter.ptr");
Builder.CreateStore(Zero, CounterPtr);
-
- llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
+
+ llvm::BasicBlock *LoopBody = createBasicBlock("loopbody");
EmitBlock(LoopBody);
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
StateMutations = Builder.CreateLoad(StateMutationsPtr, "statemutations");
- llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
+ llvm::Value *StartMutations = Builder.CreateLoad(StartMutationsPtr,
"mutations");
- llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
+ llvm::Value *MutationsEqual = Builder.CreateICmpEQ(StateMutations,
StartMutations,
"tobool");
-
-
+
+
llvm::BasicBlock *WasMutated = createBasicBlock("wasmutated");
llvm::BasicBlock *WasNotMutated = createBasicBlock("wasnotmutated");
-
+
Builder.CreateCondBr(MutationsEqual, WasNotMutated, WasMutated);
-
+
EmitBlock(WasMutated);
llvm::Value *V =
- Builder.CreateBitCast(Collection,
+ Builder.CreateBitCast(Collection,
ConvertType(getContext().getObjCIdType()),
"tmp");
CallArgList Args2;
- Args2.push_back(std::make_pair(RValue::get(V),
+ Args2.push_back(std::make_pair(RValue::get(V),
getContext().getObjCIdType()));
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
- EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2),
+ EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2),
EnumerationMutationFn, Args2);
-
+
EmitBlock(WasNotMutated);
-
- llvm::Value *StateItemsPtr =
+
+ llvm::Value *StateItemsPtr =
Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
llvm::Value *Counter = Builder.CreateLoad(CounterPtr, "counter");
@@ -551,39 +551,39 @@
llvm::Value *EnumStateItems = Builder.CreateLoad(StateItemsPtr,
"stateitems");
- llvm::Value *CurrentItemPtr =
+ llvm::Value *CurrentItemPtr =
Builder.CreateGEP(EnumStateItems, Counter, "currentitem.ptr");
-
+
llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr, "currentitem");
-
+
// Cast the item to the right type.
CurrentItem = Builder.CreateBitCast(CurrentItem,
ConvertType(ElementTy), "tmp");
-
+
if (!DeclAddress) {
LValue LV = EmitLValue(cast<Expr>(S.getElement()));
-
+
// Set the value to null.
Builder.CreateStore(CurrentItem, LV.getAddress());
} else
Builder.CreateStore(CurrentItem, DeclAddress);
-
+
// Increment the counter.
- Counter = Builder.CreateAdd(Counter,
+ Counter = Builder.CreateAdd(Counter,
llvm::ConstantInt::get(UnsignedLongLTy, 1));
Builder.CreateStore(Counter, CounterPtr);
-
+
llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
-
+
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
EmitStmt(S.getBody());
-
+
BreakContinueStack.pop_back();
-
+
EmitBlock(AfterBody);
-
+
llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
Counter = Builder.CreateLoad(CounterPtr);
@@ -593,18 +593,18 @@
// Fetch more elements.
EmitBlock(FetchMore);
-
- CountRV =
- CGM.getObjCRuntime().GenerateMessageSend(*this,
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this,
getContext().UnsignedLongTy,
- FastEnumSel,
+ FastEnumSel,
Collection, false, Args);
Builder.CreateStore(CountRV.getScalarVal(), LimitPtr);
Limit = Builder.CreateLoad(LimitPtr);
-
+
IsZero = Builder.CreateICmpEQ(Limit, Zero, "iszero");
Builder.CreateCondBr(IsZero, NoElements, LoopStart);
-
+
// No more elements.
EmitBlock(NoElements);
@@ -612,7 +612,7 @@
// If the element was not a declaration, set it to be null.
LValue LV = EmitLValue(cast<Expr>(S.getElement()));
-
+
// Set the value to null.
Builder.CreateStore(llvm::Constant::getNullValue(ConvertType(ElementTy)),
LV.getAddress());
@@ -621,19 +621,16 @@
EmitBlock(LoopEnd);
}
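For orientation, here is an illustrative C-style sketch (not compiler source; the
struct field names follow NSFastEnumerationState) of the control flow the
fast-enumeration codegen above emits, with comments naming the basic blocks:

  unsigned long counter = 0;
  unsigned long startMutations = *state.mutationsPtr;   // "mutations"
  loopbody:
    if (*state.mutationsPtr != startMutations)          // "statemutations" check
      objc_enumerationMutation(collection);             // "wasmutated" path
    void *item = state.itemsPtr[counter];               // "currentitem"
    /* loop body; break -> "loopend", continue -> "afterbody" */
    if (++counter < limit) goto loopbody;               // "afterbody"
    /* "fetchmore": re-send countByEnumeratingWithState:objects:count: and
       store the result in limit; zero falls through to "noelements". */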
-void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S)
-{
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
}
-void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S)
-{
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
CGM.getObjCRuntime().EmitThrowStmt(*this, S);
}
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
- const ObjCAtSynchronizedStmt &S)
-{
+ const ObjCAtSynchronizedStmt &S) {
CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 63d45e9..264e9d0 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -79,8 +79,8 @@
const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets);
llvm::Constant *GenerateMethodList(const std::string &ClassName,
const std::string &CategoryName,
- const llvm::SmallVectorImpl<Selector> &MethodSels,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList);
llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
llvm::Constant *GenerateProtocolList(
@@ -110,7 +110,7 @@
public:
CGObjCGNU(CodeGen::CodeGenModule &cgm);
virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *);
- virtual CodeGen::RValue
+ virtual CodeGen::RValue
GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -118,7 +118,7 @@
bool IsClassMessage,
const CallArgList &CallArgs,
const ObjCMethodDecl *Method);
- virtual CodeGen::RValue
+ virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -132,8 +132,8 @@
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel);
virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
*Method);
-
- virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD);
virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
@@ -144,7 +144,7 @@
virtual llvm::Function *GetPropertyGetFunction();
virtual llvm::Function *GetPropertySetFunction();
virtual llvm::Constant *EnumerationMutationFunction();
-
+
virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -160,7 +160,7 @@
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest);
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
+ llvm::Value *DestPtr,
llvm::Value *SrcPtr,
QualType Ty);
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
@@ -197,9 +197,10 @@
return "_OBJC_CLASS_" + ClassName;
}
-static std::string SymbolNameForMethod(const std::string &ClassName, const
- std::string &CategoryName, const std::string &MethodName, bool isClassMethod)
-{
+static std::string SymbolNameForMethod(const std::string &ClassName,
+ const std::string &CategoryName,
+ const std::string &MethodName,
+ bool isClassMethod) {
return "_OBJC_METHOD_" + ClassName + "("+CategoryName+")"+
(isClassMethod ? "+" : "-") + MethodName;
}
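Usage examples (hypothetical inputs) of the mangling this helper produces:

  // SymbolNameForMethod("NSString", "Extras", "count", false)
  //   -> "_OBJC_METHOD_NSString(Extras)-count"
  // SymbolNameForMethod("NSObject", "", "alloc", true)
  //   -> "_OBJC_METHOD_NSObject()+alloc"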
@@ -211,13 +212,13 @@
CGM.getTypes().ConvertType(CGM.getContext().IntTy));
LongTy = cast<llvm::IntegerType>(
CGM.getTypes().ConvertType(CGM.getContext().LongTy));
-
+
Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
Zeros[1] = Zeros[0];
NULLPtr = llvm::ConstantPointerNull::get(
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)));
// C string type. Used in lots of places.
- PtrToInt8Ty =
+ PtrToInt8Ty =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
// Get the selector Type.
SelectorTy = cast<llvm::PointerType>(
@@ -225,11 +226,11 @@
PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
PtrTy = PtrToInt8Ty;
-
+
// Object type
ASTIdTy = CGM.getContext().getObjCIdType();
IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
-
+
// IMP type
std::vector<const llvm::Type*> IMPArgs;
IMPArgs.push_back(IdTy);
@@ -261,7 +262,7 @@
llvm::GlobalValue::PrivateLinkage,
".objc_untyped_selector_alias"+Sel.getAsString(),
NULL, &TheModule);
-
+
return Builder.CreateLoad(US);
}
@@ -315,7 +316,7 @@
// an OpenStep implementation, this should let them select their own class for
// constant strings.
llvm::Constant *CGObjCGNU::GenerateConstantString(const ObjCStringLiteral *SL) {
- std::string Str(SL->getString()->getStrData(),
+ std::string Str(SL->getString()->getStrData(),
SL->getString()->getByteLength());
std::vector<llvm::Constant*> Ivars;
Ivars.push_back(NULLPtr);
@@ -393,7 +394,7 @@
}
}
// Cast the pointer to a simplified version of the class structure
- ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
+ ReceiverClass = CGF.Builder.CreateBitCast(ReceiverClass,
llvm::PointerType::getUnqual(
llvm::StructType::get(VMContext, IdTy, IdTy, NULL)));
// Get the superclass pointer
@@ -413,7 +414,7 @@
std::vector<const llvm::Type*> Params;
Params.push_back(llvm::PointerType::getUnqual(ObjCSuperTy));
Params.push_back(SelectorTy);
- llvm::Constant *lookupFunction =
+ llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
"objc_msg_lookup_super");
@@ -425,7 +426,7 @@
return CGF.EmitCall(FnInfo, imp, ActualArgs);
}
-/// Generate code for a message send expression.
+/// Generate code for a message send expression.
CodeGen::RValue
CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
@@ -468,14 +469,14 @@
self = llvm::ConstantPointerNull::get(IdTy);
}
Params.push_back(self->getType());
- llvm::Constant *lookupFunction =
+ llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
"objc_msg_lookup_sender");
imp = CGF.Builder.CreateCall3(lookupFunction, Receiver, cmd, self);
} else {
- llvm::Constant *lookupFunction =
+ llvm::Constant *lookupFunction =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(
llvm::PointerType::getUnqual(impType), Params, true),
"objc_msg_lookup");
@@ -486,16 +487,16 @@
return CGF.EmitCall(FnInfo, imp, ActualArgs);
}
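As a rough illustration, the GNU runtime dispatch emitted above amounts to the
following two-step sequence in plain C++ (typedefs assumed for the sketch; only
objc_msg_lookup and objc_msg_lookup_sender are named in the code itself):

  typedef void *id;
  typedef void *SEL;
  typedef id (*IMP)(id, SEL, ...);
  // 1. Ask the runtime for the implementation pointer...
  IMP imp = objc_msg_lookup(receiver, selector);
  // 2. ...then call it directly, with receiver and selector prepended.
  id result = imp(receiver, selector /*, message arguments */);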
-/// Generates a MethodList. Used in construction of a objc_class and
+/// Generates a MethodList. Used in construction of an objc_class and
/// objc_category structures.
llvm::Constant *CGObjCGNU::GenerateMethodList(const std::string &ClassName,
- const std::string &CategoryName,
- const llvm::SmallVectorImpl<Selector> &MethodSels,
- const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
+ const std::string &CategoryName,
+ const llvm::SmallVectorImpl<Selector> &MethodSels,
+ const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes,
bool isClassMethodList) {
if (MethodSels.empty())
return NULLPtr;
- // Get the method structure type.
+ // Get the method structure type.
llvm::StructType *ObjCMethodTy = llvm::StructType::get(VMContext,
PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
PtrToInt8Ty, // Method types
@@ -530,8 +531,8 @@
llvm::PATypeHolder OpaqueNextTy = llvm::OpaqueType::get(VMContext);
llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(OpaqueNextTy);
llvm::StructType *ObjCMethodListTy = llvm::StructType::get(VMContext,
- NextPtrTy,
- IntTy,
+ NextPtrTy,
+ IntTy,
ObjCMethodArrayTy,
NULL);
// Refine next pointer type to concrete type
@@ -545,7 +546,7 @@
Methods.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
MethodTypes.size()));
Methods.push_back(MethodArray);
-
+
// Create an instance of the structure
return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
}
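The constants assembled here correspond roughly to the following C layout,
inferred from the field comments above (a sketch, not a runtime header):

  struct objc_method {
    const char *selector;          // really a SEL; the runtime fixes it up
    const char *types;             // encoded method signature
    void (*imp)();                 // the method implementation
  };
  struct objc_method_list {
    struct objc_method_list *next; // the opaque NextPtrTy refined above
    int count;
    struct objc_method methods[1]; // variable-length tail
  };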
@@ -555,7 +556,7 @@
const llvm::SmallVectorImpl<llvm::Constant *> &IvarNames,
const llvm::SmallVectorImpl<llvm::Constant *> &IvarTypes,
const llvm::SmallVectorImpl<llvm::Constant *> &IvarOffsets) {
- // Get the method structure type.
+ // Get the method structure type.
llvm::StructType *ObjCIvarTy = llvm::StructType::get(VMContext,
PtrToInt8Ty,
PtrToInt8Ty,
@@ -575,7 +576,7 @@
llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
IvarNames.size());
-
+
Elements.clear();
Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
@@ -611,7 +612,7 @@
LongTy, // instance_size
IVars->getType(), // ivars
Methods->getType(), // methods
- // These are all filled in by the runtime, so we pretend
+ // These are all filled in by the runtime, so we pretend
PtrTy, // dtable
PtrTy, // subclass_list
PtrTy, // sibling_class
@@ -643,7 +644,7 @@
llvm::Constant *CGObjCGNU::GenerateProtocolMethodList(
const llvm::SmallVectorImpl<llvm::Constant *> &MethodNames,
const llvm::SmallVectorImpl<llvm::Constant *> &MethodTypes) {
- // Get the method structure type.
+ // Get the method structure type.
llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(VMContext,
PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
PtrToInt8Ty,
@@ -652,7 +653,7 @@
std::vector<llvm::Constant*> Elements;
for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
Elements.clear();
- Elements.push_back(MethodNames[i]);
+ Elements.push_back(MethodNames[i]);
Elements.push_back(MethodTypes[i]);
Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
}
@@ -678,7 +679,7 @@
LongTy, // FIXME: Should be size_t
ProtocolArrayTy,
NULL);
- std::vector<llvm::Constant*> Elements;
+ std::vector<llvm::Constant*> Elements;
for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
iter != endIter ; iter++) {
llvm::Constant *protocol = ExistingProtocols[*iter];
@@ -697,10 +698,10 @@
return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
}
-llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *PD) {
llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
- const llvm::Type *T =
+ const llvm::Type *T =
CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}
@@ -723,7 +724,7 @@
InstanceMethodList->getType(),
ClassMethodList->getType(),
NULL);
- std::vector<llvm::Constant*> Elements;
+ std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
@@ -755,7 +756,7 @@
// Collect information about class methods:
llvm::SmallVector<llvm::Constant*, 16> ClassMethodNames;
llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- for (ObjCProtocolDecl::classmeth_iterator
+ for (ObjCProtocolDecl::classmeth_iterator
iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
iter != endIter ; iter++) {
std::string TypeStr;
@@ -778,7 +779,7 @@
InstanceMethodList->getType(),
ClassMethodList->getType(),
NULL);
- std::vector<llvm::Constant*> Elements;
+ std::vector<llvm::Constant*> Elements;
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
Elements.push_back(llvm::ConstantExpr::getIntToPtr(
@@ -787,7 +788,7 @@
Elements.push_back(ProtocolList);
Elements.push_back(InstanceMethodList);
Elements.push_back(ClassMethodList);
- ExistingProtocols[ProtocolName] =
+ ExistingProtocols[ProtocolName] =
llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
".objc_protocol"), IdTy);
}
@@ -810,7 +811,7 @@
// Collect information about class methods
llvm::SmallVector<Selector, 16> ClassMethodSels;
llvm::SmallVector<llvm::Constant*, 16> ClassMethodTypes;
- for (ObjCCategoryImplDecl::classmeth_iterator
+ for (ObjCCategoryImplDecl::classmeth_iterator
iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
iter != endIter ; iter++) {
ClassMethodSels.push_back((*iter)->getSelector());
@@ -830,7 +831,7 @@
std::vector<llvm::Constant*> Elements;
Elements.push_back(MakeConstantString(CategoryName));
Elements.push_back(MakeConstantString(ClassName));
- // Instance method list
+ // Instance method list
Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
false), PtrTy));
@@ -842,7 +843,7 @@
Elements.push_back(llvm::ConstantExpr::getBitCast(
GenerateProtocolList(Protocols), PtrTy));
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
+ MakeGlobal(llvm::StructType::get(VMContext, PtrToInt8Ty, PtrToInt8Ty,
PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
}
@@ -850,7 +851,7 @@
ASTContext &Context = CGM.getContext();
// Get the superclass name.
- const ObjCInterfaceDecl * SuperClassDecl =
+ const ObjCInterfaceDecl * SuperClassDecl =
OID->getClassInterface()->getSuperClass();
std::string SuperClassName;
if (SuperClassDecl) {
@@ -865,7 +866,7 @@
// Emit the symbol that is used to generate linker errors if this class is
// referenced in other modules but not declared.
std::string classSymbolName = "__objc_class_name_" + ClassName;
- if (llvm::GlobalVariable *symbol =
+ if (llvm::GlobalVariable *symbol =
TheModule.getGlobalVariable(classSymbolName)) {
symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
} else {
@@ -873,7 +874,7 @@
llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
classSymbolName);
}
-
+
// Get the size of instances.
int instanceSize = Context.getASTObjCImplementationLayout(OID).getSize() / 8;
@@ -881,8 +882,8 @@
llvm::SmallVector<llvm::Constant*, 16> IvarNames;
llvm::SmallVector<llvm::Constant*, 16> IvarTypes;
llvm::SmallVector<llvm::Constant*, 16> IvarOffsets;
-
- int superInstanceSize = !SuperClassDecl ? 0 :
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize() / 8;
// For non-fragile ivars, set the instance size to 0 - {the size of just this
// class}. The runtime will then set this to the correct value on load.
@@ -912,7 +913,7 @@
// Collect information about instance methods
llvm::SmallVector<Selector, 16> InstanceMethodSels;
llvm::SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
- for (ObjCImplementationDecl::instmeth_iterator
+ for (ObjCImplementationDecl::instmeth_iterator
iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
iter != endIter ; iter++) {
InstanceMethodSels.push_back((*iter)->getSelector());
@@ -920,7 +921,7 @@
Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
- for (ObjCImplDecl::propimpl_iterator
+ for (ObjCImplDecl::propimpl_iterator
iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
iter != endIter ; iter++) {
ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
@@ -974,7 +975,7 @@
ClassMethodSels, ClassMethodTypes, true);
llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
IvarOffsets);
- // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+ // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
// we emit a symbol containing the offset for each ivar in the class. This
// allows code compiled for the non-Fragile ABI to inherit from code compiled
// for the legacy ABI, without causing problems. The converse is also
@@ -986,7 +987,7 @@
// the offset (third field in ivar structure)
const llvm::Type *IndexTy = llvm::Type::getInt32Ty(VMContext);
llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
- llvm::ConstantInt::get(IndexTy, 1), 0,
+ llvm::ConstantInt::get(IndexTy, 1), 0,
llvm::ConstantInt::get(IndexTy, 2) };
for (ObjCInterfaceDecl::ivar_iterator iter = ClassDecl->ivar_begin(),
@@ -1041,7 +1042,7 @@
}
-llvm::Function *CGObjCGNU::ModuleInitFunction() {
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Don't emit an ObjC load function if no Objective-C code has been seen
if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
ExistingProtocols.empty() && TypedSelectors.empty() &&
@@ -1078,12 +1079,12 @@
".objc_static_class_name"));
Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
ConstantStrings));
- llvm::StructType *StaticsListTy =
+ llvm::StructType *StaticsListTy =
llvm::StructType::get(VMContext, PtrToInt8Ty, StaticsArrayTy, NULL);
llvm::Type *StaticsListPtrTy =
llvm::PointerType::getUnqual(StaticsListTy);
Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
- llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType *StaticsListArrayTy =
llvm::ArrayType::get(StaticsListPtrTy, 2);
Elements.clear();
Elements.push_back(Statics);
@@ -1094,7 +1095,7 @@
// Array of classes, categories, and constant objects
llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
Classes.size() + Categories.size() + 2);
- llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
+ llvm::StructType *SymTabTy = llvm::StructType::get(VMContext,
LongTy, SelStructPtrTy,
llvm::Type::getInt16Ty(VMContext),
llvm::Type::getInt16Ty(VMContext),
@@ -1130,7 +1131,7 @@
llvm::Constant *SelectorList = MakeGlobal(
llvm::ArrayType::get(SelStructTy, Selectors.size()), Selectors,
".objc_selector_list");
- Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
SelStructPtrTy));
// Now that all of the static selectors exist, create pointers to them.
@@ -1158,7 +1159,7 @@
llvm::Constant *Idxs[] = {Zeros[0],
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
llvm::Constant *SelPtr = new llvm::GlobalVariable
- (TheModule, SelStructPtrTy,
+ (TheModule, SelStructPtrTy,
true, llvm::GlobalValue::InternalLinkage,
llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
".objc_sel_ptr");
@@ -1171,10 +1172,10 @@
(*iter).second->setAliasee(SelPtr);
}
// Number of classes defined.
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
Classes.size()));
// Number of categories defined
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
Categories.size()));
// Create an array of classes, then categories, then static object instances
Classes.insert(Classes.end(), Categories.begin(), Categories.end());
@@ -1183,7 +1184,7 @@
Classes.push_back(NULLPtr);
llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
Elements.push_back(ClassList);
- // Construct the symbol table
+ // Construct the symbol table
llvm::Constant *SymTab= MakeGlobal(SymTabTy, Elements);
// The symbol table is contained in a module which has some version-checking
@@ -1193,8 +1194,8 @@
Elements.clear();
// Runtime version used for compatibility checking.
if (CGM.getContext().getLangOptions().ObjCNonFragileABI) {
- Elements.push_back(llvm::ConstantInt::get(LongTy,
- NonFragileRuntimeVersion));
+ Elements.push_back(llvm::ConstantInt::get(LongTy,
+ NonFragileRuntimeVersion));
} else {
Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
}
@@ -1229,8 +1230,8 @@
}
llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
- const ObjCContainerDecl *CD) {
- const ObjCCategoryImplDecl *OCD =
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
std::string CategoryName = OCD ? OCD->getNameAsString() : "";
std::string ClassName = OMD->getClassInterface()->getNameAsString();
@@ -1238,50 +1239,51 @@
bool isClassMethod = !OMD->isInstanceMethod();
CodeGenTypes &Types = CGM.getTypes();
- const llvm::FunctionType *MethodTy =
+ const llvm::FunctionType *MethodTy =
Types.GetFunctionType(Types.getFunctionInfo(OMD), OMD->isVariadic());
std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
MethodName, isClassMethod);
- llvm::Function *Method = llvm::Function::Create(MethodTy,
- llvm::GlobalValue::InternalLinkage,
- FunctionName,
- &TheModule);
+ llvm::Function *Method
+ = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
return Method;
}
llvm::Function *CGObjCGNU::GetPropertyGetFunction() {
- std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
- Params.push_back(IdTy);
- Params.push_back(SelectorTy);
- // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
- Params.push_back(LongTy);
- Params.push_back(BoolTy);
- // void objc_getProperty (id, SEL, ptrdiff_t, bool)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(IdTy, Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_getProperty"));
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(BoolTy);
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(IdTy, Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_getProperty"));
}
llvm::Function *CGObjCGNU::GetPropertySetFunction() {
- std::vector<const llvm::Type*> Params;
- const llvm::Type *BoolTy =
- CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
- Params.push_back(IdTy);
- Params.push_back(SelectorTy);
- // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
- Params.push_back(LongTy);
- Params.push_back(IdTy);
- Params.push_back(BoolTy);
- Params.push_back(BoolTy);
- // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
- const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
- return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
- "objc_setProperty"));
+ std::vector<const llvm::Type*> Params;
+ const llvm::Type *BoolTy =
+ CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+ Params.push_back(IdTy);
+ Params.push_back(SelectorTy);
+ // FIXME: Using LongTy for ptrdiff_t is probably broken on Win64
+ Params.push_back(LongTy);
+ Params.push_back(IdTy);
+ Params.push_back(BoolTy);
+ Params.push_back(BoolTy);
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Params, false);
+ return cast<llvm::Function>(CGM.CreateRuntimeFunction(FTy,
+ "objc_setProperty"));
}
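Reconstructed from the parameter lists above, the two runtime entry points
these helpers bind to look roughly like this (parameter names are illustrative;
ptrdiff_t is modeled as long here, per the FIXME):

  id   objc_getProperty(id self, SEL _cmd, long offset, bool atomic);
  void objc_setProperty(id self, SEL _cmd, long offset, id value,
                        bool atomic, bool copy);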
llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
@@ -1324,7 +1326,7 @@
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
- llvm::Value *SyncArg =
+ llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
CGF.Builder.CreateCall(SyncEnter, SyncArg);
@@ -1339,7 +1341,7 @@
CGF.setInvokeDest(TryHandler);
CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
: cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
// Jump to @finally if there is no exception
@@ -1353,7 +1355,7 @@
int PointerWidth = td.getTypeSizeInBits(PtrTy);
assert((PointerWidth == 32 || PointerWidth == 64) &&
"Can't yet handle exceptions if pointers are not 32 or 64 bits");
- llvm::Value *llvm_eh_exception =
+ llvm::Value *llvm_eh_exception =
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
llvm::Value *llvm_eh_selector = PointerWidth == 32 ?
CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector_i32) :
@@ -1392,13 +1394,13 @@
HasCatchAll = true;
// No further catches after this one will ever be reached
break;
- }
+ }
// All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *OPT =
+ const ObjCObjectPointerType *OPT =
CatchDecl->getType()->getAsObjCObjectPointerType();
assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceType *IT =
+ const ObjCInterfaceType *IT =
OPT->getPointeeType()->getAsObjCInterfaceType();
assert(IT && "Invalid @catch type.");
llvm::Value *EHType =
@@ -1439,11 +1441,11 @@
CGF.EmitBlock(Match);
}
-
+
if (CatchBody) {
llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc,
CGF.ConvertType(CatchParam->getType()));
-
+
// Bind the catch parameter if it exists.
if (CatchParam) {
// CatchParam is a ParmVarDecl because of the grammar
@@ -1491,7 +1493,7 @@
if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
+ if (const ObjCAtFinallyStmt* FinallyStmt =
cast<ObjCAtTryStmt>(S).getFinallyStmt())
CGF.EmitStmt(FinallyStmt->getFinallyBody());
} else {
@@ -1501,7 +1503,7 @@
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
- llvm::Value *SyncArg =
+ llvm::Value *SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
CGF.Builder.CreateCall(SyncExit, SyncArg);
@@ -1518,7 +1520,7 @@
CGF.EmitBlock(FinallyRethrow);
CGF.Builder.CreateCall(RethrowFn, CGF.Builder.CreateLoad(RethrowPtr));
CGF.Builder.CreateUnreachable();
-
+
CGF.EmitBlock(FinallyEnd);
}
@@ -1530,20 +1532,20 @@
std::vector<const llvm::Type*> Args(1, IdTy);
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *ThrowFn =
+ llvm::Value *ThrowFn =
CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
-
+
if (const Expr *ThrowExpr = S.getThrowExpr()) {
llvm::Value *Exception = CGF.EmitScalarExpr(ThrowExpr);
ExceptionAsObject = Exception;
} else {
- assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
ExceptionAsObject =
CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy, "tmp");
-
+
// Note: This may have to be an invoke, if we want to support constructs like:
// @try {
// @throw(obj);
@@ -1566,37 +1568,32 @@
}
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj)
-{
+ llvm::Value *AddrWeakObj) {
return 0;
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
return;
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
return;
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
return;
}
void CGObjCGNU::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
return;
}
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
+ llvm::Value *DestPtr,
llvm::Value *SrcPtr,
QualType Ty) {
return;
@@ -1618,7 +1615,7 @@
// Don't emit the guess in non-PIC code because the linker will not be able
// to replace it with the real version for a library. In non-PIC code you
// must compile with the fragile ABI if you want to use ivars from a
- // GCC-compiled class.
+ // GCC-compiled class.
if (CGM.getLangOptions().PICLevel) {
llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
llvm::Type::getInt32Ty(VMContext), false,
@@ -1654,11 +1651,11 @@
if (OIVD == Ivars[k])
return OID;
}
-
+
// Otherwise check in the super class.
if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
return FindIvarInterface(Context, Super, OIVD);
-
+
return 0;
}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 03c4f47..e685385 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -104,7 +104,7 @@
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- llvm::Type *I8Ptr =
+ llvm::Type *I8Ptr =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
QualType IvarTy = Ivar->getType();
const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
@@ -939,8 +939,7 @@
public:
CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
- CGM(cgm), VMContext(cgm.getLLVMContext())
- { }
+ CGM(cgm), VMContext(cgm.getLLVMContext()) { }
virtual llvm::Constant *GenerateConstantString(const ObjCStringLiteral *SL);
@@ -1402,8 +1401,7 @@
/* *** CGObjCMac Public Interface *** */
CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
- ObjCTypes(cgm)
-{
+ ObjCTypes(cgm) {
ObjCABI = 1;
EmitImageInfo();
}
@@ -2689,8 +2687,7 @@
/// object: objc_read_weak (id *src)
///
llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj)
-{
+ llvm::Value *AddrWeakObj) {
const llvm::Type* DestTy =
cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
@@ -2705,8 +2702,7 @@
/// objc_assign_weak (id src, id *dst)
///
void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -2726,8 +2722,7 @@
/// objc_assign_global (id src, id *dst)
///
void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -2747,8 +2742,7 @@
/// objc_assign_ivar (id src, id *dst)
///
void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -2768,8 +2762,7 @@
/// objc_assign_strongCast (id src, id *dst)
///
void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -3055,10 +3048,10 @@
if (RD) {
if (Field->isBitField()) {
CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
-
- const llvm::Type *Ty =
+
+ const llvm::Type *Ty =
CGM.getTypes().ConvertTypeForMemRecursive(Field->getType());
- uint64_t TypeSize =
+ uint64_t TypeSize =
CGM.getTypes().getTargetData().getTypeAllocSize(Ty);
FieldOffset = Info.FieldNo * TypeSize;
} else
@@ -3516,8 +3509,7 @@
CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
: CGObjCCommonMac(cgm),
- ObjCTypes(cgm)
-{
+ ObjCTypes(cgm) {
ObjCEmptyCacheVar = ObjCEmptyVtableVar = NULL;
ObjCABI = 2;
}
@@ -3525,8 +3517,7 @@
/* *** */
ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
- : VMContext(cgm.getLLVMContext()), CGM(cgm)
-{
+ : VMContext(cgm.getLLVMContext()), CGM(cgm) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -3612,8 +3603,7 @@
}
ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
- : ObjCCommonTypesHelper(cgm)
-{
+ : ObjCCommonTypesHelper(cgm) {
// struct _objc_method_description {
// SEL name;
// char *types;
@@ -3666,7 +3656,7 @@
llvm::PATypeHolder ProtocolListTyHolder = llvm::OpaqueType::get(VMContext);
const llvm::Type *T =
- llvm::StructType::get(VMContext,
+ llvm::StructType::get(VMContext,
llvm::PointerType::getUnqual(ProtocolListTyHolder),
LongTy,
llvm::ArrayType::get(ProtocolTyHolder, 0),
@@ -3835,8 +3825,7 @@
}
ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
- : ObjCCommonTypesHelper(cgm)
-{
+ : ObjCCommonTypesHelper(cgm) {
// struct _method_list_t {
// uint32_t entsize; // sizeof(struct _objc_method)
// uint32_t method_count;
@@ -3908,7 +3897,7 @@
// uint32_t alignment;
// uint32_t size;
// }
- IvarnfABITy = llvm::StructType::get(VMContext,
+ IvarnfABITy = llvm::StructType::get(VMContext,
llvm::PointerType::getUnqual(LongTy),
Int8PtrTy,
Int8PtrTy,
@@ -5056,7 +5045,7 @@
Name += '_';
std::string SelName(Sel.getAsString());
// Replace all ':' in selector name with '_' ouch!
- for(unsigned i = 0; i < SelName.size(); i++)
+ for (unsigned i = 0; i < SelName.size(); i++)
if (SelName[i] == ':')
SelName[i] = '_';
Name += SelName;
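An example of the rewrite:

  // selector "setObject:forKey:"  ->  "setObject_forKey_"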
@@ -5277,8 +5266,8 @@
/// objc_assign_ivar (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src,
+ llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -5299,8 +5288,7 @@
///
void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -5337,8 +5325,7 @@
///
llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
CodeGen::CodeGenFunction &CGF,
- llvm::Value *AddrWeakObj)
-{
+ llvm::Value *AddrWeakObj) {
const llvm::Type* DestTy =
cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
@@ -5352,8 +5339,7 @@
/// objc_assign_weak (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
@@ -5373,8 +5359,7 @@
/// objc_assign_global (id src, id *dst)
///
void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
- llvm::Value *src, llvm::Value *dst)
-{
+ llvm::Value *src, llvm::Value *dst) {
const llvm::Type * SrcTy = src->getType();
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index d82df9d..8951ab6 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -86,7 +86,7 @@
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers,
- llvm::Value *Offset);
+ llvm::Value *Offset);
public:
virtual ~CGObjCRuntime();
@@ -101,7 +101,7 @@
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
Selector Sel) = 0;
- /// Get a typed selector.
+ /// Get a typed selector.
virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
const ObjCMethodDecl *Method) = 0;
@@ -114,9 +114,9 @@
/// Generate a class structure for this class.
virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
-
- /// Generate an Objective-C message send operation.
- virtual CodeGen::RValue
+
+ /// Generate an Objective-C message send operation.
+ virtual CodeGen::RValue
GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
QualType ResultType,
Selector Sel,
@@ -143,34 +143,34 @@
virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
const ObjCProtocolDecl *OPD) = 0;
- /// Generate the named protocol. Protocols contain method metadata but no
- /// implementations.
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
/// Generate a function preamble for a method with the specified
- /// types.
+ /// types.
// FIXME: Currently this just generates the Function definition, but really this
// should also be generating the loads of the parameters, as the runtime
// should have full control over how parameters are passed.
- virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD) = 0;
/// Return the runtime function for getting properties.
virtual llvm::Constant *GetPropertyGetFunction() = 0;
-
+
/// Return the runtime function for setting properties.
virtual llvm::Constant *GetPropertySetFunction() = 0;
/// GetClass - Return a reference to the class for the given
/// interface decl.
- virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
const ObjCInterfaceDecl *OID) = 0;
/// EnumerationMutationFunction - Return the function that's called by the
/// compiler when a mutation is detected during foreach iteration.
virtual llvm::Constant *EnumerationMutationFunction() = 0;
-
+
virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S) = 0;
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
@@ -185,7 +185,7 @@
llvm::Value *src, llvm::Value *dest) = 0;
virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
llvm::Value *src, llvm::Value *dest) = 0;
-
+
virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
QualType ObjectTy,
llvm::Value *BaseValue,
@@ -195,12 +195,12 @@
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) = 0;
virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
- llvm::Value *DestPtr,
+ llvm::Value *DestPtr,
llvm::Value *SrcPtr,
QualType Ty) = 0;
};
-/// Creates an instance of an Objective-C runtime class.
+/// Creates an instance of an Objective-C runtime class.
//TODO: This should include some way of selecting which runtime to target.
CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index f2fd885..d26c772 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -37,7 +37,7 @@
if (LayoutFields(D))
return;
-
+
// We weren't able to layout the struct. Try again with a packed struct
Packed = true;
AlignmentAsLLVMStruct = 1;
@@ -45,52 +45,52 @@
FieldTypes.clear();
LLVMFields.clear();
LLVMBitFields.clear();
-
+
LayoutFields(D);
}
void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
uint64_t FieldOffset) {
- uint64_t FieldSize =
+ uint64_t FieldSize =
D->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
-
+
if (FieldSize == 0)
return;
uint64_t NextFieldOffset = NextFieldOffsetInBytes * 8;
unsigned NumBytesToAppend;
-
+
if (FieldOffset < NextFieldOffset) {
assert(BitsAvailableInLastField && "Bitfield size mismatch!");
assert(NextFieldOffsetInBytes && "Must have laid out at least one byte!");
-
+
// The bitfield begins in the previous bit-field.
- NumBytesToAppend =
+ NumBytesToAppend =
llvm::RoundUpToAlignment(FieldSize - BitsAvailableInLastField, 8) / 8;
} else {
assert(FieldOffset % 8 == 0 && "Field offset not aligned correctly");
// Append padding if necessary.
AppendBytes((FieldOffset - NextFieldOffset) / 8);
-
- NumBytesToAppend =
+
+ NumBytesToAppend =
llvm::RoundUpToAlignment(FieldSize, 8) / 8;
-
+
assert(NumBytesToAppend && "No bytes to append!");
}
const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
uint64_t TypeSizeInBits = getTypeSizeInBytes(Ty) * 8;
-
+
LLVMBitFields.push_back(LLVMBitFieldInfo(D, FieldOffset / TypeSizeInBits,
- FieldOffset % TypeSizeInBits,
+ FieldOffset % TypeSizeInBits,
FieldSize));
-
+
AppendBytes(NumBytesToAppend);
-
+
AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct, getTypeAlignment(Ty));
- BitsAvailableInLastField =
+ BitsAvailableInLastField =
NextFieldOffsetInBytes * 8 - (FieldOffset + FieldSize);
}
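A worked example of the arithmetic above (offsets in bits, assuming 8-bit
bytes), using a hypothetical declaration:

  // struct { int a : 3; int b : 7; }
  // a at FieldOffset 0: NumBytesToAppend = RoundUp(3, 8)/8 = 1 byte appended;
  //   BitsAvailableInLastField = 8 - (0 + 3) = 5.
  // b at FieldOffset 3: 3 < NextFieldOffset (8), so b begins in the previous
  //   byte; NumBytesToAppend = RoundUp(7 - 5, 8)/8 = 1 more byte, leaving
  //   BitsAvailableInLastField = 16 - (3 + 7) = 6.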
@@ -105,14 +105,14 @@
// don't affect the struct alignment.
if (!Packed && !D->getDeclName())
return false;
-
+
LayoutBitField(D, FieldOffset);
return true;
}
-
+
assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
uint64_t FieldOffsetInBytes = FieldOffset / 8;
-
+
const llvm::Type *Ty = Types.ConvertTypeForMemRecursive(D->getType());
unsigned TypeAlignment = getTypeAlignment(Ty);
@@ -122,7 +122,7 @@
assert(!Packed && "Alignment is wrong even with packed struct!");
return false;
}
-
+
if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
if (const PragmaPackAttr *PPA = RD->getAttr<PragmaPackAttr>()) {
@@ -132,72 +132,72 @@
}
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
+ uint64_t AlignedNextFieldOffsetInBytes =
llvm::RoundUpToAlignment(NextFieldOffsetInBytes, TypeAlignment);
if (FieldOffsetInBytes < AlignedNextFieldOffsetInBytes) {
assert(!Packed && "Could not place field even with packed struct!");
return false;
}
-
+
if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
// Even with alignment, the field offset is not at the right place,
// insert padding.
uint64_t PaddingInBytes = FieldOffsetInBytes - NextFieldOffsetInBytes;
-
+
AppendBytes(PaddingInBytes);
}
-
+
// Now append the field.
LLVMFields.push_back(LLVMFieldInfo(D, FieldTypes.size()));
AppendField(FieldOffsetInBytes, Ty);
-
+
return true;
}
void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
-
+
const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
-
+
const llvm::Type *Ty = 0;
uint64_t Size = 0;
unsigned Align = 0;
-
+
unsigned FieldNo = 0;
- for (RecordDecl::field_iterator Field = D->field_begin(),
+ for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
- assert(Layout.getFieldOffset(FieldNo) == 0 &&
+ assert(Layout.getFieldOffset(FieldNo) == 0 &&
"Union field offset did not start at the beginning of record!");
if (Field->isBitField()) {
- uint64_t FieldSize =
+ uint64_t FieldSize =
Field->getBitWidth()->EvaluateAsInt(Types.getContext()).getZExtValue();
-
+
// Ignore zero sized bit fields.
if (FieldSize == 0)
continue;
-
+
// Add the bit field info.
Types.addBitFieldInfo(*Field, 0, 0, FieldSize);
} else
Types.addFieldInfo(*Field, 0);
-
- const llvm::Type *FieldTy =
+
+ const llvm::Type *FieldTy =
Types.ConvertTypeForMemRecursive(Field->getType());
unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
uint64_t FieldSize = Types.getTargetData().getTypeAllocSize(FieldTy);
-
+
if (FieldAlign < Align)
continue;
-
+
if (FieldAlign > Align || FieldSize > Size) {
Ty = FieldTy;
Align = FieldAlign;
Size = FieldSize;
}
}
-
+
// Now add our field.
if (Ty) {
AppendField(0, Ty);
@@ -208,7 +208,7 @@
Align = 1;
}
}
-
+
// Append tail padding.
if (Layout.getSize() / 8 > Size)
AppendPadding(Layout.getSize() / 8, Align);
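An example of the selection rule above, on a hypothetical union:

  // union { char tag; double value; };
  // char is seen first (Align = 1); double then wins with FieldAlign = 8, so
  // the union is laid out as a single double field, and any remaining record
  // size is covered by the tail padding appended here.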
@@ -217,15 +217,15 @@
bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
assert(!D->isUnion() && "Can't call LayoutFields on a union!");
assert(Alignment && "Did not set alignment!");
-
+
const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
-
+
unsigned FieldNo = 0;
- for (RecordDecl::field_iterator Field = D->field_begin(),
+ for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
- assert(!Packed &&
+ assert(!Packed &&
"Could not layout fields even with a packed LLVM struct!");
return false;
}
@@ -233,21 +233,21 @@
// Append tail padding if necessary.
AppendTailPadding(Layout.getSize());
-
+
return true;
}
void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
assert(RecordSize % 8 == 0 && "Invalid record size!");
-
+
uint64_t RecordSizeInBytes = RecordSize / 8;
assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
-
+
unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
AppendBytes(NumPadBytes);
}
-void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
+void CGRecordLayoutBuilder::AppendField(uint64_t FieldOffsetInBytes,
const llvm::Type *FieldTy) {
AlignmentAsLLVMStruct = std::max(AlignmentAsLLVMStruct,
getTypeAlignment(FieldTy));
@@ -260,19 +260,19 @@
BitsAvailableInLastField = 0;
}
-void
+void
CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
const llvm::Type *FieldTy) {
AppendPadding(FieldOffsetInBytes, getTypeAlignment(FieldTy));
}
-void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
+void CGRecordLayoutBuilder::AppendPadding(uint64_t FieldOffsetInBytes,
unsigned FieldAlignment) {
assert(NextFieldOffsetInBytes <= FieldOffsetInBytes &&
"Incorrect field layout!");
-
+
// Round up the field offset to the alignment of the field type.
- uint64_t AlignedNextFieldOffsetInBytes =
+ uint64_t AlignedNextFieldOffsetInBytes =
llvm::RoundUpToAlignment(NextFieldOffsetInBytes, FieldAlignment);
if (AlignedNextFieldOffsetInBytes < FieldOffsetInBytes) {
@@ -287,11 +287,11 @@
void CGRecordLayoutBuilder::AppendBytes(uint64_t NumBytes) {
if (NumBytes == 0)
return;
-
+
const llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
if (NumBytes > 1)
Ty = llvm::ArrayType::get(Ty, NumBytes);
-
+
// Append the padding field
AppendField(NextFieldOffsetInBytes, Ty);
}
@@ -299,7 +299,7 @@
unsigned CGRecordLayoutBuilder::getTypeAlignment(const llvm::Type *Ty) const {
if (Packed)
return 1;
-
+
return Types.getTargetData().getABITypeAlignment(Ty);
}
@@ -311,26 +311,26 @@
// This record already contains a member pointer.
if (ContainsMemberPointer)
return;
-
+
// Can only have member pointers if we're compiling C++.
if (!Types.getContext().getLangOptions().CPlusPlus)
return;
-
+
QualType Ty = FD->getType();
-
+
if (Ty->isMemberPointerType()) {
// We have a member pointer!
ContainsMemberPointer = true;
return;
}
-
+
}
CGRecordLayout *
CGRecordLayoutBuilder::ComputeLayout(CodeGenTypes &Types,
const RecordDecl *D) {
CGRecordLayoutBuilder Builder(Types);
-
+
Builder.Layout(D);
const llvm::Type *Ty = llvm::StructType::get(Types.getLLVMContext(),
@@ -339,7 +339,7 @@
assert(Types.getContext().getASTRecordLayout(D).getSize() / 8 ==
Types.getTargetData().getTypeAllocSize(Ty) &&
"Type size mismatch!");
-
+
// Add all the field numbers.
for (unsigned i = 0, e = Builder.LLVMFields.size(); i != e; ++i) {
const FieldDecl *FD = Builder.LLVMFields[i].first;
@@ -351,9 +351,9 @@
// Add bitfield info.
for (unsigned i = 0, e = Builder.LLVMBitFields.size(); i != e; ++i) {
const LLVMBitFieldInfo &Info = Builder.LLVMBitFields[i];
-
+
Types.addBitFieldInfo(Info.FD, Info.FieldNo, Info.Start, Info.Size);
}
-
+
return new CGRecordLayout(Ty, Builder.ContainsMemberPointer);
}
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.h b/lib/CodeGen/CGRecordLayoutBuilder.h
index 63ddc10..d1a13aa 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.h
+++ b/lib/CodeGen/CGRecordLayoutBuilder.h
@@ -25,57 +25,57 @@
namespace clang {
class FieldDecl;
class RecordDecl;
-
+
namespace CodeGen {
class CGRecordLayout;
class CodeGenTypes;
-class CGRecordLayoutBuilder {
+class CGRecordLayoutBuilder {
CodeGenTypes &Types;
-
+
/// Packed - Whether the resulting LLVM struct will be packed or not.
bool Packed;
/// ContainsMemberPointer - Whether one of the fields is a member pointer
/// or is a struct that contains a member pointer.
bool ContainsMemberPointer;
-
+
/// Alignment - Contains the alignment of the RecordDecl.
unsigned Alignment;
/// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
/// LLVM types.
unsigned AlignmentAsLLVMStruct;
-
+
/// BitsAvailableInLastField - If a bit field spans only part of an LLVM field,
/// this will have the number of bits still available in the field.
char BitsAvailableInLastField;
/// NextFieldOffsetInBytes - Holds the next field offset in bytes.
uint64_t NextFieldOffsetInBytes;
-
+
/// FieldTypes - Holds the LLVM types that the struct is created from.
std::vector<const llvm::Type *> FieldTypes;
-
+
/// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
/// LLVMBitFieldInfo - Holds location and size information about a bit field.
struct LLVMBitFieldInfo {
- LLVMBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, unsigned Start,
+ LLVMBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, unsigned Start,
unsigned Size)
: FD(FD), FieldNo(FieldNo), Start(Start), Size(Size) { }
-
+
const FieldDecl *FD;
-
+
unsigned FieldNo;
unsigned Start;
unsigned Size;
};
llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
-
- CGRecordLayoutBuilder(CodeGenTypes &Types)
+
+ CGRecordLayoutBuilder(CodeGenTypes &Types)
: Types(Types), Packed(false), ContainsMemberPointer(false)
, Alignment(0), AlignmentAsLLVMStruct(1)
, BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
@@ -85,15 +85,15 @@
/// LayoutUnion - Will lay out a union RecordDecl.
void LayoutUnion(const RecordDecl *D);
-
+
/// LayoutFields - try to lay out all fields in the record decl.
/// Returns false if the operation failed because the struct is not packed.
bool LayoutFields(const RecordDecl *D);
-
+
/// LayoutField - lay out a single field. Returns false if the operation failed
/// because the current struct is not packed.
bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
-
+
/// LayoutBitField - lay out a single bit field.
void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
@@ -107,28 +107,28 @@
/// AppendPadding - Appends enough padding bytes so that the total
/// struct size is a multiple of the field alignment.
void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
-
+
/// AppendBytes - Append a given number of bytes to the record.
void AppendBytes(uint64_t NumBytes);
/// AppendTailPadding - Append enough tail padding so that the type will have
/// the passed size.
void AppendTailPadding(uint64_t RecordSize);
-
+
unsigned getTypeAlignment(const llvm::Type *Ty) const;
uint64_t getTypeSizeInBytes(const llvm::Type *Ty) const;
/// CheckForMemberPointer - Check if the field contains a member pointer.
void CheckForMemberPointer(const FieldDecl *FD);
-
+
public:
/// ComputeLayout - Return the right record layout for a given record decl.
- static CGRecordLayout *ComputeLayout(CodeGenTypes &Types,
+ static CGRecordLayout *ComputeLayout(CodeGenTypes &Types,
const RecordDecl *D);
};
-
+
} // end namespace CodeGen
} // end namespace clang
-
-#endif
+
+#endif
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 12e3a95..26d1c3b 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -72,7 +72,7 @@
ErrorUnsupported(S, "statement");
EmitAnyExpr(cast<Expr>(S), 0, false, true);
-
+
// Expression emitters don't handle unreachable blocks yet, so look for one
// explicitly here. This handles the common case of a call to a noreturn
// function.
@@ -83,14 +83,14 @@
}
}
break;
- case Stmt::IndirectGotoStmtClass:
+ case Stmt::IndirectGotoStmtClass:
EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
-
+
case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
@@ -98,7 +98,7 @@
case Stmt::ObjCAtTryStmtClass:
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
- break;
+ break;
case Stmt::ObjCAtCatchStmtClass:
assert(0 && "@catch statements should be handled by EmitObjCAtTryStmt");
break;
@@ -111,7 +111,7 @@
case Stmt::ObjCAtSynchronizedStmtClass:
EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
break;
- case Stmt::ObjCForCollectionStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
break;
}
@@ -141,7 +141,7 @@
llvm::Value *AggLoc, bool isAggVol) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
-
+
CGDebugInfo *DI = getDebugInfo();
if (DI) {
EnsureInsertPoint();
@@ -156,7 +156,7 @@
size_t CleanupStackDepth = CleanupEntries.size();
bool OldDidCallStackSave = DidCallStackSave;
DidCallStackSave = false;
-
+
for (CompoundStmt::const_body_iterator I = S.body_begin(),
E = S.body_end()-GetLast; I != E; ++I)
EmitStmt(*I);
@@ -164,7 +164,7 @@
if (DI) {
EnsureInsertPoint();
DI->setLocation(S.getRBracLoc());
-
+
// FIXME: The llvm backend is currently not ready to deal with region_end
// for block scoping. In the presence of always_inline functions it gets so
// confused that it doesn't emit any debug info. Just disable this for now.
@@ -172,10 +172,10 @@
}
RValue RV;
- if (!GetLast)
+ if (!GetLast)
RV = RValue::get(0);
else {
- // We have to special case labels here. They are statements, but when put
+ // We have to special-case labels here. They are statements, but when put
// at the end of a statement expression, they yield the value of their
// subexpression. Handle this by walking through all labels we encounter,
// emitting them before we evaluate the subexpr.
@@ -184,22 +184,22 @@
EmitLabel(*LS);
LastStmt = LS->getSubStmt();
}
-
+
EnsureInsertPoint();
-
+
RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
}
DidCallStackSave = OldDidCallStackSave;
-
+
EmitCleanupBlocks(CleanupStackDepth);
-
+
return RV;
}
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
-
+
// If there is a cleanup stack, then it isn't worth trying to
// simplify this block (we would need to remove it from the scope map
// and cleanup entry).
@@ -235,7 +235,7 @@
CleanupEntries.back().Blocks.push_back(BB);
}
}
-
+
CurFn->getBasicBlockList().push_back(BB);
Builder.SetInsertPoint(BB);
}
@@ -282,7 +282,7 @@
// EmitIndirectSwitches(). We need a default dest, so we use the
// current BB, but this is overwritten.
llvm::Value *V = Builder.CreatePtrToInt(EmitScalarExpr(S.getTarget()),
- llvm::Type::getInt32Ty(VMContext),
+ llvm::Type::getInt32Ty(VMContext),
"addr");
llvm::SwitchInst *I = Builder.CreateSwitch(V, Builder.GetInsertBlock());
IndirectSwitches.push_back(I);
@@ -294,7 +294,7 @@
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// C99 6.8.4.1: The first substatement is executed if the expression compares
// unequal to 0. The condition must be a scalar type.
-
+
// If the condition constant folds and can be elided, try to avoid emitting
// the condition and the dead arm of the if/else.
if (int Cond = ConstantFoldsToSimpleInteger(S.getCond())) {
@@ -302,7 +302,7 @@
const Stmt *Executed = S.getThen(), *Skipped = S.getElse();
if (Cond == -1) // Condition false?
std::swap(Executed, Skipped);
-
+
// If the skipped block has no labels in it, just emit the executed block.
// This avoids emitting dead code and simplifies the CFG substantially.
if (!ContainsLabel(Skipped)) {
@@ -320,19 +320,19 @@
if (S.getElse())
ElseBlock = createBasicBlock("if.else");
EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
-
+
// Emit the 'then' code.
EmitBlock(ThenBlock);
EmitStmt(S.getThen());
EmitBranch(ContBlock);
-
+
// Emit the 'else' code if present.
if (const Stmt *Else = S.getElse()) {
EmitBlock(ElseBlock);
EmitStmt(Else);
EmitBranch(ContBlock);
}
-
+
// Emit the continuation block for code after the if.
EmitBlock(ContBlock, true);
}
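An example of the label caveat above:

  // if (0) { x = 1; }          // dead arm with no labels: nothing emitted
  // if (0) { skip: x = 1; }    // a goto may target "skip", so the arm
  //                            // must still be emitted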
@@ -350,7 +350,7 @@
// Store the blocks to use for break and continue.
BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
-
+
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
// execution of the loop body.
@@ -359,23 +359,23 @@
// while(1) is common, avoid extra exit blocks. Be sure
// to correctly handle break/continue though.
bool EmitBoolCondBranch = true;
- if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
if (C->isOne())
EmitBoolCondBranch = false;
-
+
// As long as the condition is true, go to the loop body.
if (EmitBoolCondBranch)
Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
-
+
// Emit the loop body.
EmitBlock(LoopBody);
EmitStmt(S.getBody());
- BreakContinueStack.pop_back();
-
+ BreakContinueStack.pop_back();
+
// Cycle to the condition.
EmitBranch(LoopHeader);
-
+
// Emit the exit block.
EmitBlock(ExitBlock, true);
@@ -393,20 +393,20 @@
EmitBlock(LoopBody);
llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
-
+
// Store the blocks to use for break and continue.
BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
-
+
// Emit the body of the loop into the block.
EmitStmt(S.getBody());
-
+
BreakContinueStack.pop_back();
-
+
EmitBlock(DoCond);
-
+
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
-
+
// Evaluate the conditional in the do..while condition block.
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
@@ -415,14 +415,14 @@
// "do {} while (0)" is common in macros, avoid extra blocks. Be sure
// to correctly handle break/continue though.
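// e.g. the classic macro wrapper (illustrative), which should not pay
// for a conditional branch:
//   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)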
bool EmitBoolCondBranch = true;
- if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
if (C->isZero())
EmitBoolCondBranch = false;
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch)
Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
-
+
// Emit the exit block.
EmitBlock(AfterDo);
@@ -451,25 +451,25 @@
if (S.getCond()) {
// As long as the condition is true, iterate the loop.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
-
+
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
EmitBranchOnBoolExpr(S.getCond(), ForBody, AfterFor);
-
- EmitBlock(ForBody);
+
+ EmitBlock(ForBody);
} else {
// Treat it as a non-zero constant. Don't even create a new block for the
// body, just fall into it.
}
- // If the for loop doesn't have an increment we can just use the
+ // If the for loop doesn't have an increment we can just use the
// condition as the continue block.
llvm::BasicBlock *ContinueBlock;
if (S.getInc())
ContinueBlock = createBasicBlock("for.inc");
else
- ContinueBlock = CondBlock;
-
+ ContinueBlock = CondBlock;
+
// Store the blocks to use for break and continue.
BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
@@ -477,13 +477,13 @@
EmitStmt(S.getBody());
BreakContinueStack.pop_back();
-
+
// If there is an increment, emit it next.
if (S.getInc()) {
EmitBlock(ContinueBlock);
EmitStmt(S.getInc());
}
-
+
// Finally, branch back up to the condition for the next iteration.
EmitBranch(CondBlock);
@@ -508,7 +508,7 @@
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// Emit the result value, even if unused, to evaluate the side effects.
const Expr *RV = S.getRetValue();
-
+
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
if (!ReturnValue) {
@@ -601,8 +601,8 @@
LHS++;
}
return;
- }
-
+ }
+
// The range is too big. Emit "if" condition into a new block,
// making sure to save and restore the current insertion point.
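// Illustrative: a GNU case range such as
//   case 10 ... 100:
// is lowered here to the single unsigned test (cond - 10) <= 90, so one
// subtract and compare covers the whole range.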
llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
@@ -617,10 +617,10 @@
Builder.SetInsertPoint(CaseRangeBlock);
// Emit range check.
- llvm::Value *Diff =
- Builder.CreateSub(SwitchInsn->getCondition(),
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(),
llvm::ConstantInt::get(VMContext, LHS), "tmp");
- llvm::Value *Cond =
+ llvm::Value *Cond =
Builder.CreateICmpULE(Diff,
llvm::ConstantInt::get(VMContext, Range), "tmp");
Builder.CreateCondBr(Cond, CaseDest, FalseDest);
@@ -637,12 +637,12 @@
EmitCaseStmtRange(S);
return;
}
-
+
EmitBlock(createBasicBlock("sw.bb"));
llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
llvm::APSInt CaseVal = S.getLHS()->EvaluateAsInt(getContext());
SwitchInsn->addCase(llvm::ConstantInt::get(VMContext, CaseVal), CaseDest);
-
+
// Recursively emitting the statement is acceptable, but is not wonderful for
// code where we have many case statements nested together, e.g.:
// case 1:
@@ -663,14 +663,14 @@
NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
}
-
+
// Normal default recursion for non-cases.
EmitStmt(CurCase->getSubStmt());
}
void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
- assert(DefaultBlock->empty() &&
+ assert(DefaultBlock->empty() &&
"EmitDefaultStmt: Default block already defined?");
EmitBlock(DefaultBlock);
EmitStmt(S.getSubStmt());
@@ -706,13 +706,13 @@
// Emit switch body.
EmitStmt(S.getBody());
-
+
BreakContinueStack.pop_back();
// Update the default block in case explicit case range tests have
// been chained on top.
SwitchInsn->setSuccessor(0, CaseRangeBlock);
-
+
// If a default was never emitted then reroute any jumps to it and
// discard.
if (!DefaultBlock->getParent()) {
@@ -731,7 +731,7 @@
SimplifyConstraint(const char *Constraint, TargetInfo &Target,
llvm::SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
std::string Result;
-
+
while (*Constraint) {
switch (*Constraint) {
default:
@@ -749,7 +749,7 @@
assert(OutCons &&
"Must pass output names to constraints with a symbolic name");
unsigned Index;
- bool result = Target.resolveSymbolicName(Constraint,
+ bool result = Target.resolveSymbolicName(Constraint,
&(*OutCons)[0],
OutCons->size(), Index);
assert(result && "Could not resolve symbolic name"); result=result;
@@ -757,10 +757,10 @@
break;
}
}
-
+
Constraint++;
}
-
+
return Result;
}
@@ -769,9 +769,9 @@
const Expr *InputExpr,
std::string &ConstraintStr) {
llvm::Value *Arg;
- if (Info.allowsRegister() || !Info.allowsMemory()) {
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
const llvm::Type *Ty = ConvertType(InputExpr->getType());
-
+
if (Ty->isSingleValueType()) {
Arg = EmitScalarExpr(InputExpr);
} else {
@@ -782,7 +782,7 @@
if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
Ty = llvm::IntegerType::get(VMContext, Size);
Ty = llvm::PointerType::getUnqual(Ty);
-
+
Arg = Builder.CreateLoad(Builder.CreateBitCast(Dest.getAddress(), Ty));
} else {
Arg = Dest.getAddress();
@@ -795,7 +795,7 @@
Arg = Dest.getAddress();
ConstraintStr += '*';
}
-
+
return Arg;
}
@@ -805,7 +805,7 @@
llvm::SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
unsigned DiagOffs;
S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
-
+
// Assemble the pieces into the final asm string.
std::string AsmString;
for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
@@ -817,19 +817,19 @@
AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
Pieces[i].getModifier() + '}';
}
-
+
// Get all the output and input constraints together.
llvm::SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
llvm::SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
- for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
S.getOutputName(i));
bool result = Target.validateOutputConstraint(Info);
assert(result && "Failed to parse output constraint"); result=result;
OutputConstraintInfos.push_back(Info);
- }
-
+ }
+
for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
S.getInputName(i));
@@ -839,9 +839,9 @@
assert(result && "Failed to parse input constraint");
InputConstraintInfos.push_back(Info);
}
-
+
std::string Constraints;
-
+
std::vector<LValue> ResultRegDests;
std::vector<QualType> ResultRegQualTys;
std::vector<const llvm::Type *> ResultRegTypes;
@@ -854,16 +854,16 @@
std::vector<llvm::Value*> InOutArgs;
std::vector<const llvm::Type*> InOutArgTypes;
- for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
// Simplify the output constraint.
std::string OutputConstraint(S.getOutputConstraint(i));
OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
-
+
const Expr *OutExpr = S.getOutputExpr(i);
OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
-
+
LValue Dest = EmitLValue(OutExpr);
if (!Constraints.empty())
Constraints += ',';
@@ -876,7 +876,7 @@
ResultRegDests.push_back(Dest);
ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
ResultTruncRegTypes.push_back(ResultRegTypes.back());
-
+
// If this output is tied to an input, and if the input is larger, then
// we need to set the actual result type of the inline asm node to be the
// same as the input type.
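// Illustrative (hypothetical operands): given
//   short s; int i;
//   asm("..." : "=r"(s) : "0"(i));
// the asm result type must be widened to i's width, then truncated back
// when stored to s.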
@@ -889,10 +889,10 @@
break;
}
assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
-
+
QualType InputTy = S.getInputExpr(InputNo)->getType();
QualType OutputTy = OutExpr->getType();
-
+
uint64_t InputSize = getContext().getTypeSize(InputTy);
if (getContext().getTypeSize(OutputTy) < InputSize) {
// Form the asm to return the value as a larger integer type.
@@ -905,13 +905,13 @@
Constraints += "=*";
Constraints += OutputConstraint;
}
-
+
if (Info.isReadWrite()) {
InOutConstraints += ',';
const Expr *InputExpr = S.getOutputExpr(i);
llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, InOutConstraints);
-
+
if (Info.allowsRegister())
InOutConstraints += llvm::utostr(i);
else
@@ -921,9 +921,9 @@
InOutArgs.push_back(Arg);
}
}
-
+
unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
-
+
for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
const Expr *InputExpr = S.getInputExpr(i);
@@ -931,14 +931,14 @@
if (!Constraints.empty())
Constraints += ',';
-
+
// Simplify the input constraint.
std::string InputConstraint(S.getInputConstraint(i));
InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
&OutputConstraintInfos);
llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
-
+
// If this input argument is tied to a larger output result, extend the
// input to be the same size as the output. The LLVM backend wants to see
// the input and output of a matching constraint be the same size. Note
@@ -948,7 +948,7 @@
unsigned Output = Info.getTiedOperand();
QualType OutputTy = S.getOutputExpr(Output)->getType();
QualType InputTy = InputExpr->getType();
-
+
if (getContext().getTypeSize(OutputTy) >
getContext().getTypeSize(InputTy)) {
// Use ptrtoint as appropriate so that we can do our extension.
@@ -959,35 +959,35 @@
Arg = Builder.CreateZExt(Arg, llvm::IntegerType::get(VMContext, OutputSize));
}
}
-
-
+
+
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
Constraints += InputConstraint;
}
-
+
// Append the "input" part of inout constraints last.
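// Illustrative: a read-write operand such as "+r"(x) was split above
// into an output "=r" plus an input tied to it ("0"); those synthesized
// inputs are what get appended here, after the explicit ones.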
for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
ArgTypes.push_back(InOutArgTypes[i]);
Args.push_back(InOutArgs[i]);
}
Constraints += InOutConstraints;
-
+
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
std::string Clobber(S.getClobber(i)->getStrData(),
S.getClobber(i)->getByteLength());
Clobber = Target.getNormalizedGCCRegisterName(Clobber.c_str());
-
+
if (i != 0 || NumConstraints != 0)
Constraints += ',';
-
+
Constraints += "~{";
Constraints += Clobber;
Constraints += '}';
}
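// Illustrative: asm volatile("" ::: "cc", "memory") contributes
// "~{cc},~{memory}" to the final LLVM constraint string.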
-
+
// Add machine specific clobbers
std::string MachineClobbers = Target.getClobbers();
if (!MachineClobbers.empty()) {
@@ -1003,17 +1003,17 @@
ResultType = ResultRegTypes[0];
else
ResultType = llvm::StructType::get(VMContext, ResultRegTypes);
-
- const llvm::FunctionType *FTy =
+
+ const llvm::FunctionType *FTy =
llvm::FunctionType::get(ResultType, ArgTypes, false);
-
- llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, AsmString, Constraints,
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints,
S.isVolatile() || S.getNumOutputs() == 0);
llvm::CallInst *Result = Builder.CreateCall(IA, Args.begin(), Args.end());
Result->addAttribute(~0, llvm::Attribute::NoUnwind);
-
-
+
+
// Extract all of the register value results from the asm.
std::vector<llvm::Value*> RegResults;
if (ResultRegTypes.size() == 1) {
@@ -1024,10 +1024,10 @@
RegResults.push_back(Tmp);
}
}
-
+
for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
llvm::Value *Tmp = RegResults[i];
-
+
// If the result type of the LLVM IR asm doesn't match the result type of
// the expression, do the conversion.
if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
@@ -1036,13 +1036,13 @@
// ResultTruncRegTypes can be a pointer.
uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
Tmp = Builder.CreateTrunc(Tmp, llvm::IntegerType::get(VMContext, (unsigned)ResSize));
-
+
if (Tmp->getType() != TruncTy) {
assert(isa<llvm::PointerType>(TruncTy));
Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
}
}
-
+
EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i],
ResultRegQualTys[i]);
}
diff --git a/lib/CodeGen/CGValue.h b/lib/CodeGen/CGValue.h
index 5aaf951..fe97afa 100644
--- a/lib/CodeGen/CGValue.h
+++ b/lib/CodeGen/CGValue.h
@@ -37,14 +37,14 @@
// TODO: Encode this into the low bit of the pointer for more efficient
// return-by-value.
enum { Scalar, Complex, Aggregate } Flavor;
-
+
bool Volatile:1;
public:
-
+
bool isScalar() const { return Flavor == Scalar; }
bool isComplex() const { return Flavor == Complex; }
bool isAggregate() const { return Flavor == Aggregate; }
-
+
bool isVolatileQualified() const { return Volatile; }
/// getScalar() - Return the Value* of this scalar value.
@@ -58,13 +58,13 @@
std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
return std::pair<llvm::Value *, llvm::Value *>(V1, V2);
}
-
+
/// getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value *getAggregateAddr() const {
assert(isAggregate() && "Not an aggregate!");
return V1;
}
-
+
static RValue get(llvm::Value *V) {
RValue ER;
ER.V1 = V;
@@ -106,7 +106,7 @@
/// bitrange.
class LValue {
// FIXME: alignment?
-
+
enum {
Simple, // This is a normal l-value, use getAddress().
VectorElt, // This is a vector element l-value (V[i]), use getVector*
@@ -123,16 +123,16 @@
Weak, // __weak object expression
Strong // __strong object expression
};
-
+
llvm::Value *V;
-
+
union {
// Index into a vector subscript: V[i]
llvm::Value *VectorIdx;
// ExtVector element subset: V.xyx
llvm::Constant *VectorElts;
-
+
// BitField start bit and size
struct {
unsigned short StartBit;
@@ -152,7 +152,7 @@
// objective-c's ivar
bool Ivar:1;
-
+
// LValue is non-gc'able for any reason, including being a parameter or local
// variable.
bool NonGC: 1;
@@ -161,7 +161,7 @@
bool GlobalObjCRef : 1;
// objective-c's gc attributes
- unsigned ObjCType : 2;
+ unsigned ObjCType : 2;
// address space
unsigned AddressSpace;
@@ -175,7 +175,7 @@
R.ObjCType = None;
R.Ivar = R.NonGC = R.GlobalObjCRef = false;
}
-
+
public:
bool isSimple() const { return LVType == Simple; }
bool isVectorElt() const { return LVType == VectorElt; }
@@ -187,10 +187,10 @@
bool isVolatileQualified() const { return Volatile; }
bool isRestrictQualified() const { return Restrict; }
unsigned getQualifiers() const {
- return (Volatile ? QualType::Volatile : 0) |
+ return (Volatile ? QualType::Volatile : 0) |
(Restrict ? QualType::Restrict : 0);
}
-
+
bool isObjCIvar() const { return Ivar; }
bool isNonGC () const { return NonGC; }
bool isGlobalObjCRef() const { return GlobalObjCRef; }
@@ -206,7 +206,7 @@
static void SetGlobalObjCRef(LValue& R, bool iValue) {
R.GlobalObjCRef = iValue;
}
-
+
static void SetObjCNonGC(LValue& R, bool iValue) {
R.NonGC = iValue;
}
@@ -218,7 +218,7 @@
else
R.ObjCType = None;
}
-
+
// simple lvalue
llvm::Value *getAddress() const { assert(isSimple()); return V; }
// vector elt lvalue
@@ -267,7 +267,7 @@
SetObjCType(GCAttrs, R);
return R;
}
-
+
static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
unsigned Qualifiers) {
LValue R;
@@ -277,7 +277,7 @@
SetQualifiers(Qualifiers,R);
return R;
}
-
+
static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
unsigned Qualifiers) {
LValue R;
@@ -312,8 +312,8 @@
SetQualifiers(Qualifiers,R);
return R;
}
-
- static LValue MakeKVCRef(const ObjCImplicitSetterGetterRefExpr *E,
+
+ static LValue MakeKVCRef(const ObjCImplicitSetterGetterRefExpr *E,
unsigned Qualifiers) {
LValue R;
R.LVType = KVCRef;
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index e3511ed..be2406d 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -23,11 +23,11 @@
using namespace clang;
using namespace CodeGen;
-CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
: BlockFunction(cgm, *this, Builder), CGM(cgm),
Target(CGM.getContext().Target),
Builder(cgm.getModule().getContext()),
- DebugInfo(0), SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ DebugInfo(0), SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
CXXThisDecl(0) {
LLVMIntTy = ConvertType(getContext().IntTy);
LLVMPointerWidth = Target.getPointerWidth(0);
@@ -41,7 +41,7 @@
llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
llvm::BasicBlock *&BB = LabelMap[S];
if (BB) return BB;
-
+
// Create, but don't insert, the new block.
return BB = createBasicBlock(S->getName());
}
@@ -69,7 +69,7 @@
// FIXME: Use positive checks instead of negative ones to be more robust in
// the face of extension.
return !T->hasPointerRepresentation() && !T->isRealType() &&
- !T->isVoidType() && !T->isVectorType() && !T->isFunctionType() &&
+ !T->isVoidType() && !T->isVectorType() && !T->isFunctionType() &&
!T->isBlockPointerType() && !T->isMemberPointerType();
}
@@ -95,7 +95,7 @@
// branch then we can just put the code in that block instead. This
// cleans up functions which started with a unified return block.
if (ReturnBlock->hasOneUse()) {
- llvm::BranchInst *BI =
+ llvm::BranchInst *BI =
dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
// Reset insertion point and delete the branch.
@@ -123,8 +123,8 @@
"did not remove all blocks from block scope map!");
assert(CleanupEntries.empty() &&
"mismatched push/pop in cleanup stack!");
-
- // Emit function epilog (to return).
+
+ // Emit function epilog (to return).
EmitReturnBlock();
// Emit debug descriptor for function end.
@@ -141,7 +141,7 @@
Ptr->eraseFromParent();
}
-void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy,
+void CodeGenFunction::StartFunction(const Decl *D, QualType RetTy,
llvm::Function *Fn,
const FunctionArgList &Args,
SourceLocation StartLoc) {
@@ -161,14 +161,14 @@
EntryBB);
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
-
+
ReturnBlock = createBasicBlock("return");
ReturnValue = 0;
if (!RetTy->isVoidType())
ReturnValue = CreateTempAlloca(ConvertType(RetTy), "retval");
-
+
Builder.SetInsertPoint(EntryBB);
-
+
// Emit subprogram debug descriptor.
// FIXME: The cast here is a huge hack.
if (CGDebugInfo *DI = getDebugInfo()) {
@@ -177,9 +177,9 @@
DI->EmitFunctionStart(CGM.getMangledName(FD), RetTy, CurFn, Builder);
} else {
// Just use the LLVM function name.
-
+
// FIXME: Remove unnecessary conversion to std::string when API settles.
- DI->EmitFunctionStart(std::string(Fn->getName()).c_str(),
+ DI->EmitFunctionStart(std::string(Fn->getName()).c_str(),
RetTy, CurFn, Builder);
}
}
@@ -187,7 +187,7 @@
// FIXME: Leaked.
CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args);
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
-
+
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
@@ -204,27 +204,27 @@
// Check if we should generate debug info for this function.
if (CGM.getDebugInfo() && !FD->hasAttr<NoDebugAttr>())
DebugInfo = CGM.getDebugInfo();
-
+
FunctionArgList Args;
-
+
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (MD->isInstance()) {
// Create the implicit 'this' decl.
// FIXME: I'm not entirely sure I like using a fake decl just for code
// generation. Maybe we can come up with a better way?
CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
- &getContext().Idents.get("this"),
+ &getContext().Idents.get("this"),
MD->getThisType(getContext()));
Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
}
}
-
+
if (FD->getNumParams()) {
const FunctionProtoType* FProto = FD->getType()->getAsFunctionProtoType();
assert(FProto && "Function def must have prototype!");
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
- Args.push_back(std::make_pair(FD->getParamDecl(i),
+ Args.push_back(std::make_pair(FD->getParamDecl(i),
FProto->getArgType(i)));
}
@@ -238,9 +238,9 @@
EmitDtorEpilogue(DD);
FinishFunction(S->getRBracLoc());
}
- else
+ else
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- const CXXRecordDecl *ClassDecl =
+ const CXXRecordDecl *ClassDecl =
cast<CXXRecordDecl>(CD->getDeclContext());
(void) ClassDecl;
if (CD->isCopyConstructor(getContext())) {
@@ -259,7 +259,7 @@
else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isCopyAssignment())
SynthesizeCXXCopyAssignment(MD, FD, Fn, Args);
-
+
// Destroy the 'this' declaration.
if (CXXThisDecl)
CXXThisDecl->Destroy(getContext());
@@ -271,27 +271,27 @@
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
// Null statement, not a label!
if (S == 0) return false;
-
+
// If this is a label, we have to emit the code, consider something like:
// if (0) { ... foo: bar(); } goto foo;
if (isa<LabelStmt>(S))
return true;
-
+
// If this is a case/default statement, and we haven't seen a switch, we have
// to emit the code.
if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
return true;
-
+
// If this is a switch statement, we want to ignore cases below it.
if (isa<SwitchStmt>(S))
IgnoreCaseStmts = true;
-
+
// Scan subexpressions for verboten labels.
for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
I != E; ++I)
if (ContainsLabel(*I, IgnoreCaseStmts))
return true;
-
+
return false;
}
@@ -304,13 +304,13 @@
// FIXME: Rename and handle conversion of other evaluatable things
// to bool.
Expr::EvalResult Result;
- if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
+ if (!Cond->Evaluate(Result, getContext()) || !Result.Val.isInt() ||
Result.HasSideEffects)
return 0; // Not foldable, not integer or not fully evaluatable.
-
+
if (CodeGenFunction::ContainsLabel(Cond))
return 0; // Contains a label.
-
+
return Result.Val.getInt().getBoolValue() ? 1 : -1;
}
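// Illustrative contract (not spelled out in the patch): the folder
// returns 1 for a condition like '2 > 1', -1 for '0', and 0 for a
// non-constant condition such as 'x > 1'.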
@@ -326,7 +326,7 @@
return EmitBranchOnBoolExpr(PE->getSubExpr(), TrueBlock, FalseBlock);
if (const CastExpr *E = dyn_cast<CastExpr>(Cond))
if (E->getCastKind() == CastExpr::CK_UserDefinedConversion) {
- if (const CXXFunctionalCastExpr *CXXFExpr =
+ if (const CXXFunctionalCastExpr *CXXFExpr =
dyn_cast<CXXFunctionalCastExpr>(E)) {
EmitCXXFunctionalCastExpr(CXXFExpr);
return;
@@ -335,7 +335,7 @@
return EmitBranchOnBoolExpr(E->getSubExpr(), TrueBlock, FalseBlock);
assert(false && "EmitBranchOnBoolExpr - Expected CStyleCastExpr");
}
-
+
if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
// Handle X && Y in a condition.
if (CondBOp->getOpcode() == BinaryOperator::LAnd) {
@@ -345,20 +345,20 @@
// br(1 && X) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
}
-
+
// If we have "X && 1", simplify the code to use an uncond branch.
// "X && 0" would have been constant folded to 0.
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == 1) {
// br(X && 1) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
}
-
+
// Emit the LHS as a conditional. If the LHS conditional is false, we
// want to jump to the FalseBlock.
llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
EmitBlock(LHSTrue);
-
+
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
return;
} else if (CondBOp->getOpcode() == BinaryOperator::LOr) {
@@ -368,31 +368,31 @@
// br(0 || X) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
}
-
+
// If we have "X || 0", simplify the code to use an uncond branch.
// "X || 1" would have been constant folded to 1.
if (ConstantFoldsToSimpleInteger(CondBOp->getRHS()) == -1) {
// br(X || 0) -> br(X).
return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
}
-
+
// Emit the LHS as a conditional. If the LHS conditional is true, we
// want to jump to the TrueBlock.
llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
EmitBlock(LHSFalse);
-
+
EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
return;
}
}
-
+
if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
// br(!x, t, f) -> br(x, f, t)
if (CondUOp->getOpcode() == UnaryOperator::LNot)
return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
}
-
+
if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
// Handle ?: operator.
@@ -438,25 +438,25 @@
// Don't bother emitting a zero-byte memset.
if (TypeInfo.first == 0)
return;
-
+
// FIXME: Handle variable sized types.
- const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext,
+ const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext,
LLVMPointerWidth);
Builder.CreateCall4(CGM.getMemSetFn(), DestPtr,
llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
// TypeInfo.first describes size in bits.
llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
TypeInfo.second/8));
}
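// Illustrative: zeroing a 16-byte struct emits (roughly, for this era of
// the intrinsic)
//   call void @llvm.memset.i64(i8* %dest, i8 0, i64 16, i32 <align>)
// with the length and alignment converted from bits to bytes above.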
void CodeGenFunction::EmitIndirectSwitches() {
llvm::BasicBlock *Default;
-
+
if (IndirectSwitches.empty())
return;
-
+
if (!LabelIDs.empty()) {
Default = getBasicBlockForLabel(LabelIDs.begin()->first);
} else {
@@ -469,20 +469,20 @@
for (std::vector<llvm::SwitchInst*>::iterator i = IndirectSwitches.begin(),
e = IndirectSwitches.end(); i != e; ++i) {
llvm::SwitchInst *I = *i;
-
+
I->setSuccessor(0, Default);
- for (std::map<const LabelStmt*,unsigned>::iterator LI = LabelIDs.begin(),
+ for (std::map<const LabelStmt*,unsigned>::iterator LI = LabelIDs.begin(),
LE = LabelIDs.end(); LI != LE; ++LI) {
I->addCase(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- LI->second),
+ LI->second),
getBasicBlockForLabel(LI->first));
}
- }
+ }
}
llvm::Value *CodeGenFunction::GetVLASize(const VariableArrayType *VAT) {
llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
-
+
assert(SizeEntry && "Did not emit size for type");
return SizeEntry;
}
@@ -490,15 +490,15 @@
llvm::Value *CodeGenFunction::EmitVLASize(QualType Ty) {
assert(Ty->isVariablyModifiedType() &&
"Must pass variably modified type to EmitVLASizes!");
-
+
EnsureInsertPoint();
-
+
if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(Ty)) {
llvm::Value *&SizeEntry = VLASizeMap[VAT->getSizeExpr()];
-
+
if (!SizeEntry) {
const llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-
+
// Get the element size.
QualType ElemTy = VAT->getElementType();
llvm::Value *ElemSize;
@@ -507,21 +507,21 @@
else
ElemSize = llvm::ConstantInt::get(SizeTy,
getContext().getTypeSize(ElemTy) / 8);
-
+
llvm::Value *NumElements = EmitScalarExpr(VAT->getSizeExpr());
NumElements = Builder.CreateIntCast(NumElements, SizeTy, false, "tmp");
-
+
SizeEntry = Builder.CreateMul(ElemSize, NumElements);
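// Illustrative: for 'int vla[n]' on a typical target this caches
// '4 * n' as the size in bytes, later retrieved through GetVLASize().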
}
-
+
return SizeEntry;
}
-
+
if (const ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
EmitVLASize(AT->getElementType());
return 0;
- }
-
+ }
+
const PointerType *PT = Ty->getAs<PointerType>();
assert(PT && "unknown VM type!");
EmitVLASize(PT->getPointeeType());
@@ -535,32 +535,29 @@
return EmitLValue(E).getAddress();
}
-void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupBlock)
-{
+void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupBlock) {
CleanupEntries.push_back(CleanupEntry(CleanupBlock));
}
-void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize)
-{
- assert(CleanupEntries.size() >= OldCleanupStackSize &&
+void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) {
+ assert(CleanupEntries.size() >= OldCleanupStackSize &&
"Cleanup stack mismatch!");
-
+
while (CleanupEntries.size() > OldCleanupStackSize)
EmitCleanupBlock();
}
-CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock()
-{
+CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
CleanupEntry &CE = CleanupEntries.back();
-
+
llvm::BasicBlock *CleanupBlock = CE.CleanupBlock;
-
+
std::vector<llvm::BasicBlock *> Blocks;
std::swap(Blocks, CE.Blocks);
-
+
std::vector<llvm::BranchInst *> BranchFixups;
std::swap(BranchFixups, CE.BranchFixups);
-
+
CleanupEntries.pop_back();
// Check if any branch fixups pointed to the scope we just popped. If so,
@@ -568,12 +565,12 @@
for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
BlockScopeMap::iterator I = BlockScopes.find(Dest);
-
+
if (I == BlockScopes.end())
continue;
-
+
assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
-
+
if (I->second == CleanupEntries.size()) {
// We don't need to do this branch fixup.
BranchFixups[i] = BranchFixups.back();
@@ -583,29 +580,29 @@
continue;
}
}
-
+
llvm::BasicBlock *SwitchBlock = 0;
llvm::BasicBlock *EndBlock = 0;
if (!BranchFixups.empty()) {
SwitchBlock = createBasicBlock("cleanup.switch");
EndBlock = createBasicBlock("cleanup.end");
-
+
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
-
+
Builder.SetInsertPoint(SwitchBlock);
- llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext),
+ llvm::Value *DestCodePtr = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext),
"cleanup.dst");
llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
-
+
// Create a switch instruction to determine where to jump next.
- llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
+ llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
BranchFixups.size());
// Restore the current basic block (if any)
if (CurBB) {
Builder.SetInsertPoint(CurBB);
-
+
// If we had a current basic block, we also need to emit an instruction
// to initialize the cleanup destination.
Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)),
@@ -616,13 +613,13 @@
for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
llvm::BranchInst *BI = BranchFixups[i];
llvm::BasicBlock *Dest = BI->getSuccessor(0);
-
+
// Fixup the branch instruction to point to the cleanup block.
BI->setSuccessor(0, CleanupBlock);
-
+
if (CleanupEntries.empty()) {
llvm::ConstantInt *ID;
-
+
// Check if we already have a destination for this block.
if (Dest == SI->getDefaultDest())
ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
@@ -631,24 +628,24 @@
if (!ID) {
// No code found, get a new unique one by using the number of
// switch successors.
- ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
SI->getNumSuccessors());
SI->addCase(ID, Dest);
}
}
-
+
// Store the jump destination before the branch instruction.
new llvm::StoreInst(ID, DestCodePtr, BI);
} else {
// We need to jump through another cleanup block. Create a pad block
// with a branch instruction that jumps to the final destination and
// add it as a branch fixup to the current cleanup scope.
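// Illustrative source shape that needs this chaining (assuming C++
// objects with destructors):
//   { A a; { B b; goto done; } } done: ;
// the branch out of the inner scope must run b's cleanup, then a's,
// before finally reaching 'done'.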
-
+
// Create the pad block.
llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
// Create a unique case ID.
- llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ llvm::ConstantInt *ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
SI->getNumSuccessors());
// Store the jump destination before the branch instruction.
@@ -656,89 +653,86 @@
// Add it as the destination.
SI->addCase(ID, CleanupPad);
-
+
// Create the branch to the final destination.
llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
CleanupPad->getInstList().push_back(BI);
-
+
// And add it as a branch fixup.
CleanupEntries.back().BranchFixups.push_back(BI);
}
}
}
-
+
// Remove all blocks from the block scope map.
for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
assert(BlockScopes.count(Blocks[i]) &&
"Did not find block in scope map!");
-
+
BlockScopes.erase(Blocks[i]);
}
-
+
return CleanupBlockInfo(CleanupBlock, SwitchBlock, EndBlock);
}
-void CodeGenFunction::EmitCleanupBlock()
-{
+void CodeGenFunction::EmitCleanupBlock() {
CleanupBlockInfo Info = PopCleanupBlock();
-
+
llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
- if (CurBB && !CurBB->getTerminator() &&
+ if (CurBB && !CurBB->getTerminator() &&
Info.CleanupBlock->getNumUses() == 0) {
CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
delete Info.CleanupBlock;
- } else
+ } else
EmitBlock(Info.CleanupBlock);
-
+
if (Info.SwitchBlock)
EmitBlock(Info.SwitchBlock);
if (Info.EndBlock)
EmitBlock(Info.EndBlock);
}
-void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI)
-{
- assert(!CleanupEntries.empty() &&
+void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) {
+ assert(!CleanupEntries.empty() &&
"Trying to add branch fixup without cleanup block!");
-
+
// FIXME: We could be more clever here and check if there's already a branch
// fixup for this destination and recycle it.
CleanupEntries.back().BranchFixups.push_back(BI);
}
-void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest)
-{
+void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) {
if (!HaveInsertPoint())
return;
-
+
llvm::BranchInst* BI = Builder.CreateBr(Dest);
-
+
Builder.ClearInsertionPoint();
-
+
// The stack is empty, no need to do any cleanup.
if (CleanupEntries.empty())
return;
-
+
if (!Dest->getParent()) {
// We are trying to branch to a block that hasn't been inserted yet.
AddBranchFixup(BI);
return;
}
-
+
BlockScopeMap::iterator I = BlockScopes.find(Dest);
if (I == BlockScopes.end()) {
// We are trying to jump to a block that is outside of any cleanup scope.
AddBranchFixup(BI);
return;
}
-
+
assert(I->second < CleanupEntries.size() &&
"Trying to branch into cleanup region");
-
+
if (I->second == CleanupEntries.size() - 1) {
// We have a branch to a block in the same scope.
return;
}
-
+
AddBranchFixup(BI);
}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 063ebf9..0fac947 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -166,20 +166,20 @@
/// this behavior for branches?
void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
- /// PushConditionalTempDestruction - Should be called before a conditional
+ /// PushConditionalTempDestruction - Should be called before a conditional
/// part of an expression is emitted. For example, before the RHS of the
/// expression below is emitted:
- ///
+ ///
/// b && f(T());
///
/// This is used to make sure that any temporaries created in the conditional
/// branch are only destroyed if the branch is taken.
void PushConditionalTempDestruction();
-
- /// PopConditionalTempDestruction - Should be called after a conditional
+
+ /// PopConditionalTempDestruction - Should be called after a conditional
/// part of an expression has been emitted.
void PopConditionalTempDestruction();
-
+
private:
CGDebugInfo* DebugInfo;
@@ -261,37 +261,37 @@
/// CXXThisDecl - When parsing a C++ function, this will hold the implicit
/// 'this' declaration.
ImplicitParamDecl *CXXThisDecl;
-
+
/// CXXLiveTemporaryInfo - Holds information about a live C++ temporary.
struct CXXLiveTemporaryInfo {
/// Temporary - The live temporary.
const CXXTemporary *Temporary;
-
+
/// ThisPtr - The pointer to the temporary.
llvm::Value *ThisPtr;
-
+
/// DtorBlock - The destructor block.
llvm::BasicBlock *DtorBlock;
-
+
/// CondPtr - If this is a conditional temporary, this is the pointer to
/// the condition variable that states whether the destructor should be
/// called or not.
llvm::Value *CondPtr;
-
+
CXXLiveTemporaryInfo(const CXXTemporary *temporary,
llvm::Value *thisptr, llvm::BasicBlock *dtorblock,
llvm::Value *condptr)
- : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock),
+ : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock),
CondPtr(condptr) { }
};
-
+
llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries;
- /// ConditionalTempDestructionStack - Contains the number of live temporaries
+ /// ConditionalTempDestructionStack - Contains the number of live temporaries
/// when PushConditionalTempDestruction was called. This is used so that
/// we know how many temporaries were created by a certain expression.
llvm::SmallVector<size_t, 4> ConditionalTempDestructionStack;
-
+
public:
CodeGenFunction(CodeGenModule &cgm);
@@ -368,32 +368,32 @@
bool Extern, int64_t nv, int64_t v);
void EmitCtorPrologue(const CXXConstructorDecl *CD);
-
+
void SynthesizeCXXCopyConstructor(const CXXConstructorDecl *CD,
const FunctionDecl *FD,
llvm::Function *Fn,
const FunctionArgList &Args);
-
+
void SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
const FunctionDecl *FD,
llvm::Function *Fn,
const FunctionArgList &Args);
-
+
void SynthesizeDefaultConstructor(const CXXConstructorDecl *CD,
const FunctionDecl *FD,
llvm::Function *Fn,
const FunctionArgList &Args);
-
+
void SynthesizeDefaultDestructor(const CXXDestructorDecl *CD,
const FunctionDecl *FD,
llvm::Function *Fn,
const FunctionArgList &Args);
-
+
/// EmitDtorEpilogue - Emit all code that comes at the end of a class's
- /// destructor. This is to call destructors on members and base classes
+ /// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
void EmitDtorEpilogue(const CXXDestructorDecl *DD);
-
+
/// EmitFunctionProlog - Emit the target specific LLVM code to load the
/// arguments for the given function. This is also responsible for naming the
/// LLVM function arguments.
@@ -559,42 +559,42 @@
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
/// generating code for a C++ member function.
llvm::Value *LoadCXXThis();
-
+
/// AddressCXXOfBaseClass - This function will add the necessary delta
/// to the load of 'this' and returns the address of the base class.
- // FIXME. This currently only does a derived to non-virtual base conversion.
+ // FIXME. This currently only does a derived to non-virtual base conversion.
// Other kinds of conversions will come later.
llvm::Value *AddressCXXOfBaseClass(llvm::Value *ThisValue,
- const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl);
-
- void EmitClassAggrMemberwiseCopy(llvm::Value *DestValue,
+
+ void EmitClassAggrMemberwiseCopy(llvm::Value *DestValue,
llvm::Value *SrcValue,
const ArrayType *Array,
const CXXRecordDecl *BaseClassDecl,
QualType Ty);
- void EmitClassAggrCopyAssignment(llvm::Value *DestValue,
+ void EmitClassAggrCopyAssignment(llvm::Value *DestValue,
llvm::Value *SrcValue,
const ArrayType *Array,
const CXXRecordDecl *BaseClassDecl,
QualType Ty);
void EmitClassMemberwiseCopy(llvm::Value *DestValue, llvm::Value *SrcValue,
- const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl,
QualType Ty);
-
+
void EmitClassCopyAssignment(llvm::Value *DestValue, llvm::Value *SrcValue,
- const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl,
QualType Ty);
-
- void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd);
-
+
void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
const ArrayType *Array,
llvm::Value *This);
@@ -602,16 +602,16 @@
void EmitCXXAggrDestructorCall(const CXXDestructorDecl *D,
const ArrayType *Array,
llvm::Value *This);
-
+
void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
llvm::Value *This);
-
+
void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
void PopCXXTemporary();
-
+
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
-
+
//===--------------------------------------------------------------------===//
// Declaration Emission
//===--------------------------------------------------------------------===//
@@ -621,7 +621,7 @@
/// This function can be called with a null (unreachable) insert point.
void EmitDecl(const Decl &D);
- /// EmitBlockVarDecl - Emit a block variable declaration.
+ /// EmitBlockVarDecl - Emit a block variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
void EmitBlockVarDecl(const VarDecl &D);
@@ -799,7 +799,7 @@
LValue EmitCXXConditionDeclLValue(const CXXConditionDeclExpr *E);
LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
-
+
LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
LValue EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E);
@@ -822,13 +822,13 @@
llvm::Value *Callee,
const CallArgList &Args,
const Decl *TargetDecl = 0);
-
+
RValue EmitCall(llvm::Value *Callee, QualType FnType,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
const Decl *TargetDecl = 0);
RValue EmitCallExpr(const CallExpr *E);
-
+
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *&This,
const llvm::Type *Ty);
RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
@@ -840,10 +840,10 @@
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD);
-
+
RValue EmitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *E);
-
- RValue EmitBuiltinExpr(const FunctionDecl *FD,
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E);
RValue EmitBlockCallExpr(const CallExpr *E);
@@ -873,7 +873,7 @@
/// expression. Will emit a temporary variable if E is not an LValue.
RValue EmitReferenceBindingToExpr(const Expr* E, QualType DestType,
bool IsInitializer = false);
-
+
//===--------------------------------------------------------------------===//
// Expression Emission
//===--------------------------------------------------------------------===//
@@ -946,20 +946,20 @@
/// with the C++ runtime so that its destructor will be called at exit.
void EmitCXXGlobalDtorRegistration(const CXXDestructorDecl *Dtor,
llvm::Constant *DeclPtr);
-
- /// GenerateCXXGlobalInitFunc - Generates code for initializing global
+
+ /// GenerateCXXGlobalInitFunc - Generates code for initializing global
/// variables.
void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
const VarDecl **Decls,
unsigned NumDecls);
-
+
void EmitCXXConstructExpr(llvm::Value *Dest, const CXXConstructExpr *E);
-
+
RValue EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
- llvm::Value *AggLoc = 0,
+ llvm::Value *AggLoc = 0,
bool IsAggLocVolatile = false,
bool IsInitializer = false);
-
+
//===--------------------------------------------------------------------===//
// Internal Helpers
//===--------------------------------------------------------------------===//
@@ -1004,7 +1004,7 @@
void ExpandTypeToArgs(QualType Ty, RValue Src,
llvm::SmallVector<llvm::Value*, 16> &Args);
- llvm::Value* EmitAsmInput(const AsmStmt &S,
+ llvm::Value* EmitAsmInput(const AsmStmt &S,
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
@@ -1017,9 +1017,9 @@
/// EmitCallArg - Emit a single call argument.
RValue EmitCallArg(const Expr *E, QualType ArgType);
-
+
/// EmitCallArgs - Emit call arguments for a function.
- /// The CallArgTypeInfo parameter is used for iterating over the known
+ /// The CallArgTypeInfo parameter is used for iterating over the known
/// argument types of the function being called.
template<typename T>
void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
@@ -1034,21 +1034,21 @@
QualType ArgType = *I;
assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
- getTypePtr() ==
- getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
+ getTypePtr() ==
+ getContext().getCanonicalType(Arg->getType()).getTypePtr() &&
"type mismatch in call argument!");
-
- Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
+
+ Args.push_back(std::make_pair(EmitCallArg(*Arg, ArgType),
ArgType));
}
-
- // Either we've emitted all the call args, or we have a call to a
+
+ // Either we've emitted all the call args, or we have a call to a
// variadic function.
- assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+ assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
"Extra arguments in non-variadic function!");
-
+
}
-
+
// If we still have any arguments, emit them using the type of the argument.
for (; Arg != ArgEnd; ++Arg) {
QualType ArgType = Arg->getType();
@@ -1057,7 +1057,7 @@
}
}
};
-
+
} // end namespace CodeGen
} // end namespace clang
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 180a686..fbd8521e 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -81,7 +81,7 @@
bool OmitOnError) {
if (OmitOnError && getDiags().hasErrorOccurred())
return;
- unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
@@ -94,13 +94,13 @@
bool OmitOnError) {
if (OmitOnError && getDiags().hasErrorOccurred())
return;
- unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
+ unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Error,
"cannot compile this %0 yet");
std::string Msg = Type;
getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
}
-LangOptions::VisibilityMode
+LangOptions::VisibilityMode
CodeGenModule::getDeclVisibilityMode(const Decl *D) const {
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
if (VD->getStorageClass() == VarDecl::PrivateExtern)
@@ -109,7 +109,7 @@
if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) {
switch (attr->getVisibility()) {
default: assert(0 && "Unknown visibility!");
- case VisibilityAttr::DefaultVisibility:
+ case VisibilityAttr::DefaultVisibility:
return LangOptions::Default;
case VisibilityAttr::HiddenVisibility:
return LangOptions::Hidden;
@@ -121,7 +121,7 @@
return getLangOptions().getVisibilityMode();
}
-void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
const Decl *D) const {
// Internal definitions always have default visibility.
if (GV->hasLocalLinkage()) {
@@ -142,12 +142,12 @@
const char *CodeGenModule::getMangledName(const GlobalDecl &GD) {
const NamedDecl *ND = GD.getDecl();
-
+
if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
return getMangledCXXCtorName(D, GD.getCtorType());
if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
return getMangledCXXDtorName(D, GD.getDtorType());
-
+
return getMangledName(ND);
}
@@ -163,7 +163,7 @@
assert(ND->getIdentifier() && "Attempt to mangle unnamed decl.");
return ND->getNameAsCString();
}
-
+
llvm::SmallString<256> Name;
llvm::raw_svector_ostream Out(Name);
if (!mangleName(ND, Context, Out)) {
@@ -178,7 +178,7 @@
const char *CodeGenModule::UniqueMangledName(const char *NameStart,
const char *NameEnd) {
assert(*(NameEnd - 1) == '\0' && "Mangled name must be null terminated!");
-
+
return MangledNames.GetOrCreateValue(NameStart, NameEnd).getKeyData();
}
@@ -199,21 +199,21 @@
void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
std::vector<const llvm::Type*>(),
false);
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
// Get the type of a ctor entry, { i32, void ()* }.
- llvm::StructType* CtorStructTy =
- llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext),
+ llvm::StructType* CtorStructTy =
+ llvm::StructType::get(VMContext, llvm::Type::getInt32Ty(VMContext),
llvm::PointerType::getUnqual(CtorFTy), NULL);
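// Illustrative resulting IR (function name hypothetical; 65535 is the
// default priority):
//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
//     [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]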
// Construct the constructor and destructor arrays.
std::vector<llvm::Constant*> Ctors;
for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
std::vector<llvm::Constant*> S;
- S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ S.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
I->second, false));
S.push_back(llvm::ConstantExpr::getBitCast(I->first, CtorPFTy));
Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
@@ -237,38 +237,38 @@
llvm::ConstantArray::get(llvm::ArrayType::get(Annotations[0]->getType(),
Annotations.size()),
Annotations);
- llvm::GlobalValue *gv =
- new llvm::GlobalVariable(TheModule, Array->getType(), false,
- llvm::GlobalValue::AppendingLinkage, Array,
+ llvm::GlobalValue *gv =
+ new llvm::GlobalVariable(TheModule, Array->getType(), false,
+ llvm::GlobalValue::AppendingLinkage, Array,
"llvm.global.annotations");
gv->setSection("llvm.metadata");
}
static CodeGenModule::GVALinkage
-GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
+GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
const LangOptions &Features) {
// The kind of external linkage this function will have, if it is not
// inline or static.
CodeGenModule::GVALinkage External = CodeGenModule::GVA_StrongExternal;
- if (Context.getLangOptions().CPlusPlus &&
+ if (Context.getLangOptions().CPlusPlus &&
FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
External = CodeGenModule::GVA_TemplateInstantiation;
-
+
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
// C++ member functions defined inside the class are always inline.
if (MD->isInline() || !MD->isOutOfLine())
return CodeGenModule::GVA_CXXInline;
-
+
return External;
}
-
+
// "static" functions get internal linkage.
if (FD->getStorageClass() == FunctionDecl::Static)
return CodeGenModule::GVA_Internal;
if (!FD->isInline())
return External;
-
+
// If the inline function explicitly has the GNU inline attribute on it, or if
// this is C89 mode, we use GNU semantics.
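// e.g. (illustrative): under GNU semantics
//   extern inline int f(void) { return 0; }
// never emits a standalone definition, the opposite of what C99 gives
// 'extern inline'.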
if (!Features.C99 && !Features.CPlusPlus) {
@@ -291,7 +291,7 @@
// have already handled "static inline" above, with the GVA_Internal case.
if (Features.CPlusPlus) // inline and extern inline.
return CodeGenModule::GVA_CXXInline;
-
+
assert(Features.C99 && "Must be in C99 mode if not in C89 or C++ mode");
if (FD->isC99InlineDefinition())
return CodeGenModule::GVA_C99Inline;
@@ -335,7 +335,7 @@
}
void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
- const CGFunctionInfo &Info,
+ const CGFunctionInfo &Info,
llvm::Function *F) {
AttributeListType AttributeList;
ConstructAttributeList(Info, D, AttributeList);
@@ -354,16 +354,16 @@
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
if (!Features.Exceptions && !Features.ObjCNonFragileABI)
- F->addFnAttr(llvm::Attribute::NoUnwind);
+ F->addFnAttr(llvm::Attribute::NoUnwind);
if (D->hasAttr<AlwaysInlineAttr>())
F->addFnAttr(llvm::Attribute::AlwaysInline);
-
+
if (D->hasAttr<NoInlineAttr>())
F->addFnAttr(llvm::Attribute::NoInline);
}
-void CodeGenModule::SetCommonAttributes(const Decl *D,
+void CodeGenModule::SetCommonAttributes(const Decl *D,
llvm::GlobalValue *GV) {
setGlobalVisibility(GV, D);
@@ -390,19 +390,19 @@
bool IsIncompleteFunction) {
if (!IsIncompleteFunction)
SetLLVMFunctionAttributes(FD, getTypes().getFunctionInfo(FD), F);
-
+
// Only a few attributes are set on declarations; these may later be
// overridden by a definition.
-
+
if (FD->hasAttr<DLLImportAttr>()) {
F->setLinkage(llvm::Function::DLLImportLinkage);
- } else if (FD->hasAttr<WeakAttr>() ||
+ } else if (FD->hasAttr<WeakAttr>() ||
FD->hasAttr<WeakImportAttr>()) {
// "extern_weak" is overloaded in LLVM; we probably should have
- // separate linkage types for this.
+ // separate linkage types for this.
F->setLinkage(llvm::Function::ExternalWeakLinkage);
} else {
- F->setLinkage(llvm::Function::ExternalLinkage);
+ F->setLinkage(llvm::Function::ExternalLinkage);
}
if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
@@ -410,7 +410,7 @@
}
void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
- assert(!GV->isDeclaration() &&
+ assert(!GV->isDeclaration() &&
"Only globals with definition can force usage.");
LLVMUsed.push_back(GV);
}
@@ -422,22 +422,22 @@
llvm::Type *i8PTy =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
-
+
// Convert LLVMUsed to what ConstantArray needs.
std::vector<llvm::Constant*> UsedArray;
UsedArray.resize(LLVMUsed.size());
for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
- UsedArray[i] =
- llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
+ UsedArray[i] =
+ llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
i8PTy);
}
-
+
if (UsedArray.empty())
return;
llvm::ArrayType *ATy = llvm::ArrayType::get(i8PTy, UsedArray.size());
-
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(getModule(), ATy, false,
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), ATy, false,
llvm::GlobalValue::AppendingLinkage,
llvm::ConstantArray::get(ATy, UsedArray),
"llvm.used");
@@ -459,27 +459,27 @@
// just ignore the deferred decl.
llvm::GlobalValue *CGRef = GlobalDeclMap[getMangledName(D)];
assert(CGRef && "Deferred decl wasn't referenced?");
-
+
if (!CGRef->isDeclaration())
continue;
-
+
// Otherwise, emit the definition and move on to the next one.
EmitGlobalDefinition(D);
}
}
-/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+/// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
/// annotation information for a given GlobalValue. The annotation struct is
-/// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
-/// GlobalValue being annotated. The second field is the constant string
-/// created from the AnnotateAttr's annotation. The third field is a constant
+/// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+/// GlobalValue being annotated. The second field is the constant string
+/// created from the AnnotateAttr's annotation. The third field is a constant
/// string containing the name of the translation unit. The fourth field is
/// the line number in the file of the annotated value declaration.
///
/// FIXME: this does not unique the annotation string constants, as llvm-gcc
/// appears to.
///
-llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
const AnnotateAttr *AA,
unsigned LineNo) {
llvm::Module *M = &getModule();
@@ -488,7 +488,7 @@
// which are the 2nd and 3rd elements of the global annotation structure.
const llvm::Type *SBP =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
- llvm::Constant *anno = llvm::ConstantArray::get(VMContext,
+ llvm::Constant *anno = llvm::ConstantArray::get(VMContext,
AA->getAnnotation(), true);
llvm::Constant *unit = llvm::ConstantArray::get(VMContext,
M->getModuleIdentifier(),
@@ -496,14 +496,14 @@
// Get the two global values corresponding to the ConstantArrays we just
// created to hold the bytes of the strings.
- llvm::GlobalValue *annoGV =
+ llvm::GlobalValue *annoGV =
new llvm::GlobalVariable(*M, anno->getType(), false,
llvm::GlobalValue::PrivateLinkage, anno,
GV->getName());
// translation unit name string, emitted into the llvm.metadata section.
llvm::GlobalValue *unitGV =
new llvm::GlobalVariable(*M, unit->getType(), false,
- llvm::GlobalValue::PrivateLinkage, unit,
+ llvm::GlobalValue::PrivateLinkage, unit,
".str");
// Create the ConstantStruct for the global annotation.
@@ -524,12 +524,12 @@
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
// Constructors and destructors should never be deferred.
- if (FD->hasAttr<ConstructorAttr>() ||
+ if (FD->hasAttr<ConstructorAttr>() ||
FD->hasAttr<DestructorAttr>())
return false;
GVALinkage Linkage = GetLinkageForFunction(getContext(), FD, Features);
-
+
// static, static inline, always_inline, and extern inline functions can
// always be deferred. Normal inline functions can be deferred in C99/C++.
if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
@@ -537,7 +537,7 @@
return true;
return false;
}
-
+
const VarDecl *VD = cast<VarDecl>(Global);
assert(VD->isFileVarDecl() && "Invalid decl");
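A minimal sketch of the deferral policy just described (illustrative only):

    static int helper(int x) { return x + 1; }      // GVA_Internal: deferred; dropped if never used
    inline int twice(int x)  { return 2 * x; }      // inline: deferrable in C99/C++
    int entry(int x)         { return helper(x); }  // strong definition: emitted eagerly, and its
                                                    // reference forces helper to be emitted too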
@@ -546,7 +546,7 @@
void CodeGenModule::EmitGlobal(GlobalDecl GD) {
const ValueDecl *Global = GD.getDecl();
-
+
// If this is an alias definition (which otherwise looks like a declaration)
// emit it now.
if (Global->hasAttr<AliasAttr>())
@@ -563,7 +563,7 @@
// In C++, if this is marked "extern", defer code generation.
if (getLangOptions().CPlusPlus && !VD->getInit() &&
- (VD->getStorageClass() == VarDecl::Extern ||
+ (VD->getStorageClass() == VarDecl::Extern ||
VD->isExternC(getContext())))
return;
@@ -595,7 +595,7 @@
void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
const ValueDecl *D = GD.getDecl();
-
+
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
EmitCXXConstructor(CD, GD.getCtorType());
else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
@@ -624,16 +624,16 @@
if (Entry) {
if (Entry->getType()->getElementType() == Ty)
return Entry;
-
+
// Make sure the result is of the correct type.
const llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
return llvm::ConstantExpr::getBitCast(Entry, PTy);
}
-
+
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
// of the file.
- llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
+ llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
DeferredDecls.find(MangledName);
if (DDI != DeferredDecls.end()) {
// Move the potentially referenced deferred decl to the DeferredDeclsToEmit
@@ -649,20 +649,20 @@
// A called constructor which has no definition or declaration needs to be
// synthesized.
else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- const CXXRecordDecl *ClassDecl =
+ const CXXRecordDecl *ClassDecl =
cast<CXXRecordDecl>(CD->getDeclContext());
if (CD->isCopyConstructor(getContext()))
DeferredCopyConstructorToEmit(D);
else if (!ClassDecl->hasUserDeclaredConstructor())
DeferredDeclsToEmit.push_back(D);
}
- else if (isa<CXXDestructorDecl>(FD))
+ else if (isa<CXXDestructorDecl>(FD))
DeferredDestructorToEmit(D);
else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isCopyAssignment())
DeferredCopyAssignmentToEmit(D);
}
-
+
// This function doesn't have a complete type (for example, the return
// type is an incomplete struct). Use a fake type instead, and make
// sure not to try to set attributes.
@@ -672,7 +672,7 @@
std::vector<const llvm::Type*>(), false);
IsIncompleteFunction = true;
}
- llvm::Function *F = llvm::Function::Create(cast<llvm::FunctionType>(Ty),
+ llvm::Function *F = llvm::Function::Create(cast<llvm::FunctionType>(Ty),
llvm::Function::ExternalLinkage,
"", &getModule());
F->setName(MangledName);
@@ -685,13 +685,13 @@
/// Defer definition of copy constructor(s) which need to be implicitly defined.
void CodeGenModule::DeferredCopyConstructorToEmit(GlobalDecl CopyCtorDecl) {
- const CXXConstructorDecl *CD =
+ const CXXConstructorDecl *CD =
cast<CXXConstructorDecl>(CopyCtorDecl.getDecl());
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
if (ClassDecl->hasTrivialCopyConstructor() ||
ClassDecl->hasUserDeclaredCopyConstructor())
return;
-
+
// First make sure all direct base classes and virtual bases and non-static
// data members which need to have their copy constructors implicitly defined
// are defined. 12.8.p7
@@ -699,11 +699,11 @@
Base != ClassDecl->bases_end(); ++Base) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- if (CXXConstructorDecl *BaseCopyCtor =
+ if (CXXConstructorDecl *BaseCopyCtor =
BaseClassDecl->getCopyConstructor(Context, 0))
GetAddrOfCXXConstructor(BaseCopyCtor, Ctor_Complete);
}
-
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
@@ -715,7 +715,7 @@
continue;
CXXRecordDecl *FieldClassDecl
= cast<CXXRecordDecl>(FieldClassType->getDecl());
- if (CXXConstructorDecl *FieldCopyCtor =
+ if (CXXConstructorDecl *FieldCopyCtor =
FieldClassDecl->getCopyConstructor(Context, 0))
GetAddrOfCXXConstructor(FieldCopyCtor, Ctor_Complete);
}
@@ -727,11 +727,11 @@
void CodeGenModule::DeferredCopyAssignmentToEmit(GlobalDecl CopyAssignDecl) {
const CXXMethodDecl *CD = cast<CXXMethodDecl>(CopyAssignDecl.getDecl());
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
-
+
if (ClassDecl->hasTrivialCopyAssignment() ||
ClassDecl->hasUserDeclaredCopyAssignment())
return;
-
+
// First make sure all direct base classes and virtual bases and non-static
// data members which need to have their copy assignments implicitly defined
// are defined. 12.8.p12
@@ -745,7 +745,7 @@
BaseClassDecl->hasConstCopyAssignment(getContext(), MD))
GetAddrOfFunction(GlobalDecl(MD), 0);
}
-
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
@@ -764,7 +764,7 @@
GetAddrOfFunction(GlobalDecl(MD), 0);
}
}
- DeferredDeclsToEmit.push_back(CopyAssignDecl);
+ DeferredDeclsToEmit.push_back(CopyAssignDecl);
}
void CodeGenModule::DeferredDestructorToEmit(GlobalDecl DtorDecl) {
@@ -778,11 +778,11 @@
Base != ClassDecl->bases_end(); ++Base) {
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
- if (const CXXDestructorDecl *BaseDtor =
+ if (const CXXDestructorDecl *BaseDtor =
BaseClassDecl->getDestructor(Context))
GetAddrOfCXXDestructor(BaseDtor, Dtor_Complete);
}
-
+
for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
FieldEnd = ClassDecl->field_end();
Field != FieldEnd; ++Field) {
@@ -794,7 +794,7 @@
continue;
CXXRecordDecl *FieldClassDecl
= cast<CXXRecordDecl>(FieldClassType->getDecl());
- if (const CXXDestructorDecl *FieldDtor =
+ if (const CXXDestructorDecl *FieldDtor =
FieldClassDecl->getDestructor(Context))
GetAddrOfCXXDestructor(FieldDtor, Dtor_Complete);
}
@@ -839,15 +839,15 @@
if (Entry) {
if (Entry->getType() == Ty)
return Entry;
-
+
// Make sure the result is of the correct type.
return llvm::ConstantExpr::getBitCast(Entry, Ty);
}
-
+
// This is the first use or definition of a mangled name. If there is a
// deferred decl with this name, remember that we need to emit it at the end
// of the file.
- llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
+ llvm::DenseMap<const char*, GlobalDecl>::iterator DDI =
DeferredDecls.find(MangledName);
if (DDI != DeferredDecls.end()) {
// Move the potentially referenced deferred decl to the DeferredDeclsToEmit
@@ -855,11 +855,11 @@
DeferredDeclsToEmit.push_back(DDI->second);
DeferredDecls.erase(DDI);
}
-
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
llvm::GlobalValue::ExternalLinkage,
- 0, "", 0,
+ 0, "", 0,
false, Ty->getAddressSpace());
GV->setName(MangledName);
@@ -873,13 +873,13 @@
if (D->getStorageClass() == VarDecl::PrivateExtern)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
- if (D->hasAttr<WeakAttr>() ||
+ if (D->hasAttr<WeakAttr>() ||
D->hasAttr<WeakImportAttr>())
GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
GV->setThreadLocal(D->isThreadSpecified());
}
-
+
return Entry = GV;
}
@@ -894,8 +894,8 @@
QualType ASTTy = D->getType();
if (Ty == 0)
Ty = getTypes().ConvertTypeForMem(ASTTy);
-
- const llvm::PointerType *PTy =
+
+ const llvm::PointerType *PTy =
llvm::PointerType::get(Ty, ASTTy.getAddressSpace());
return GetOrCreateLLVMGlobal(getMangledName(D), PTy, D);
}
@@ -931,7 +931,7 @@
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
llvm::Constant *Init = 0;
QualType ASTTy = D->getType();
-
+
if (D->getInit() == 0) {
// This is a tentative definition; tentative definitions are
// implicitly initialized with { 0 }.
@@ -946,7 +946,7 @@
Init = EmitNullConstant(D->getType());
} else {
Init = EmitConstantExpr(D->getInit(), D->getType());
-
+
if (!Init) {
QualType T = D->getInit()->getType();
if (getLangOptions().CPlusPlus) {
@@ -961,7 +961,7 @@
const llvm::Type* InitType = Init->getType();
llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
-
+
// Strip off a bitcast if we got one back.
if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
assert(CE->getOpcode() == llvm::Instruction::BitCast ||
@@ -969,10 +969,10 @@
CE->getOpcode() == llvm::Instruction::GetElementPtr);
Entry = CE->getOperand(0);
}
-
+
// Entry is now either a Function or GlobalVariable.
llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);
-
+
// We have a definition after a declaration with the wrong type.
// We must make a new GlobalVariable* and update everything that used OldGV
// (a declaration or tentative definition) with the new GlobalVariable*
@@ -985,7 +985,7 @@
if (GV == 0 ||
GV->getType()->getElementType() != InitType ||
GV->getType()->getAddressSpace() != ASTTy.getAddressSpace()) {
-
+
// Remove the old entry from GlobalDeclMap so that we'll create a new one.
GlobalDeclMap.erase(getMangledName(D));
@@ -994,7 +994,7 @@
GV->takeName(cast<llvm::GlobalValue>(Entry));
// Replace all uses of the old global with the new global
- llvm::Constant *NewPtrForOldDecl =
+ llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(GV, Entry->getType());
Entry->replaceAllUsesWith(NewPtrForOldDecl);
@@ -1017,7 +1017,7 @@
// members, it cannot be declared "LLVM const".
GV->setConstant(true);
}
-
+
GV->setAlignment(getContext().getDeclAlignInBytes(D));
// Set the llvm linkage type as appropriate.
@@ -1064,7 +1064,7 @@
// If we're redefining a global as a function, don't transform it.
llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
if (OldFn == 0) return;
-
+
const llvm::Type *NewRetTy = NewFn->getReturnType();
llvm::SmallVector<llvm::Value*, 4> ArgList;
@@ -1074,7 +1074,7 @@
unsigned OpNo = UI.getOperandNo();
llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*UI++);
if (!CI || OpNo != 0) continue;
-
+
// If the return types don't match exactly, and if the call isn't dead, then
// we can't transform this call.
if (CI->getType() != NewRetTy && !CI->use_empty())
@@ -1095,7 +1095,7 @@
}
if (DontTransform)
continue;
-
+
// Okay, we can transform this. Create the new call instruction and copy
// over the required information.
ArgList.append(CI->op_begin()+1, CI->op_begin()+1+ArgNo);
@@ -1118,21 +1118,21 @@
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
const llvm::FunctionType *Ty;
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
-
+
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
bool isVariadic = D->getType()->getAsFunctionProtoType()->isVariadic();
-
+
Ty = getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), isVariadic);
} else {
Ty = cast<llvm::FunctionType>(getTypes().ConvertType(D->getType()));
-
+
// As a special case, make sure that definitions of K&R function
// "type foo()" aren't declared as varargs (which forces the backend
// to do unnecessary work).
if (D->getType()->isFunctionNoProtoType()) {
assert(Ty->isVarArg() && "Didn't lower type as expected");
- // Due to stret, the lowered function could have arguments.
- // Just create the same type as was lowered by ConvertType
+ // Due to stret, the lowered function could have arguments.
+ // Just create the same type as was lowered by ConvertType
// but strip off the varargs bit.
std::vector<const llvm::Type*> Args(Ty->param_begin(), Ty->param_end());
Ty = llvm::FunctionType::get(Ty->getReturnType(), Args, false);
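To illustrate the special case (C input; a sketch assuming the target adds no stret argument):

    int foo();                /* no-proto type: converted as variadic i32 (...) */
    int foo() { return 0; }   /* definition: same type rebuilt with the varargs bit stripped, i32 () */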
@@ -1141,17 +1141,17 @@
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
-
+
// Strip off a bitcast if we got one back.
if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
assert(CE->getOpcode() == llvm::Instruction::BitCast);
Entry = CE->getOperand(0);
}
-
-
+
+
if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);
-
+
// If the types mismatch then we have to rewrite the definition.
assert(OldFn->isDeclaration() &&
"Shouldn't replace non-declaration");
@@ -1167,7 +1167,7 @@
GlobalDeclMap.erase(getMangledName(D));
llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
NewFn->takeName(OldFn);
-
+
// If this is an implementation of a function without a prototype, try to
// replace any existing uses of the function (which may be calls) with uses
// of the new function
@@ -1175,27 +1175,27 @@
ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
OldFn->removeDeadConstantUsers();
}
-
+
// Replace uses of F with the Function we will endow with a body.
if (!Entry->use_empty()) {
- llvm::Constant *NewPtrForOldDecl =
+ llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
Entry->replaceAllUsesWith(NewPtrForOldDecl);
}
-
+
// Ok, delete the old function now, which is dead.
OldFn->eraseFromParent();
-
+
Entry = NewFn;
}
-
+
llvm::Function *Fn = cast<llvm::Function>(Entry);
CodeGenFunction(*this).GenerateCode(D, Fn);
SetFunctionDefinitionAttributes(D, Fn);
SetLLVMFunctionAttributesForDefinition(D, Fn);
-
+
if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
AddGlobalCtor(Fn, CA->getPriority());
if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
@@ -1207,7 +1207,7 @@
assert(AA && "Not an alias?");
const llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
-
+
// Unique the name through the identifier table.
const char *AliaseeName = AA->getAliasee().c_str();
AliaseeName = getContext().Idents.get(AliaseeName).getName();
@@ -1222,22 +1222,22 @@
llvm::PointerType::getUnqual(DeclTy), 0);
// Create the new alias itself, but don't set a name yet.
- llvm::GlobalValue *GA =
+ llvm::GlobalValue *GA =
new llvm::GlobalAlias(Aliasee->getType(),
llvm::Function::ExternalLinkage,
"", Aliasee, &getModule());
-
+
// See if there is already something with the alias' name in the module.
const char *MangledName = getMangledName(D);
llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
-
+
if (Entry && !Entry->isDeclaration()) {
// If there is a definition in the module, then it wins over the alias.
// This is dubious, but allow it to be safe. Just ignore the alias.
GA->eraseFromParent();
return;
}
-
+
if (Entry) {
// If there is a declaration in the module, then we had an extern followed
// by the alias, as in:
@@ -1246,12 +1246,12 @@
// int test6() __attribute__((alias("test7")));
//
// Remove it and replace uses of it with the alias.
-
+
Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
Entry->getType()));
Entry->eraseFromParent();
}
-
+
// Now we know that there is no conflict, set the name.
Entry = GA;
GA->setName(MangledName);
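Extending the comment's test6/test7 example into a complete sketch (hypothetical; C linkage keeps the names aligned with the alias string):

    extern "C" int test7(void) { return 7; }
    extern "C" int test6(void) __attribute__((alias("test7")));
    // Any prior extern declaration of test6 is replaced with a bitcast of
    // the new GlobalAlias, exactly as the code above does.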
@@ -1267,7 +1267,7 @@
} else {
GA->setLinkage(llvm::Function::DLLExportLinkage);
}
- } else if (D->hasAttr<WeakAttr>() ||
+ } else if (D->hasAttr<WeakAttr>() ||
D->hasAttr<WeakImportAttr>()) {
GA->setLinkage(llvm::Function::WeakAnyLinkage);
}
@@ -1279,20 +1279,20 @@
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(unsigned BuiltinID) {
assert((Context.BuiltinInfo.isLibFunction(BuiltinID) ||
- Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) &&
"isn't a lib fn");
-
+
// Get the name, skip over the __builtin_ prefix (if necessary).
const char *Name = Context.BuiltinInfo.GetName(BuiltinID);
if (Context.BuiltinInfo.isLibFunction(BuiltinID))
Name += 10;
-
+
// Get the type for the builtin.
ASTContext::GetBuiltinTypeError Error;
QualType Type = Context.GetBuiltinType(BuiltinID, Error);
assert(Error == ASTContext::GE_None && "Can't get builtin type");
- const llvm::FunctionType *Ty =
+ const llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(Type));
// Unique the name through the identifier table.
@@ -1336,7 +1336,7 @@
// Check for simple case.
if (!Literal->containsNonAsciiOrNull()) {
StringLength = NumBytes;
- return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
+ return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
StringLength));
}
@@ -1344,18 +1344,18 @@
llvm::SmallVector<UTF16, 128> ToBuf(NumBytes);
const UTF8 *FromPtr = (UTF8 *)Literal->getStrData();
UTF16 *ToPtr = &ToBuf[0];
-
- ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+
+ ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
&ToPtr, ToPtr + NumBytes,
strictConversion);
-
+
// Check for conversion failure.
if (Result != conversionOK) {
// FIXME: Have Sema::CheckObjCString() validate the UTF-8 string and remove
// this duplicate code.
assert(Result == sourceIllegal && "UTF-8 to UTF-16 conversion failed");
StringLength = NumBytes;
- return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
+ return Map.GetOrCreateValue(llvm::StringRef(Literal->getStrData(),
StringLength));
}
@@ -1391,41 +1391,41 @@
unsigned StringLength = 0;
bool isUTF16 = false;
llvm::StringMapEntry<llvm::Constant*> &Entry =
- GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
getTargetData().isLittleEndian(),
isUTF16, StringLength);
-
+
if (llvm::Constant *C = Entry.getValue())
return C;
-
+
llvm::Constant *Zero =
llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext));
llvm::Constant *Zeros[] = { Zero, Zero };
-
+
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
Ty = llvm::ArrayType::get(Ty, 0);
- llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
"__CFConstantStringClassReference");
// Decay array -> ptr
CFConstantStringClassRef =
llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
}
-
+
QualType CFTy = getContext().getCFConstantStringType();
- const llvm::StructType *STy =
+ const llvm::StructType *STy =
cast<llvm::StructType>(getTypes().ConvertType(CFTy));
std::vector<llvm::Constant*> Fields(4);
// Class pointer.
Fields[0] = CFConstantStringClassRef;
-
+
// Flags.
const llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
- Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
+ Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
llvm::ConstantInt::get(Ty, 0x07C8);
// String pointer.
@@ -1449,30 +1449,30 @@
// are following gcc here.
isConstant = true;
}
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(getModule(), C->getType(), isConstant,
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant,
Linkage, C, Prefix);
if (Sect)
GV->setSection(Sect);
if (isUTF16) {
unsigned Align = getContext().getTypeAlign(getContext().ShortTy)/8;
- GV->setAlignment(Align);
+ GV->setAlignment(Align);
}
Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros, 2);
// String length.
Ty = getTypes().ConvertType(getContext().LongTy);
Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
-
+
// The struct.
C = llvm::ConstantStruct::get(STy, Fields);
- GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
- llvm::GlobalVariable::PrivateLinkage, C,
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
"_unnamed_cfstring_");
if (const char *Sect = getContext().Target.getCFStringSection())
GV->setSection(Sect);
Entry.setValue(GV);
-
+
return GV;
}
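A sketch of the constant assembled here, mirroring the four fields in order (shown as a C++ struct for layout only; the real emission is the ConstantStruct above):

    struct CFConstString {
      const void *isa;    // Fields[0]: __CFConstantStringClassReference, decayed
      unsigned    flags;  // Fields[1]: 0x07C8 for 8-bit data, 0x07D0 for UTF-16
      const void *bytes;  // Fields[2]: GEP into the string payload global
      long        length; // Fields[3]: the StringLength computed earlier
    };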
@@ -1485,16 +1485,16 @@
const ConstantArrayType *CAT =
getContext().getAsConstantArrayType(E->getType());
assert(CAT && "String isn't pointer or array!");
-
+
// Resize the string to the right size.
std::string Str(StrData, StrData+Len);
uint64_t RealLen = CAT->getSize().getZExtValue();
-
+
if (E->isWide())
RealLen *= getContext().Target.getWCharWidth()/8;
-
+
Str.resize(RealLen, '\0');
-
+
return Str;
}
@@ -1518,16 +1518,16 @@
/// GenerateWritableString -- Creates storage for a string literal.
-static llvm::Constant *GenerateStringLiteral(const std::string &str,
+static llvm::Constant *GenerateStringLiteral(const std::string &str,
bool constant,
CodeGenModule &CGM,
const char *GlobalName) {
// Create Constant for this string literal. Don't add a '\0'.
llvm::Constant *C =
llvm::ConstantArray::get(CGM.getLLVMContext(), str, false);
-
+
// Create a global variable for this string
- return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+ return new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
llvm::GlobalValue::PrivateLinkage,
C, GlobalName);
}
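For example (a sketch; the global's actual name depends on the GlobalName the caller passes):

    const char *p = "hi";
    // Emits roughly: @.str = private constant [3 x i8] c"hi\00"
    // The trailing NUL comes from the ConstantArrayType size computed in
    // GetStringForStringLiteral, not from this function.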
@@ -1551,8 +1551,8 @@
// Don't share any string literals if strings aren't constant.
if (!IsConstant)
return GenerateStringLiteral(str, false, *this, GlobalName);
-
- llvm::StringMapEntry<llvm::Constant *> &Entry =
+
+ llvm::StringMapEntry<llvm::Constant *> &Entry =
ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]);
if (Entry.getValue())
@@ -1574,12 +1574,12 @@
/// EmitObjCPropertyImplementations - Emit information for synthesized
/// properties for an implementation.
-void CodeGenModule::EmitObjCPropertyImplementations(const
+void CodeGenModule::EmitObjCPropertyImplementations(const
ObjCImplementationDecl *D) {
- for (ObjCImplementationDecl::propimpl_iterator
+ for (ObjCImplementationDecl::propimpl_iterator
i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
ObjCPropertyImplDecl *PID = *i;
-
+
// Dynamic is just for type-checking.
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
ObjCPropertyDecl *PD = PID->getPropertyDecl();
@@ -1631,7 +1631,7 @@
// Ignore dependent declarations.
if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
return;
-
+
switch (D->getKind()) {
case Decl::CXXConversion:
case Decl::CXXMethod:
@@ -1639,9 +1639,9 @@
// Skip function templates
if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate())
return;
-
+
// Fall through
-
+
case Decl::Var:
EmitGlobal(GlobalDecl(cast<ValueDecl>(D)));
break;
@@ -1668,7 +1668,7 @@
break;
// Objective-C Decls
-
+
// Forward declarations, no (immediate) code generation.
case Decl::ObjCClass:
case Decl::ObjCForwardProtocol:
@@ -1691,7 +1691,7 @@
EmitObjCPropertyImplementations(OMD);
Runtime->GenerateClass(OMD);
break;
- }
+ }
case Decl::ObjCMethod: {
ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
// If this is not a prototype, emit the body.
@@ -1699,7 +1699,7 @@
CodeGenFunction(*this).GenerateObjCMethod(OMD);
break;
}
- case Decl::ObjCCompatibleAlias:
+ case Decl::ObjCCompatibleAlias:
// compatibility-alias is a directive and has no code gen.
break;
@@ -1711,7 +1711,7 @@
FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
std::string AsmString(AD->getAsmString()->getStrData(),
AD->getAsmString()->getByteLength());
-
+
const std::string &S = getModule().getModuleInlineAsm();
if (S.empty())
getModule().setModuleInlineAsm(AsmString);
@@ -1719,8 +1719,8 @@
getModule().setModuleInlineAsm(S + '\n' + AsmString);
break;
}
-
- default:
+
+ default:
// Make sure we handled everything we should, every other kind is a
// non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
// function. Need to recode Decl::Kind to do that easily.
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 2aa97de..607f2a1 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -73,32 +73,32 @@
// a regular VarDecl or a FunctionDecl.
class GlobalDecl {
llvm::PointerIntPair<const ValueDecl*, 2> Value;
-
+
public:
GlobalDecl() {}
-
+
explicit GlobalDecl(const ValueDecl *VD) : Value(VD, 0) {
assert(!isa<CXXConstructorDecl>(VD) && "Use other ctor with ctor decls!");
assert(!isa<CXXDestructorDecl>(VD) && "Use other ctor with dtor decls!");
}
- GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
+ GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
: Value(D, Type) {}
GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
: Value(D, Type) {}
-
+
const ValueDecl *getDecl() const { return Value.getPointer(); }
-
+
CXXCtorType getCtorType() const {
assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
return static_cast<CXXCtorType>(Value.getInt());
}
-
+
CXXDtorType getDtorType() const {
assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
return static_cast<CXXDtorType>(Value.getInt());
}
};
-
+
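Hypothetical usage of this wrapper (the decl pointers are assumed to be in scope):

    GlobalDecl FnGD(SomeFunction);               // plain function or variable
    GlobalDecl CtorGD(SomeCtor, Ctor_Complete);  // constructor plus variant, packed
    CXXCtorType K = CtorGD.getCtorType();        // recovered from the pair's int bits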
/// CodeGenModule - This class organizes the cross-function state that is used
/// while generating LLVM code.
class CodeGenModule : public BlockModule {
@@ -176,11 +176,11 @@
/// CXXGlobalInits - Variables with global initializers that need to run
/// before main.
std::vector<const VarDecl*> CXXGlobalInits;
-
+
/// CFConstantStringClassRef - Cached reference to the class for constant
/// strings. This value has type int * but is actually an Obj-C class pointer.
llvm::Constant *CFConstantStringClassRef;
-
+
llvm::LLVMContext &VMContext;
public:
CodeGenModule(ASTContext &C, const CompileOptions &CompileOpts,
@@ -255,7 +255,7 @@
/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
/// array for the given ObjCEncodeExpr node.
llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
-
+
/// GetAddrOfConstantString - Returns a pointer to a character array
/// containing the literal. Its contents are exactly those of the given
/// string, i.e. it will not be null terminated automatically; see
@@ -280,14 +280,14 @@
/// GetAddrOfCXXConstructor - Return the address of the constructor of the
/// given type.
- llvm::Function *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ llvm::Function *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type);
/// GetAddrOfCXXDestructor - Return the address of the constructor of the
/// given type.
- llvm::Function *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ llvm::Function *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type);
-
+
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *getBuiltinLibFunction(unsigned BuiltinID);
@@ -378,9 +378,9 @@
const char *getMangledName(const GlobalDecl &D);
const char *getMangledName(const NamedDecl *ND);
- const char *getMangledCXXCtorName(const CXXConstructorDecl *D,
+ const char *getMangledCXXCtorName(const CXXConstructorDecl *D,
CXXCtorType Type);
- const char *getMangledCXXDtorName(const CXXDestructorDecl *D,
+ const char *getMangledCXXDtorName(const CXXDestructorDecl *D,
CXXDtorType Type);
void EmitTentativeDefinition(const VarDecl *D);
@@ -392,12 +392,12 @@
GVA_StrongExternal,
GVA_TemplateInstantiation
};
-
+
private:
/// UniqueMangledName - Unique a name by (if necessary) inserting it into the
/// MangledNames string map.
const char *UniqueMangledName(const char *NameStart, const char *NameEnd);
-
+
llvm::Constant *GetOrCreateLLVMFunction(const char *MangledName,
const llvm::Type *Ty,
GlobalDecl D);
@@ -407,7 +407,7 @@
void DeferredCopyConstructorToEmit(GlobalDecl D);
void DeferredCopyAssignmentToEmit(GlobalDecl D);
void DeferredDestructorToEmit(GlobalDecl D);
-
+
/// SetCommonAttributes - Set attributes which are common to any
/// form of a global definition (alias, Objective-C method,
/// function, global variable).
@@ -416,9 +416,9 @@
void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
/// SetFunctionDefinitionAttributes - Set attributes for a global definition.
- void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ void SetFunctionDefinitionAttributes(const FunctionDecl *D,
llvm::GlobalValue *GV);
-
+
/// SetFunctionAttributes - Set function attributes for a function
/// declaration.
void SetFunctionAttributes(const FunctionDecl *FD,
@@ -437,29 +437,29 @@
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
// C++ related functions.
-
+
void EmitNamespace(const NamespaceDecl *D);
void EmitLinkageSpec(const LinkageSpecDecl *D);
/// EmitCXXConstructors - Emit constructors (base, complete) from a
/// C++ constructor Decl.
void EmitCXXConstructors(const CXXConstructorDecl *D);
-
+
/// EmitCXXConstructor - Emit a single constructor with the given type from
/// a C++ constructor Decl.
void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
-
- /// EmitCXXDestructors - Emit destructors (base, complete) from a
+
+ /// EmitCXXDestructors - Emit destructors (base, complete) from a
/// C++ destructor Decl.
void EmitCXXDestructors(const CXXDestructorDecl *D);
-
+
/// EmitCXXDestructor - Emit a single destructor with the given type from
/// a C++ destructor Decl.
void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
-
+
/// EmitCXXGlobalInitFunc - Emit a function that initializes C++ globals.
void EmitCXXGlobalInitFunc();
-
+
// FIXME: Hardcoding priority here is gross.
void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 48f8192..94db836 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This is the code that handles AST -> LLVM type lowering.
+// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//
@@ -34,8 +34,8 @@
}
CodeGenTypes::~CodeGenTypes() {
- for(llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
- I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+ for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+ I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
I != E; ++I)
delete I->second;
CGRecordLayouts.clear();
@@ -65,7 +65,7 @@
const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
T = Context.getCanonicalType(T);
-
+
// See if type is already cached.
llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
I = TypeCache.find(T.getTypePtr());
@@ -75,7 +75,7 @@
return I->second.get();
const llvm::Type *ResultType = ConvertNewType(T);
- TypeCache.insert(std::make_pair(T.getTypePtr(),
+ TypeCache.insert(std::make_pair(T.getTypePtr(),
llvm::PATypeHolder(ResultType)));
return ResultType;
}
@@ -94,15 +94,15 @@
/// memory representation is usually i8 or i32, depending on the target.
const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
const llvm::Type *R = ConvertType(T);
-
+
// If this is a non-bool type, don't map it.
if (R != llvm::Type::getInt1Ty(getLLVMContext()))
return R;
-
+
// Otherwise, return an integer of the target-specified size.
return llvm::IntegerType::get(getLLVMContext(),
(unsigned)Context.getTypeSize(T));
-
+
}
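For instance (a sketch): the two conversions differ exactly for bool, which is why loads and stores go through the memory type.

    // ConvertType(BoolTy)       -> i1 (scalar values, e.g. comparison results)
    // ConvertTypeForMem(BoolTy) -> i8 on targets where bool occupies one byte
    bool flag = (2 + 2 == 4);    // the compare yields i1; the store widens to i8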
// Code to verify a given function type is complete, i.e. the return type
@@ -124,15 +124,15 @@
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
const Type *Key = Context.getTagDeclType(TD).getTypePtr();
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
TagDeclTypes.find(Key);
if (TDTI == TagDeclTypes.end()) return;
-
+
// Remember the opaque LLVM type for this tagdecl.
llvm::PATypeHolder OpaqueHolder = TDTI->second;
assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
"Updating compilation of an already non-opaque type?");
-
+
// Remove it from TagDeclTypes so that it will be regenerated.
TagDeclTypes.erase(TDTI);
@@ -163,7 +163,7 @@
}
}
-static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
+static const llvm::Type* getTypeForFormat(llvm::LLVMContext &VMContext,
const llvm::fltSemantics &format) {
if (&format == &llvm::APFloat::IEEEsingle)
return llvm::Type::getFloatTy(VMContext);
@@ -181,7 +181,7 @@
const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
const clang::Type &Ty = *Context.getCanonicalType(T);
-
+
switch (Ty.getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
@@ -204,7 +204,7 @@
case BuiltinType::Bool:
// Note that we always return bool as i1 for use as a scalar type.
return llvm::Type::getInt1Ty(getLLVMContext());
-
+
case BuiltinType::Char_S:
case BuiltinType::Char_U:
case BuiltinType::SChar:
@@ -222,13 +222,13 @@
case BuiltinType::Char32:
return llvm::IntegerType::get(getLLVMContext(),
static_cast<unsigned>(Context.getTypeSize(T)));
-
+
case BuiltinType::Float:
case BuiltinType::Double:
case BuiltinType::LongDouble:
- return getTypeForFormat(getLLVMContext(),
+ return getTypeForFormat(getLLVMContext(),
Context.getFloatTypeSemantics(T));
-
+
case BuiltinType::UInt128:
case BuiltinType::Int128:
return llvm::IntegerType::get(getLLVMContext(), 128);
@@ -236,10 +236,10 @@
break;
}
case Type::FixedWidthInt:
- return llvm::IntegerType::get(getLLVMContext(),
+ return llvm::IntegerType::get(getLLVMContext(),
cast<FixedWidthIntType>(T)->getWidth());
case Type::Complex: {
- const llvm::Type *EltTy =
+ const llvm::Type *EltTy =
ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
return llvm::StructType::get(TheModule.getContext(), EltTy, EltTy, NULL);
}
@@ -258,7 +258,7 @@
PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
}
-
+
case Type::VariableArray: {
const VariableArrayType &A = cast<VariableArrayType>(Ty);
assert(A.getIndexTypeQualifier() == 0 &&
@@ -305,7 +305,7 @@
const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
return GetFunctionType(getFunctionInfo(FNPT), true);
}
-
+
case Type::ExtQual:
return
ConvertTypeRecursive(QualType(cast<ExtQualType>(Ty).getBaseType(), 0));
@@ -319,12 +319,12 @@
T = llvm::OpaqueType::get(getLLVMContext());
return T;
}
-
+
case Type::ObjCObjectPointer: {
// Protocol qualifications do not influence the LLVM type, we just return a
// pointer to the underlying interface type. We don't need to worry about
// recursive conversion.
- const llvm::Type *T =
+ const llvm::Type *T =
ConvertTypeRecursive(cast<ObjCObjectPointerType>(Ty).getPointeeType());
return llvm::PointerType::getUnqual(T);
}
@@ -333,10 +333,10 @@
case Type::Enum: {
const TagDecl *TD = cast<TagType>(Ty).getDecl();
const llvm::Type *Res = ConvertTagDeclType(TD);
-
+
std::string TypeName(TD->getKindName());
TypeName += '.';
-
+
// Name the codegen type after the typedef name
// if there is no tag type name available
if (TD->getIdentifier())
@@ -345,8 +345,8 @@
TypeName += TdT->getDecl()->getNameAsString();
else
TypeName += "anon";
-
- TheModule.addTypeName(TypeName, Res);
+
+ TheModule.addTypeName(TypeName, Res);
return Res;
}
@@ -365,7 +365,7 @@
QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
if (ETy->isFunctionType()) {
return llvm::StructType::get(TheModule.getContext(),
- ConvertType(Context.getPointerDiffType()),
+ ConvertType(Context.getPointerDiffType()),
ConvertType(Context.getPointerDiffType()),
NULL);
} else
@@ -375,7 +375,7 @@
case Type::TemplateSpecialization:
assert(false && "Dependent types can't get here");
}
-
+
// FIXME: implement.
return llvm::OpaqueType::get(getLLVMContext());
}
@@ -395,18 +395,18 @@
}
}
}
-
+
// TagDecls are not necessarily unique; instead use the (clang)
// type connected to the decl.
- const Type *Key =
+ const Type *Key =
Context.getTagDeclType(TD).getTypePtr();
- llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
+ llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
TagDeclTypes.find(Key);
-
+
// If we've already compiled this tag type, use the previous definition.
if (TDTI != TagDeclTypes.end())
return TDTI->second;
-
+
// If this is still a forward definition, just define an opaque type to use
// for this tagged decl.
if (!TD->isDefinition()) {
@@ -414,14 +414,14 @@
TagDeclTypes.insert(std::make_pair(Key, ResultType));
return ResultType;
}
-
+
// Okay, this is a definition of a type. Compile the implementation now.
-
+
if (TD->isEnum()) {
// Don't bother storing enums in TagDeclTypes.
return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
}
-
+
// This decl could well be recursive. In this case, insert an opaque
// definition of this type, which the recursive uses will get. We will then
// refine this opaque version later.
@@ -430,30 +430,30 @@
// type. This will later be refined to the actual type.
llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(getLLVMContext());
TagDeclTypes.insert(std::make_pair(Key, ResultHolder));
-
+
const llvm::Type *ResultType;
const RecordDecl *RD = cast<const RecordDecl>(TD);
// Layout fields.
- CGRecordLayout *Layout =
+ CGRecordLayout *Layout =
CGRecordLayoutBuilder::ComputeLayout(*this, RD);
-
+
CGRecordLayouts[Key] = Layout;
ResultType = Layout->getLLVMType();
-
+
// Refine our Opaque type to ResultType. This can invalidate ResultType, so
// make sure to read the result out of the holder.
cast<llvm::OpaqueType>(ResultHolder.get())
->refineAbstractTypeTo(ResultType);
-
+
return ResultHolder.get();
-}
+}
/// getLLVMFieldNo - Return llvm::StructType element number
/// that corresponds to the field FD.
unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
assert(!FD->isBitField() && "Don't use getLLVMFieldNo on bit fields!");
-
+
llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
assert (I != FieldInfo.end() && "Unable to find field info");
return I->second;
@@ -481,11 +481,11 @@
/// getCGRecordLayout - Return record layout info for the given llvm::Type.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
- const Type *Key =
+ const Type *Key =
Context.getTagDeclType(TD).getTypePtr();
llvm::DenseMap<const Type*, CGRecordLayout *>::iterator I
= CGRecordLayouts.find(Key);
- assert (I != CGRecordLayouts.end()
+ assert (I != CGRecordLayouts.end()
&& "Unable to find record layout information for type");
return *I->second;
}
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index f8df1b4..0e73d48 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This is the code that handles AST -> LLVM type lowering.
+// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//
@@ -49,20 +49,20 @@
namespace CodeGen {
class CodeGenTypes;
- /// CGRecordLayout - This class handles struct and union layout info while
+ /// CGRecordLayout - This class handles struct and union layout info while
/// lowering AST types to LLVM types.
class CGRecordLayout {
CGRecordLayout(); // DO NOT IMPLEMENT
-
+
/// LLVMType - The LLVMType corresponding to this record layout.
const llvm::Type *LLVMType;
-
+
/// ContainsMemberPointer - Whether one of the fields in this record layout
/// is a member pointer, or a struct that contains a member pointer.
bool ContainsMemberPointer;
-
+
public:
- CGRecordLayout(const llvm::Type *T, bool ContainsMemberPointer)
+ CGRecordLayout(const llvm::Type *T, bool ContainsMemberPointer)
: LLVMType(T), ContainsMemberPointer(ContainsMemberPointer) { }
/// getLLVMType - Return llvm type associated with this record.
@@ -73,9 +73,9 @@
bool containsMemberPointer() const {
return ContainsMemberPointer;
}
-
+
};
-
+
/// CodeGenTypes - This class organizes the cross-module state that is used
/// while lowering AST types to LLVM types.
class CodeGenTypes {
@@ -84,7 +84,7 @@
llvm::Module& TheModule;
const llvm::TargetData& TheTargetData;
mutable const ABIInfo* TheABIInfo;
-
+
llvm::SmallVector<std::pair<QualType,
llvm::OpaqueType *>, 8> PointersToResolve;
@@ -98,9 +98,9 @@
/// types are never refined.
llvm::DenseMap<const ObjCInterfaceType*, const llvm::Type *> InterfaceTypes;
- /// CGRecordLayouts - This maps llvm struct type with corresponding
- /// record layout info.
- /// FIXME : If CGRecordLayout is less than 16 bytes then use
+ /// CGRecordLayouts - This maps an llvm struct type to its corresponding
+ /// record layout info.
+ /// FIXME: If CGRecordLayout is less than 16 bytes then
/// inline it in the map.
llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
@@ -113,8 +113,8 @@
public:
struct BitFieldInfo {
- BitFieldInfo(unsigned FieldNo,
- unsigned Start,
+ BitFieldInfo(unsigned FieldNo,
+ unsigned Start,
unsigned Size)
: FieldNo(FieldNo), Start(Start), Size(Size) {}
@@ -128,7 +128,7 @@
/// TypeCache - This map keeps cache of llvm::Types (through PATypeHolder)
/// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is
- /// used instead of llvm::Type because it allows us to bypass potential
+ /// used instead of llvm::Type because it allows us to bypass potential
/// dangling type pointers due to type refinement on llvm side.
llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
@@ -140,17 +140,17 @@
public:
CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD);
~CodeGenTypes();
-
+
const llvm::TargetData &getTargetData() const { return TheTargetData; }
TargetInfo &getTarget() const { return Target; }
ASTContext &getContext() const { return Context; }
const ABIInfo &getABIInfo() const;
llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
- /// ConvertType - Convert type T into a llvm::Type.
+ /// ConvertType - Convert type T into a llvm::Type.
const llvm::Type *ConvertType(QualType T);
const llvm::Type *ConvertTypeRecursive(QualType T);
-
+
/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
@@ -161,20 +161,20 @@
/// GetFunctionType - Get the LLVM function type for \arg Info.
const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
bool IsVariadic);
-
+
const CGRecordLayout &getCGRecordLayout(const TagDecl*) const;
-
+
/// getLLVMFieldNo - Return llvm::StructType element number
/// that corresponds to the field FD.
unsigned getLLVMFieldNo(const FieldDecl *FD);
-
+
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void UpdateCompletedType(const TagDecl *TD);
/// getFunctionInfo - Get the CGFunctionInfo for this function signature.
- const CGFunctionInfo &getFunctionInfo(QualType RetTy,
- const llvm::SmallVector<QualType,16>
+ const CGFunctionInfo &getFunctionInfo(QualType RetTy,
+ const llvm::SmallVector<QualType,16>
&ArgTys);
const CGFunctionInfo &getFunctionInfo(const FunctionNoProtoType *FTNP);
@@ -182,12 +182,12 @@
const CGFunctionInfo &getFunctionInfo(const FunctionDecl *FD);
const CGFunctionInfo &getFunctionInfo(const CXXMethodDecl *MD);
const CGFunctionInfo &getFunctionInfo(const ObjCMethodDecl *MD);
- const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
const CallArgList &Args);
public:
- const CGFunctionInfo &getFunctionInfo(QualType ResTy,
+ const CGFunctionInfo &getFunctionInfo(QualType ResTy,
const FunctionArgList &Args);
-
+
public: // These are internal details of CGT that shouldn't be used externally.
/// addFieldInfo - Assign field number to field FD.
void addFieldInfo(const FieldDecl *FD, unsigned FieldNo);
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 04bd52b..f9495b8 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -34,7 +34,7 @@
const CXXMethodDecl *Structor;
unsigned StructorType;
CXXCtorType CtorType;
-
+
public:
CXXNameMangler(ASTContext &C, llvm::raw_ostream &os)
: Context(C), Out(os), Structor(0), StructorType(0) { }
@@ -46,7 +46,7 @@
int64_t nv_t, int64_t v_t,
int64_t nv_r, int64_t v_r);
void mangleGuardVariable(const VarDecl *D);
-
+
void mangleCXXVtable(QualType Type);
void mangleCXXRtti(QualType Type);
void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type);
@@ -54,7 +54,7 @@
private:
bool mangleFunctionDecl(const FunctionDecl *FD);
-
+
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleName(const NamedDecl *ND);
void mangleUnqualifiedName(const NamedDecl *ND);
@@ -78,19 +78,19 @@
void mangleExpression(Expr *E);
void mangleCXXCtorType(CXXCtorType T);
void mangleCXXDtorType(CXXDtorType T);
-
+
void mangleTemplateArgumentList(const TemplateArgumentList &L);
void mangleTemplateArgument(const TemplateArgument &A);
};
}
static bool isInCLinkageSpecification(const Decl *D) {
- for (const DeclContext *DC = D->getDeclContext();
+ for (const DeclContext *DC = D->getDeclContext();
!DC->isTranslationUnit(); DC = DC->getParent()) {
- if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
}
-
+
return false;
}
@@ -101,12 +101,12 @@
// C functions are not mangled, and "main" is never mangled.
if (!Context.getLangOptions().CPlusPlus || FD->isMain(Context))
return false;
-
- // No mangling in an "implicit extern C" header.
+
+ // No mangling in an "implicit extern C" header.
if (FD->getLocation().isValid() &&
Context.getSourceManager().isInExternCSystemHeader(FD->getLocation()))
return false;
-
+
// No name mangling in a C linkage specification.
if (isInCLinkageSpecification(FD))
return false;
@@ -127,7 +127,7 @@
Out << ALA->getLabel();
return true;
}
-
+
// <mangled-name> ::= _Z <encoding>
// ::= <data name>
// ::= <special-name>
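Two small examples of the _Z <encoding> rule under the Itanium scheme (illustrative):

    void bar();       // mangles as _Z3barv
    void foo(int);    // mangles as _Z3fooi
    extern "C" int puts(const char *);  // C linkage: left unmangled, per the checks above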
@@ -135,36 +135,36 @@
// FIXME: Actually use a visitor to decode these?
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
return mangleFunctionDecl(FD);
-
+
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (!Context.getLangOptions().CPlusPlus ||
isInCLinkageSpecification(D) ||
D->getDeclContext()->isTranslationUnit())
return false;
-
+
Out << "_Z";
mangleName(VD);
return true;
}
-
+
return false;
}
-void CXXNameMangler::mangleCXXCtor(const CXXConstructorDecl *D,
+void CXXNameMangler::mangleCXXCtor(const CXXConstructorDecl *D,
CXXCtorType Type) {
assert(!Structor && "Structor already set!");
Structor = D;
StructorType = Type;
-
+
mangle(D);
}
-void CXXNameMangler::mangleCXXDtor(const CXXDestructorDecl *D,
+void CXXNameMangler::mangleCXXDtor(const CXXDestructorDecl *D,
CXXDtorType Type) {
assert(!Structor && "Structor already set!");
Structor = D;
StructorType = Type;
-
+
mangle(D);
}
@@ -180,9 +180,8 @@
mangleType(T);
}
-void CXXNameMangler::mangleGuardVariable(const VarDecl *D)
-{
- // <special-name> ::= GV <object name> # Guard variable for one-time
+void CXXNameMangler::mangleGuardVariable(const VarDecl *D) {
+ // <special-name> ::= GV <object name> # Guard variable for one-time
// # initialization
Out << "_ZGV";
@@ -192,14 +191,14 @@
void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
// <encoding> ::= <function name> <bare-function-type>
mangleName(FD);
-
+
// Whether the mangling of a function type includes the return type depends on
// the context and the nature of the function. The rules for deciding whether
// the return type is included are:
- //
+ //
// 1. Template functions (names or types) have return types encoded, with
// the exceptions listed below.
- // 2. Function types not appearing as part of a function name mangling,
+ // 2. Function types not appearing as part of a function name mangling,
// e.g. parameters, pointer types, etc., have return type encoded, with the
// exceptions listed below.
// 3. Non-template function names do not have return types encoded.
@@ -233,7 +232,7 @@
//
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name> # ::std::
- if (ND->getDeclContext()->isTranslationUnit())
+ if (ND->getDeclContext()->isTranslationUnit())
mangleUnqualifiedName(ND);
else if (isStdNamespace(ND->getDeclContext())) {
Out << "St";
@@ -298,8 +297,8 @@
void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND) {
// <unqualified-name> ::= <operator-name>
- // ::= <ctor-dtor-name>
- // ::= <source-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
DeclarationName Name = ND->getDeclName();
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
@@ -335,7 +334,7 @@
break;
case DeclarationName::CXXConversionFunctionName:
- // <operator-name> ::= cv <type> # (cast)
+ // <operator-name> ::= cv <type> # (cast)
Out << "cv";
mangleType(Context.getCanonicalType(Name.getCXXNameType()));
break;
@@ -349,9 +348,9 @@
assert(false && "Can't mangle a using directive name!");
break;
}
-
+
if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
- if (const TemplateArgumentList *TemplateArgs
+ if (const TemplateArgumentList *TemplateArgs
= Function->getTemplateSpecializationArgs())
mangleTemplateArgumentList(*TemplateArgs);
}
@@ -379,7 +378,7 @@
void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
// <local-name> := Z <function encoding> E <entity name> [<discriminator>]
// := Z <function encoding> E s [<discriminator>]
- // <discriminator> := _ <non-negative number>
+ // <discriminator> := _ <non-negative number>
Out << 'Z';
mangleFunctionEncoding(cast<FunctionDecl>(ND->getDeclContext()));
Out << 'E';
@@ -399,7 +398,7 @@
if (const NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(DC))
mangleSourceName(Namespace->getIdentifier());
else if (const RecordDecl *Record = dyn_cast<RecordDecl>(DC)) {
- if (const ClassTemplateSpecializationDecl *D =
+ if (const ClassTemplateSpecializationDecl *D =
dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
mangleType(QualType(D->getTypeForDecl(), 0));
} else
@@ -407,7 +406,7 @@
}
}
-void
+void
CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
switch (OO) {
// <operator-name> ::= nw # new
@@ -503,13 +502,13 @@
case OO_None:
case OO_Conditional:
case NUM_OVERLOADED_OPERATORS:
- assert(false && "Not an overloaded operator");
+ assert(false && "Not an overloaded operator");
break;
}
}
void CXXNameMangler::mangleCVQualifiers(unsigned Quals) {
- // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
if (Quals & QualType::Restrict)
Out << 'r';
if (Quals & QualType::Volatile)
@@ -595,7 +594,7 @@
case BuiltinType::Overload:
case BuiltinType::Dependent:
- assert(false &&
+ assert(false &&
"Overloaded and dependent types shouldn't get to name mangling");
break;
case BuiltinType::UndeducedAuto:
@@ -631,9 +630,9 @@
Out << 'v';
return;
}
-
+
for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
- ArgEnd = Proto->arg_type_end();
+ ArgEnd = Proto->arg_type_end();
Arg != ArgEnd; ++Arg)
mangleType(*Arg);
@@ -643,7 +642,7 @@
}
// <type> ::= <class-enum-type>
-// <class-enum-type> ::= <name>
+// <class-enum-type> ::= <name>
void CXXNameMangler::mangleType(const EnumType *T) {
mangleType(static_cast<const TagType*>(T));
}
@@ -655,9 +654,9 @@
mangleName(T->getDecl()->getTypedefForAnonDecl());
else
mangleName(T->getDecl());
-
+
// If this is a class template specialization, mangle the template arguments.
- if (ClassTemplateSpecializationDecl *Spec
+ if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl()))
mangleTemplateArgumentList(Spec->getTemplateArgs());
}
@@ -695,7 +694,7 @@
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
mangleCVQualifiers(FPT->getTypeQuals());
mangleType(FPT);
- } else
+ } else
mangleType(PointeeType);
}
@@ -825,18 +824,18 @@
void CXXNameMangler::mangleTemplateArgumentList(const TemplateArgumentList &L) {
// <template-args> ::= I <template-arg>+ E
Out << "I";
-
+
for (unsigned i = 0, e = L.size(); i != e; ++i) {
const TemplateArgument &A = L[i];
-
+
mangleTemplateArgument(A);
}
-
+
Out << "E";
}
void CXXNameMangler::mangleTemplateArgument(const TemplateArgument &A) {
- // <template-arg> ::= <type> # type or template
+ // <template-arg> ::= <type> # type or template
// ::= X <expression> E # expression
// ::= <expr-primary> # simple expressions
// ::= I <template-arg>* E # argument pack
@@ -851,9 +850,9 @@
// <expr-primary> ::= L <type> <value number> E # integer literal
Out << 'L';
-
+
mangleType(A.getIntegralType());
-
+
const llvm::APSInt *Integral = A.getAsIntegral();
if (A.getIntegralType()->isBooleanType()) {
// Boolean values are encoded as 0/1.
@@ -863,7 +862,7 @@
Out << 'n';
Integral->abs().print(Out, false);
}
-
+
Out << 'E';
break;
}
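Two hedged examples of the integral encodings above:

    template <int N>  struct A {};
    template <bool B> struct F {};
    void f(A<-5>);     // _Z1f1AILin5EE -- 'n' marks the negated absolute value
    void g(F<true>);   // _Z1g1FILb1EE  -- booleans are encoded as 0/1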
@@ -878,21 +877,21 @@
/// and this routine will return false. In this case, the caller should just
/// emit the identifier of the declaration (\c D->getIdentifier()) as its
/// name.
- bool mangleName(const NamedDecl *D, ASTContext &Context,
+ bool mangleName(const NamedDecl *D, ASTContext &Context,
llvm::raw_ostream &os) {
assert(!isa<CXXConstructorDecl>(D) &&
"Use mangleCXXCtor for constructor decls!");
assert(!isa<CXXDestructorDecl>(D) &&
"Use mangleCXXDtor for destructor decls!");
-
+
CXXNameMangler Mangler(Context, os);
if (!Mangler.mangle(D))
return false;
-
+
os.flush();
return true;
}
-
+
/// \brief Mangles a thunk with the offset n for the declaration D and
/// emits that name to the given output stream.
void mangleThunk(const FunctionDecl *FD, int64_t nv, int64_t v,
@@ -902,12 +901,12 @@
"Use mangleCXXCtor for constructor decls!");
assert(!isa<CXXDestructorDecl>(FD) &&
"Use mangleCXXDtor for destructor decls!");
-
+
CXXNameMangler Mangler(Context, os);
Mangler.mangleThunk(FD, nv, v);
os.flush();
}
-
+
/// \brief Mangles a covariant thunk for the declaration D and emits that
/// name to the given output stream.
void mangleCovariantThunk(const FunctionDecl *FD, int64_t nv_t, int64_t v_t,
@@ -918,12 +917,12 @@
"Use mangleCXXCtor for constructor decls!");
assert(!isa<CXXDestructorDecl>(FD) &&
"Use mangleCXXDtor for destructor decls!");
-
+
CXXNameMangler Mangler(Context, os);
Mangler.mangleCovariantThunk(FD, nv_t, v_t, nv_r, v_r);
os.flush();
}
-
+
/// mangleGuardVariable - Returns the mangled name for a guard variable
/// for the passed in VarDecl.
void mangleGuardVariable(const VarDecl *D, ASTContext &Context,
@@ -933,20 +932,20 @@
os.flush();
}
-
+
void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
ASTContext &Context, llvm::raw_ostream &os) {
CXXNameMangler Mangler(Context, os);
Mangler.mangleCXXCtor(D, Type);
-
+
os.flush();
}
-
+
void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
ASTContext &Context, llvm::raw_ostream &os) {
CXXNameMangler Mangler(Context, os);
Mangler.mangleCXXDtor(D, Type);
-
+
os.flush();
}
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
index 8558391..16f1ae6 100644
--- a/lib/CodeGen/Mangle.h
+++ b/lib/CodeGen/Mangle.h
@@ -32,8 +32,8 @@
class FunctionDecl;
class NamedDecl;
class VarDecl;
-
- bool mangleName(const NamedDecl *D, ASTContext &Context,
+
+ bool mangleName(const NamedDecl *D, ASTContext &Context,
llvm::raw_ostream &os);
void mangleThunk(const FunctionDecl *FD, int64_t n, int64_t vn,
ASTContext &Context, llvm::raw_ostream &os);
@@ -51,4 +51,4 @@
ASTContext &Context, llvm::raw_ostream &os);
}
-#endif
+#endif
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 600271f..c8f686a 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -40,27 +40,27 @@
CodeGeneratorImpl(Diagnostic &diags, const std::string& ModuleName,
const CompileOptions &CO, llvm::LLVMContext& C)
: Diags(diags), CompileOpts(CO), M(new llvm::Module(ModuleName, C)) {}
-
+
virtual ~CodeGeneratorImpl() {}
-
+
virtual llvm::Module* GetModule() {
return M.get();
}
-
+
virtual llvm::Module* ReleaseModule() {
return M.take();
}
-
+
virtual void Initialize(ASTContext &Context) {
Ctx = &Context;
-
+
M->setTargetTriple(Ctx->Target.getTriple().getTriple());
M->setDataLayout(Ctx->Target.getTargetDescription());
TD.reset(new llvm::TargetData(Ctx->Target.getTargetDescription()));
Builder.reset(new CodeGen::CodeGenModule(Context, CompileOpts,
*M, *TD, Diags));
}
-
+
virtual void HandleTopLevelDecl(DeclGroupRef DG) {
// Make sure to emit all elements of a Decl.
for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
@@ -94,7 +94,7 @@
};
}
-CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+CodeGenerator *clang::CreateLLVMCodeGen(Diagnostic &Diags,
const std::string& ModuleName,
const CompileOptions &CO,
llvm::LLVMContext& C) {
diff --git a/lib/CodeGen/TargetABIInfo.cpp b/lib/CodeGen/TargetABIInfo.cpp
index 9525a8e..daeec0a 100644
--- a/lib/CodeGen/TargetABIInfo.cpp
+++ b/lib/CodeGen/TargetABIInfo.cpp
@@ -242,7 +242,7 @@
CodeGenFunction &CGF) const;
X86_32ABIInfo(ASTContext &Context, bool d, bool p)
- : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
+ : ABIInfo(), Context(Context), IsDarwinVectorABI(d),
IsSmallStructInRegABI(p) {}
};
}
@@ -402,7 +402,7 @@
// Structures with flexible arrays are always indirect.
if (const RecordType *RT = Ty->getAsStructureType())
if (RT->getDecl()->hasFlexibleArrayMember())
- return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
+ return ABIArgInfo::getIndirect(getIndirectArgumentAlignment(Ty,
Context));
// Ignore empty structs.
@@ -1035,7 +1035,7 @@
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, Context, VMContext,
+ it->info = classifyArgumentType(it->type, Context, VMContext,
neededInt, neededSSE);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
@@ -1107,7 +1107,7 @@
llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-
+
// Assume that va_list type is correct; should be pointer to LLVM type:
// struct {
// i32 gp_offset;
@@ -1338,7 +1338,7 @@
void ARMABIInfo::computeInfo(CGFunctionInfo &FI, ASTContext &Context,
llvm::LLVMContext &VMContext) const {
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), Context,
VMContext);
for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
it != ie; ++it) {
@@ -1392,7 +1392,7 @@
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
// FIXME: Need to handle alignment
- const llvm::Type *BP =
+ const llvm::Type *BP =
llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);