//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"

#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
#include "CGCleanup.h"

#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"

#include "llvm/Support/CallSite.h"

using namespace clang;
using namespace CodeGen;

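// Return the bit offset of the ivar Ivar within the layout of its containing
// interface.  If ID is the matching @implementation, the implementation
// layout is used instead, since it also covers ivars added there (e.g.
// synthesized ivars).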
static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
                                     const ObjCInterfaceDecl *OID,
                                     const ObjCImplementationDecl *ID,
                                     const ObjCIvarDecl *Ivar) {
  const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();

  // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
  // in here; it should never be necessary because that should be the lexical
  // decl context for the ivar.

  // If we have an implementation (and the ivar is in it), then look the ivar
  // up in the implementation layout.
  const ASTRecordLayout *RL;
  if (ID && ID->getClassInterface() == Container)
    RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
  else
    RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);

  // Compute field index.
  //
  // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
  // implemented. This should be fixed to get the information from the layout
  // directly.
  unsigned Index = 0;
  llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
  CGM.getContext().ShallowCollectObjCIvars(Container, Ivars);
  for (unsigned k = 0, e = Ivars.size(); k != e; ++k) {
    if (Ivar == Ivars[k])
      break;
    ++Index;
  }
  assert(Index != Ivars.size() && "Ivar is not inside container!");
  assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");

  return RL->getFieldOffset(Index);
}

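// Compute the byte offset of Ivar within the layout of its containing
// interface.  The @implementation layout is not consulted (ID is null).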
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCInterfaceDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID, 0, Ivar) /
         CGM.getContext().getCharWidth();
}

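// As above, but compute the offset against the @implementation's layout,
// which also accounts for ivars synthesized into the implementation.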
uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCImplementationDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
         CGM.getContext().getCharWidth();
}

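// Build an l-value for the ivar Ivar of the object pointed to by BaseValue,
// given the (possibly runtime-computed) byte Offset of the ivar from the
// start of the object.  Bit-field ivars get a synthetic CGBitFieldInfo
// describing the access; everything else becomes an ordinary address l-value.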
LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  const llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  QualType IvarTy = Ivar->getType();
  const llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
  V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));

  if (!Ivar->isBitField()) {
    LValue LV = CGF.MakeAddrLValue(V, IvarTy);
    LV.getQuals().addCVRQualifiers(CVRQualifiers);
    return LV;
  }

  // We need to compute an access strategy for this bit-field.  We are given
  // the offset to the first byte in the bit-field; the sub-byte offset is
  // taken from the original layout.  We reuse the normal bit-field access
  // strategy by treating this as an access to a struct where the bit-field is
  // in byte 0, and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees
  // the runtime makes to us, and (b) we don't have a way to specify that the
  // struct is at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars.
  // However, a synthesized ivar can never be a bit-field, so this is safe.
  const ASTRecordLayout &RL =
    CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
  uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t ContainingTypeAlign = CGF.CGM.getContext().Target.getCharAlign();
  uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
  uint64_t BitFieldSize =
    Ivar->getBitWidth()->EvaluateAsInt(CGF.getContext()).getZExtValue();

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful; these should be uniqued or part of
  // some layout object.  However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of
  // these objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
    CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                             ContainingTypeSize, ContainingTypeAlign));

  return LValue::MakeBitfield(V, *Info,
                              IvarTy.getCVRQualifiers() | CVRQualifiers);
}

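// Helpers for EmitTryCatchStmt: a per-@catch bookkeeping record, and a
// cleanup that calls the runtime's end-catch function (endCatchFn) when a
// catch body is exited, normally or via an exception.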
namespace {
  struct CatchHandler {
    const VarDecl *Variable;
    const Stmt *Body;
    llvm::BasicBlock *Block;
    llvm::Value *TypeInfo;
  };

  struct CallObjCEndCatch : EHScopeStack::Cleanup {
    CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
      MightThrow(MightThrow), Fn(Fn) {}
    bool MightThrow;
    llvm::Value *Fn;

    void Emit(CodeGenFunction &CGF, bool IsForEH) {
      if (!MightThrow) {
        CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
        return;
      }

      CGF.EmitCallOrInvoke(Fn, 0, 0);
    }
  };
}

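// Shared lowering of Objective-C @try/@catch/@finally.  The runtime-specific
// caller supplies its own entry points: beginCatchFn/endCatchFn bracket each
// @catch body (either may be null if the runtime has no such hook), and
// exceptionRethrowFn is used by the @finally block to rethrow a pending
// exception.  Roughly, for
//
//   @try { ... } @catch (id e) { ... } @finally { ... }
//
// this emits the try body inside an EH catch scope with a handler per @catch
// clause, plus a finally cleanup that is active on all exit paths.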
void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::Function *beginCatchFn,
                                     llvm::Function *endCatchFn,
                                     llvm::Function *exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  CodeGenFunction::FinallyInfo FinallyInfo;
  if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
    FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
                                        beginCatchFn,
                                        endCatchFn,
                                        exceptionRethrowFn);

  llvm::SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");

      // @catch(...) always matches.
      if (!CatchDecl) {
        Handler.TypeInfo = 0; // catch-all
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
  }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.EHStack.popCatch();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn) {
      Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
      cast<llvm::CallInst>(Exn)->setDoesNotThrow();
    }

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == 0);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);
      CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
    }

    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave the earlier cleanup.
    if (endCatchFn)
      CGF.PopCleanupBlock();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the normal cleanup on the finally.
  if (S.getFinallyStmt())
    CGF.ExitFinallyBlock(FinallyInfo);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}

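// Cleanup that releases an @synchronized lock by calling the runtime's
// sync-exit entry point (typically objc_sync_exit) on the saved lock object.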
namespace {
  struct CallSyncExit : EHScopeStack::Cleanup {
    llvm::Value *SyncExitFn;
    llvm::Value *SyncArg;
    CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
      : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}

    void Emit(CodeGenFunction &CGF, bool IsForEHCleanup) {
      CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
    }
  };
}

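// Shared lowering of @synchronized.  Conceptually,
//
//   @synchronized (obj) { body }
//
// becomes a call to syncEnterFn(obj) (typically objc_sync_enter), followed by
// the body, with a cleanup calling syncExitFn(obj) that runs on every exit
// path, including exceptional ones.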
void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
                                           const ObjCAtSynchronizedStmt &S,
                                           llvm::Function *syncEnterFn,
                                           llvm::Function *syncExitFn) {
  // Evaluate the lock operand.  This should dominate the cleanup.
  llvm::Value *SyncArg = CGF.EmitScalarExpr(S.getSynchExpr());

  // Acquire the lock.
  SyncArg = CGF.Builder.CreateBitCast(SyncArg,
                    syncEnterFn->getFunctionType()->getParamType(0));
  CGF.Builder.CreateCall(syncEnterFn, SyncArg);

  // Register an all-paths cleanup to release the lock.
  CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn,
                                        SyncArg);

  // Emit the body of the statement.
  CGF.EmitStmt(S.getSynchBody());

  // Pop the lock-release cleanup.
  CGF.PopCleanupBlock();
}