//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This abstract class defines the interface for Objective-C runtime-specific
// code generation. It provides some concrete helper methods for functionality
// shared between all (or most) of the Objective-C runtimes supported by clang.
//
//===----------------------------------------------------------------------===//

#include "CGObjCRuntime.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/CallSite.h"

using namespace clang;
using namespace CodeGen;

static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
                                     const ObjCInterfaceDecl *OID,
                                     const ObjCImplementationDecl *ID,
                                     const ObjCIvarDecl *Ivar) {
  const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();

  // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
  // in here; it should never be necessary because that should be the lexical
  // decl context for the ivar.

  // If we have an implementation (and the ivar is in it), then look up the
  // offset in the implementation layout.
  const ASTRecordLayout *RL;
  if (ID && declaresSameEntity(ID->getClassInterface(), Container))
    RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
  else
    RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);

  // Compute field index.
  //
  // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
  // implemented. This should be fixed to get the information from the layout
  // directly.
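  // Walk the flattened chain of the interface's declared ivars in declaration
  // order, counting entries until we reach the requested ivar.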
  unsigned Index = 0;

  for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
       IVD; IVD = IVD->getNextIvar()) {
    if (Ivar == IVD)
      break;
    ++Index;
  }
  assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");

  return RL->getFieldOffset(Index);
}

uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCInterfaceDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID, nullptr, Ivar) /
    CGM.getContext().getCharWidth();
}

uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
                                              const ObjCImplementationDecl *OID,
                                              const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
    CGM.getContext().getCharWidth();
}

unsigned CGObjCRuntime::ComputeBitfieldBitOffset(
    CodeGen::CodeGenModule &CGM,
    const ObjCInterfaceDecl *ID,
    const ObjCIvarDecl *Ivar) {
  return LookupFieldBitOffset(CGM, ID, ID->getImplementation(), Ivar);
}

LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
                                               const ObjCInterfaceDecl *OID,
                                               llvm::Value *BaseValue,
                                               const ObjCIvarDecl *Ivar,
                                               unsigned CVRQualifiers,
                                               llvm::Value *Offset) {
  // Compute (type*) ( (char *) BaseValue + Offset)
  QualType IvarTy = Ivar->getType().withCVRQualifiers(CVRQualifiers);
  llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
  llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
  V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");

  if (!Ivar->isBitField()) {
    V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
    LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
    return LV;
  }

  // We need to compute an access strategy for this bit-field. We are given the
  // offset to the first byte in the bit-field, the sub-byte offset is taken
  // from the original layout. We reuse the normal bit-field access strategy by
  // treating this as an access to a struct where the bit-field is in byte 0,
  // and adjust the containing type size as appropriate.
  //
  // FIXME: Note that currently we make a very conservative estimate of the
  // alignment of the bit-field, because (a) it is not clear what guarantees
  // the runtime makes to us, and (b) we don't have a way to specify that the
  // struct is at an alignment plus offset.
  //
  // Note, there is a subtle invariant here: we can only call this routine on
  // non-synthesized ivars but we may be called for synthesized ivars. However,
  // a synthesized ivar can never be a bit-field, so this is safe.
  uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, nullptr, Ivar);
  uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
  uint64_t AlignmentBits = CGF.CGM.getTarget().getCharAlign();
  uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
  CharUnits StorageSize = CGF.CGM.getContext().toCharUnitsFromBits(
      llvm::alignTo(BitOffset + BitFieldSize, AlignmentBits));
  CharUnits Alignment = CGF.CGM.getContext().toCharUnitsFromBits(AlignmentBits);

  // Allocate a new CGBitFieldInfo object to describe this access.
  //
  // FIXME: This is incredibly wasteful, these should be uniqued or part of some
  // layout object. However, this is blocked on other cleanups to the
  // Objective-C code, so for now we just live with allocating a bunch of these
  // objects.
  CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
    CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
                             CGF.CGM.getContext().toBits(StorageSize),
                             CharUnits::fromQuantity(0)));

  Address Addr(V, Alignment);
  Addr = CGF.Builder.CreateElementBitCast(Addr,
                                   llvm::Type::getIntNTy(CGF.getLLVMContext(),
                                                         Info->StorageSize));
  return LValue::MakeBitfield(Addr, *Info, IvarTy, AlignmentSource::Decl);
}

namespace {
  struct CatchHandler {
    const VarDecl *Variable;
    const Stmt *Body;
    llvm::BasicBlock *Block;
    llvm::Constant *TypeInfo;
  };

  struct CallObjCEndCatch final : EHScopeStack::Cleanup {
    CallObjCEndCatch(bool MightThrow, llvm::Value *Fn)
      : MightThrow(MightThrow), Fn(Fn) {}
    bool MightThrow;
    llvm::Value *Fn;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      if (MightThrow)
        CGF.EmitRuntimeCallOrInvoke(Fn);
      else
        CGF.EmitNounwindRuntimeCall(Fn);
    }
  };
}

void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
                                     const ObjCAtTryStmt &S,
                                     llvm::Constant *beginCatchFn,
                                     llvm::Constant *endCatchFn,
                                     llvm::Constant *exceptionRethrowFn) {
  // Jump destination for falling out of catch bodies.
  CodeGenFunction::JumpDest Cont;
  if (S.getNumCatchStmts())
    Cont = CGF.getJumpDestInCurrentScope("eh.cont");

  CodeGenFunction::FinallyInfo FinallyInfo;
  if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
    FinallyInfo.enter(CGF, Finally->getFinallyBody(),
                      beginCatchFn, endCatchFn, exceptionRethrowFn);

  SmallVector<CatchHandler, 8> Handlers;

  // Enter the catch, if there is one.
  if (S.getNumCatchStmts()) {
    for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
      const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
      const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();

      Handlers.push_back(CatchHandler());
      CatchHandler &Handler = Handlers.back();
      Handler.Variable = CatchDecl;
      Handler.Body = CatchStmt->getCatchBody();
      Handler.Block = CGF.createBasicBlock("catch");

      // @catch(...) always matches.
      if (!CatchDecl) {
        Handler.TypeInfo = nullptr; // catch-all
        // Don't consider any other catches.
        break;
      }

      Handler.TypeInfo = GetEHType(CatchDecl->getType());
    }

    EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
    for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
      Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
  }

  // Emit the try body.
  CGF.EmitStmt(S.getTryBody());

  // Leave the try.
  if (S.getNumCatchStmts())
    CGF.popCatchScope();

  // Remember where we were.
  CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();

  // Emit the handlers.
  for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
    CatchHandler &Handler = Handlers[I];

    CGF.EmitBlock(Handler.Block);
    llvm::Value *RawExn = CGF.getExceptionFromSlot();

    // Enter the catch.
    llvm::Value *Exn = RawExn;
    if (beginCatchFn)
      Exn = CGF.EmitNounwindRuntimeCall(beginCatchFn, RawExn, "exn.adjusted");

    CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());

    if (endCatchFn) {
      // Add a cleanup to leave the catch.
      bool EndCatchMightThrow = (Handler.Variable == nullptr);

      CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
                                                EndCatchMightThrow,
                                                endCatchFn);
    }

    // Bind the catch parameter if it exists.
    if (const VarDecl *CatchParam = Handler.Variable) {
      llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
      llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);

      CGF.EmitAutoVarDecl(*CatchParam);
      EmitInitOfCatchParam(CGF, CastExn, CatchParam);
    }

    CGF.ObjCEHValueStack.push_back(Exn);
    CGF.EmitStmt(Handler.Body);
    CGF.ObjCEHValueStack.pop_back();

    // Leave any cleanups associated with the catch.
    cleanups.ForceCleanup();

    CGF.EmitBranchThroughCleanup(Cont);
  }

  // Go back to the try-statement fallthrough.
  CGF.Builder.restoreIP(SavedIP);

  // Pop out of the finally.
  if (S.getFinallyStmt())
    FinallyInfo.exit(CGF);

  if (Cont.isValid())
    CGF.EmitBlock(Cont.getBlock());
}

void CGObjCRuntime::EmitInitOfCatchParam(CodeGenFunction &CGF,
                                         llvm::Value *exn,
                                         const VarDecl *paramDecl) {

  Address paramAddr = CGF.GetAddrOfLocalVar(paramDecl);

  switch (paramDecl->getType().getQualifiers().getObjCLifetime()) {
  case Qualifiers::OCL_Strong:
    exn = CGF.EmitARCRetainNonBlock(exn);
    // fallthrough

  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Autoreleasing:
    CGF.Builder.CreateStore(exn, paramAddr);
    return;

  case Qualifiers::OCL_Weak:
    CGF.EmitARCInitWeak(paramAddr, exn);
    return;
  }
  llvm_unreachable("invalid ownership qualifier");
}

namespace {
  struct CallSyncExit final : EHScopeStack::Cleanup {
    llvm::Value *SyncExitFn;
    llvm::Value *SyncArg;
    CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
      : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
    }
  };
}

void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
                                           const ObjCAtSynchronizedStmt &S,
                                           llvm::Function *syncEnterFn,
                                           llvm::Function *syncExitFn) {
  CodeGenFunction::RunCleanupsScope cleanups(CGF);

  // Evaluate the lock operand. This is guaranteed to dominate the
  // ARC release and lock-release cleanups.
  const Expr *lockExpr = S.getSynchExpr();
  llvm::Value *lock;
  if (CGF.getLangOpts().ObjCAutoRefCount) {
    lock = CGF.EmitARCRetainScalarExpr(lockExpr);
    lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
  } else {
    lock = CGF.EmitScalarExpr(lockExpr);
  }
  lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);

  // Acquire the lock.
  CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();

  // Register an all-paths cleanup to release the lock.
  CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);

  // Emit the body of the statement.
  CGF.EmitStmt(S.getSynchBody());
}

/// Compute the pointer-to-function type to which a message send
/// should be cast in order to correctly call the given method
/// with the given arguments.
///
/// \param method - may be null
/// \param resultType - the result type to use if there's no method
/// \param callArgs - the actual arguments, including implicit ones
CGObjCRuntime::MessageSendInfo
CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
                                  QualType resultType,
                                  CallArgList &callArgs) {
  // If there's a method, use information from that.
  if (method) {
    const CGFunctionInfo &signature =
      CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);

    llvm::PointerType *signatureType =
      CGM.getTypes().GetFunctionType(signature)->getPointerTo();

    const CGFunctionInfo &signatureForCall =
      CGM.getTypes().arrangeCall(signature, callArgs);

    return MessageSendInfo(signatureForCall, signatureType);
  }

  // There's no method; just use a default CC.
  const CGFunctionInfo &argsInfo =
    CGM.getTypes().arrangeUnprototypedObjCMessageSend(resultType, callArgs);

  // Derive the signature to call from that.
  llvm::PointerType *signatureType =
    CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
  return MessageSendInfo(argsInfo, signatureType);
}