//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or +[NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();

  if (E->isExpressibleAsConstantInitializer()) {
    ConstantEmitter ConstEmitter(CGM);
    return ConstEmitter.tryEmitAbstract(E, E->getType());
  }

  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();
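
  // For illustration only (hypothetical operands; the general scheme, not
  // literal output): the boxing methods chosen by Sema look like
  //   @42          ->  [NSNumber numberWithInt:42]
  //   @(utf8Str)   ->  [NSString stringWithUTF8String:utf8Str]
  //   @(someRect)  ->  [NSValue valueWithBytes:&someRect objCType:...]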

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
    Args.add(RValue::get(BitCast.getPointer()), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        CGM.getModule().getMDKindID("invariant.load"),
        llvm::MDNode::get(getLLVMContext(), None));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements, nullptr,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);
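
  // For illustration only (hypothetical operands): an array literal such as
  //   @[a, b]   becomes roughly   [NSArray arrayWithObjects:objects count:2]
  // and a dictionary literal such as
  //   @{k : v}  becomes roughly   [NSDictionary dictionaryWithObjects:objects
  //                                                            forKeys:keys
  //                                                              count:1]
  // with the elements spilled into the temporary array(s) allocated below.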

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
    (getLangOpts().ObjCAutoRefCount &&
     CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                 ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
                                    ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                      ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects.getPointer()), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys.getPointer()), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
    llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);
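
  // A sketch of the mechanism used just below (the intrinsic's exact name
  // varies across LLVM versions): EmitARCIntrinsicUse emits a call to the
  // clang.arc.use marker intrinsic, which is a no-op at runtime but counts
  // as a use of the listed values, so the ARC optimizer cannot sink their
  // releases above the message send before it deletes the marker.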

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
                                            const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}
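
// For illustration, the specialized lowerings recognized below include,
// assuming the runtime opts in via the shouldUse* queries:
//   [Foo alloc]             -> objc_alloc(Foo)
//   [Foo allocWithZone:nil] -> objc_allocWithZone(Foo)
//   [obj retain]            -> objc_retain(obj)
//   [obj autorelease]       -> objc_autorelease(obj)
//   [obj release]           -> objc_release(obj)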

/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// None and the caller can generate a msgSend instead.
static Optional<llvm::Value *>
tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
                                  llvm::Value *Receiver,
                                  const CallArgList& Args, Selector Sel,
                                  const ObjCMethodDecl *method,
                                  bool isClassMessage) {
  auto &CGM = CGF.CGM;
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return None;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (isClassMessage &&
        Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
        // [Foo alloc] -> objc_alloc(Foo) or
        // [self alloc] -> objc_alloc(self)
        if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
          return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
        // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
        // [self allocWithZone:nil] -> objc_allocWithZone(self)
        if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
            Args.size() == 1 && Args.front().getType()->isPointerType() &&
            Sel.getNameForSlot(0) == "allocWithZone") {
          const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
          if (isa<llvm::ConstantPointerNull>(arg))
            return CGF.EmitObjCAllocWithZone(Receiver,
                                             CGF.ConvertType(ResultType));
          return None;
        }
    }
    break;

  case OMF_autorelease:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_retain:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_release:
    if (ResultType->isVoidType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease()) {
      CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
      return nullptr;
    }
    break;

  default:
    break;
  }
  return None;
}

/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
static Optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
  auto &Runtime = CGF.getLangOpts().ObjCRuntime;
  if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
    return None;

  // Match the exact pattern '[[MyClass alloc] init]'.
  Selector Sel = OME->getSelector();
  if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
      !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
      Sel.getNameForSlot(0) != "init")
    return None;

  // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]' or
  // we are in an ObjC class method and 'receiver' is '[self alloc]'.
  auto *SubOME =
      dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
  if (!SubOME)
    return None;
  Selector SubSel = SubOME->getSelector();

  // Check if we are in an ObjC class method and the receiver expression is
  // 'self'.
  const Expr *SelfInClassMethod = nullptr;
  if (const auto *CurMD = dyn_cast_or_null<ObjCMethodDecl>(CGF.CurFuncDecl))
    if (CurMD->isClassMethod())
      if ((SelfInClassMethod = SubOME->getInstanceReceiver()))
        if (!SelfInClassMethod->isObjCSelfExpr())
          SelfInClassMethod = nullptr;

  if ((SubOME->getReceiverKind() != ObjCMessageExpr::Class &&
       !SelfInClassMethod) || !SubOME->getType()->isObjCObjectPointerType() ||
      !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
    return None;

  llvm::Value *Receiver;
  if (SelfInClassMethod) {
    Receiver = CGF.EmitScalarExpr(SelfInClassMethod);
  } else {
    QualType ReceiverType = SubOME->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    const ObjCInterfaceDecl *ID = ObjTy->getInterface();
    assert(ID && "null interface should be impossible here");
    Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
  }
  return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  if (Optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
    return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl))
      if (OMD->isClassMethod())
        if (E->getInstanceReceiver()->isObjCSelfExpr())
          isClassMessage = true;
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                   E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    assert(ObjTy && "Invalid Objective-C class message send");
    OID = ObjTy->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");
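
    // For illustration (hypothetical code): in ARC,
    //   self = [super initWithFrame:frame];
    // is a delegate init call: 'self' is nulled out here, the send consumes
    // the old value, and the result is stored back into 'self' below.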

    // Do an unsafe store of null into self.
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    // Call runtime methods directly if we can.
    if (Optional<llvm::Value *> SpecializedResult =
            tryGenerateSpecializedMessageSend(*this, ResultType, Receiver, Args,
                                              E->getSelector(), method,
                                              isClassMessage)) {
      result = RValue::get(SpecializedResult.getValue());
    } else {
      result = Runtime.GenerateMessageSend(*this, Return, ResultType,
                                           E->getSelector(), Receiver, Args,
                                           OID, method);
    }
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}
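
// For illustration (hypothetical method; the exact symbol naming is
// runtime-dependent): an instance method such as
//   - (void)setName:(NSString *)name;
// is emitted as a plain C function along the lines of
//   void "\01-[MyClass setName:]"(id self, SEL _cmd, NSString *name);
// with 'self' and '_cmd' prepended to the formal parameters below.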

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  CGM.SetInternalFunctionAttributes(OMD, Fn, FI);

  args.push_back(OMD->getSelfDecl());
  args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  // In ARC, certain methods get an extra cleanup.
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  Address src =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
         .getAddress();

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
  args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    unsigned Kind : 8;
    unsigned IsAtomic : 1;
    unsigned IsCopy : 1;
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                    const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  std::tie(IvarSize, IvarAlignment) =
      CGM.getContext().getTypeInfoInChars(ivarType);

  // If we have a copy property, we always have to use getProperty/setProperty.
  // TODO: we could actually use setProperty and an expression for non-atomics.
  if (IsCopy) {
    Kind = GetSetProperty;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  ObjCMethodDecl *OMD = PID->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction();
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                            CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
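
    // For illustration (a sketch; the exact IR depends on ivar size and
    // target): a 4-byte scalar ivar is read roughly as
    //   %load = load atomic i32, i32* %ivar.cast unordered, align 4
    // and the loaded bits are then copied into the return slot below.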
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    if (ivarSize > retTySize) {
      llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, newTy);
      bitcastType = newTy->getPointerTo();
    }
    Builder.CreateStore(ivarVal,
                        Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::FunctionCallee getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
      Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
      EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::CallBase *CallInstruction;
    RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
                             getContext().getObjCIdType(), args),
                         callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, getOverlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress().getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                                                CGF.LoadObjCSelf(), ivar, 0)
                              .getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the sizeof the type.
  llvm::Value *size =
    CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                            CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee fn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}


static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl();

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());

    LValue ivarLValue =
      EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    Address ivarAddr = ivarLValue.getAddress();

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
      llvm::Type::getIntNTy(getLLVMContext(),
                            getContext().toBits(strategy.getIvarSize()));

    // Cast both arguments to the chosen operation type.
    argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
    ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);

    // This bitcast load is likely to cause some nasty IR.
    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store. There are no memory ordering requirements.
    llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
    store->setAtomic(llvm::AtomicOrdering::Unordered);
    return;
  }

  case PropertyImplStrategy::GetSetProperty:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {

    llvm::FunctionCallee setOptimizedPropertyFn = nullptr;
    llvm::FunctionCallee setPropertyFn = nullptr;
    if (UseOptimizedSetter(CGM)) {
      // Deployment targets of OS X 10.8 / iOS 6.0 and later, with GC off.
      setOptimizedPropertyFn =
          CGM.getObjCRuntime().GetOptimizedPropertySetFunction(
              strategy.isAtomic(), strategy.isCopy());
      if (!setOptimizedPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
        return;
      }
    }
    else {
      setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
      if (!setPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
        return;
      }
    }
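
    // For illustration (a sketch; the actual symbols come from the runtime
    // interface queried above): the optimized entrypoints fold the two
    // trailing bool arguments of objc_setProperty into the symbol name,
    // e.g. roughly
    //   objc_setProperty_nonatomic_copy(self, _cmd, newValue, offset)
    // saving argument setup at every synthesized setter call site.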

    // Emit objc_setProperty((id) self, _cmd, offset, arg,
    //                       <is-atomic>, <is-copy>).
    llvm::Value *cmd =
      Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
    llvm::Value *self =
      Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
      EmitIvarOffset(classImpl->getClassInterface(), ivar);
    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
    llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
    arg = Builder.CreateBitCast(arg, VoidPtrTy);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    if (setOptimizedPropertyFn) {
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    } else {
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
               getContext().BoolTy);
      args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
               getContext().BoolTy);
      // FIXME: We shouldn't need to get the function info here, the runtime
      // already should have computed it to build the function.
      CGCallee callee = CGCallee::forDirect(setPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    }

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructSetterCall(*this, setterMethod, ivar);
    return;

  case PropertyImplStrategy::Expression:
    break;
  }

  // Otherwise, fake up some ASTs and emit a normal assignment.
  ValueDecl *selfDecl = setterMethod->getSelfDecl();
  DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
                   VK_LValue, SourceLocation());
  ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
                            selfDecl->getType(), CK_LValueToRValue, &self,
                            VK_RValue);
  ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
                          SourceLocation(), SourceLocation(),
                          &selfLoad, true, true);

  ParmVarDecl *argDecl = *setterMethod->param_begin();
  QualType argType = argDecl->getType().getNonReferenceType();
  DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue,
                  SourceLocation());
  ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
                           argType.getUnqualifiedType(), CK_LValueToRValue,
                           &arg, VK_RValue);

  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types, we can always bit cast the RHS in these cases.
  // The following absurdity is just to ensure well-formed IR.
  CastKind argCK = CK_NoOp;
  if (ivarRef.getType()->isObjCObjectPointerType()) {
    if (argLoad.getType()->isObjCObjectPointerType())
      argCK = CK_BitCast;
    else if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BlockPointerToObjCPointerCast;
    else
      argCK = CK_CPointerToObjCPointerCast;
  } else if (ivarRef.getType()->isBlockPointerType()) {
    if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BitCast;
    else
      argCK = CK_AnyPointerToBlockPointerCast;
  } else if (ivarRef.getType()->isPointerType()) {
    argCK = CK_BitCast;
  }
  ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
                           ivarRef.getType(), argCK, &argLoad,
                           VK_RValue);
  Expr *finalArg = &argLoad;
  if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
                                           argLoad.getType()))
    finalArg = &argCast;


  BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
                        ivarRef.getType(), VK_RValue, OK_Ordinary,
                        SourceLocation(), FPOptions());
  EmitStmt(&assign);
}

/// Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
  ObjCMethodDecl *OMD = PID->getSetterMethodDecl();
  assert(OMD && "Invalid call to generate setter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCSetterBody(IMP, PID, AtomicHelperFn);

  FinishFunction();
}

namespace {
  struct DestroyIvar final : EHScopeStack::Cleanup {
  private:
    llvm::Value *addr;
    const ObjCIvarDecl *ivar;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;
  public:
    DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), ivar(ivar), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      LValue lvalue
        = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
      CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
                                      Address addr,
                                      QualType type) {
  llvm::Value *null = getNullForVariable(addr);
  CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}

static void emitCXXDestructMethod(CodeGenFunction &CGF,
                                  ObjCImplementationDecl *impl) {
  CodeGenFunction::RunCleanupsScope scope(CGF);

  llvm::Value *self = CGF.LoadObjCSelf();

  const ObjCInterfaceDecl *iface = impl->getClassInterface();
  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
       ivar; ivar = ivar->getNextIvar()) {
    QualType type = ivar->getType();

    // Check whether the ivar is a destructible type.
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    CodeGenFunction::Destroyer *destroyer = nullptr;

    // Use a call to objc_storeStrong to destroy strong ivars, for the
    // general benefit of the tools.
    if (dtorKind == QualType::DK_objc_strong_lifetime) {
      destroyer = destroyARCStrongWithStore;

    // Otherwise use the default for the destruction kind.
    } else {
      destroyer = CGF.getDestroyer(dtorKind);
    }

    CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);

    CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
                                         cleanupKind & EHCleanup);
  }

  assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}

void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                                 ObjCMethodDecl *MD,
                                                 bool ctor) {
  MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
  StartObjCMethod(MD, IMP->getClassInterface());

  // Emit .cxx_construct.
  if (ctor) {
    // Suppress the final autorelease in ARC.
    AutoreleaseResult = false;

    for (const auto *IvarInit : IMP->inits()) {
      FieldDecl *Field = IvarInit->getAnyMember();
      ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
      LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
                                    LoadObjCSelf(), Ivar, 0);
      EmitAggExpr(IvarInit->getInit(),
                  AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased,
                                          AggValueSlot::DoesNotOverlap));
    }
    // constructor returns 'self'.
    CodeGenTypes &Types = CGM.getTypes();
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Value *SelfAsId =
        Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
    EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);

  // Emit .cxx_destruct.
  } else {
    emitCXXDestructMethod(*this, IMP);
  }
  FinishFunction();
}

llvm::Value *CodeGenFunction::LoadObjCSelf() {
  VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
  DeclRefExpr DRE(getContext(), Self,
                  /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
                  Self->getType(), VK_LValue, SourceLocation());
  return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}

QualType CodeGenFunction::TypeOfSelfObject() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
  const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
      getContext().getCanonicalType(selfDecl->getType()));
  return PTy->getPointeeType();
}

void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
  llvm::FunctionCallee EnumerationMutationFnPtr =
      CGM.getObjCRuntime().EnumerationMutationFunction();
  if (!EnumerationMutationFnPtr) {
    CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
    return;
  }
  CGCallee EnumerationMutationFn =
      CGCallee::forDirect(EnumerationMutationFnPtr);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  RunCleanupsScope ForScope(*this);

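  // A rough sketch of the protocol this function implements: a statement
  //   for (id x in collection) { ... }
  // is lowered into repeated sends of
  //   countByEnumeratingWithState:objects:count:
  // against a stack-allocated NSFastEnumerationState, whose public layout is
  // approximately:
  //   struct { unsigned long state; id *itemsPtr;
  //            unsigned long *mutationsPtr; unsigned long extra[5]; };
  // itemsPtr (field 1) supplies each batch of elements, and mutationsPtr
  // (field 2) is reloaded and compared below to detect mutation.
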
  // The local variable comes into scope immediately.
  AutoVarEmission variable = AutoVarEmission::invalid();
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
    variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));

  JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");

  // Fast enumeration state.
  QualType StateTy = CGM.getObjCFastEnumerationStateType();
  Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
  EmitNullInitialization(StatePtr, StateTy);

  // Number of elements in the items array.
  static const unsigned NumItems = 16;

  // Fetch the countByEnumeratingWithState:objects:count: selector.
  IdentifierInfo *II[] = {
    &CGM.getContext().Idents.get("countByEnumeratingWithState"),
    &CGM.getContext().Idents.get("objects"),
    &CGM.getContext().Idents.get("count")
  };
  Selector FastEnumSel =
      CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);

  QualType ItemsTy =
      getContext().getConstantArrayType(getContext().getObjCIdType(),
                                        llvm::APInt(32, NumItems), nullptr,
                                        ArrayType::Normal, 0);
  Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");

  // Emit the collection pointer. In ARC, we do a retain.
  llvm::Value *Collection;
  if (getLangOpts().ObjCAutoRefCount) {
    Collection = EmitARCRetainScalarExpr(S.getCollection());

    // Enter a cleanup to do the release.
    EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
  } else {
    Collection = EmitScalarExpr(S.getCollection());
  }

  // The 'continue' label needs to appear within the cleanup for the
  // collection object.
  JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");

  // Send it our message:
  CallArgList Args;

  // The first argument is a temporary of the enumeration-state type.
  Args.add(RValue::get(StatePtr.getPointer()),
           getContext().getPointerType(StateTy));

  // The second argument is a temporary array with space for NumItems
  // pointers. We'll actually be loading elements from the array
  // pointer written into the control state; this buffer is so that
  // collections that *aren't* backed by arrays can still queue up
  // batches of elements.
  Args.add(RValue::get(ItemsPtr.getPointer()),
           getContext().getPointerType(ItemsTy));

  // The third argument is the capacity of that temporary array.
  llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
  llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems);
  Args.add(RValue::get(Count), getContext().getNSUIntegerType());

  // Start the enumeration.
  RValue CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                               getContext().getNSUIntegerType(),
                                               FastEnumSel, Collection, Args);

  // The initial number of objects that were returned in the buffer.
  llvm::Value *initialBufferLimit = CountRV.getScalarVal();

  llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
  llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");

  llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy);

  // If the initial buffer limit was zero, the collection is empty; skip all
  // this. Set the branch weight assuming this has the same probability of
  // exiting the loop as any other loop exit.
  uint64_t EntryCount = getCurrentProfileCount();
  Builder.CreateCondBr(
      Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
      LoopInitBB,
      createProfileWeights(EntryCount, getProfileCount(S.getBody())));

  // Otherwise, initialize the loop.
  EmitBlock(LoopInitBB);

  // Save the initial mutations value. This is the value at an
  // address that was written into the state object by
  // countByEnumeratingWithState:objects:count:.
  Address StateMutationsPtrPtr =
      Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
  llvm::Value *StateMutationsPtr
    = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");

  llvm::Value *initialMutations =
      Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
                                "forcoll.initial-mutations");

  // Start looping. This is the point we return to whenever we have a
  // fresh, non-empty batch of objects.
  llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
  EmitBlock(LoopBodyBB);

  // The current index into the buffer.
  llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index");
  index->addIncoming(zero, LoopInitBB);

  // The current buffer size.
  llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count");
  count->addIncoming(initialBufferLimit, LoopInitBB);

  incrementProfileCounter(&S);

  // Check whether the mutations value has changed from where it was
  // at the start. StateMutationsPtr should actually be invariant between
  // refreshes.
  StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
  llvm::Value *currentMutations
    = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
                                "statemutations");

  llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
  llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");

  Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
                       WasNotMutatedBB, WasMutatedBB);

  // If the collection was mutated, call the enumeration-mutation function.
  EmitBlock(WasMutatedBB);
  llvm::Value *V =
      Builder.CreateBitCast(Collection,
                            ConvertType(getContext().getObjCIdType()));
  CallArgList Args2;
  Args2.add(RValue::get(V), getContext().getObjCIdType());
  // FIXME: We shouldn't need to get the function info here; the runtime
  // already should have computed it to build the function.
  EmitCall(
      CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2),
      EnumerationMutationFn, ReturnValueSlot(), Args2);

  // Otherwise, or if the mutation function returns, just continue.
  EmitBlock(WasNotMutatedBB);

  // Initialize the element variable.
  RunCleanupsScope elementVariableScope(*this);
  bool elementIsVariable;
  LValue elementLValue;
  QualType elementType;
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
    // Initialize the variable, in case it's a __block variable or something.
    EmitAutoVarInit(variable);

    const VarDecl *D = cast<VarDecl>(SD->getSingleDecl());
    DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false,
                        D->getType(), VK_LValue, SourceLocation());
    elementLValue = EmitLValue(&tempDRE);
    elementType = D->getType();
    elementIsVariable = true;

    if (D->isARCPseudoStrong())
      elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
  } else {
    elementLValue = LValue(); // suppress warning
    elementType = cast<Expr>(S.getElement())->getType();
    elementIsVariable = false;
  }
  llvm::Type *convertedElementType = ConvertType(elementType);

  // Fetch the buffer out of the enumeration state.
  // TODO: this pointer should actually be invariant between
  // refreshes, which would help us do certain loop optimizations.
  Address StateItemsPtr =
      Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
  llvm::Value *EnumStateItems =
      Builder.CreateLoad(StateItemsPtr, "stateitems");

  // Fetch the value at the current index from the buffer.
  llvm::Value *CurrentItemPtr =
      Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
  llvm::Value *CurrentItem =
      Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());

  // Cast that value to the right type.
  CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
                                      "currentitem");

  // Make sure we have an l-value. Yes, this gets evaluated every
  // time through the loop.
  if (!elementIsVariable) {
    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
    EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
  } else {
    EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
                           /*isInit*/ true);
  }

  // If we do have an element variable, this assignment is the end of
  // its initialization.
  if (elementIsVariable)
    EmitAutoVarCleanups(variable);

  // Perform the loop body, setting up break and continue labels.
  BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
  {
    RunCleanupsScope Scope(*this);
    EmitStmt(S.getBody());
  }
  BreakContinueStack.pop_back();

  // Destroy the element variable now.
  elementVariableScope.ForceCleanup();

  // Check whether there are more elements.
  EmitBlock(AfterBody.getBlock());

  llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");

  // First we check in the local buffer.
  llvm::Value *indexPlusOne =
      Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));

  // If we haven't overrun the buffer yet, we can continue.
  // Set the branch weights based on the simplifying assumption that this is
  // like a while-loop, i.e., ignoring that the false branch fetches more
  // elements and then returns to the loop.
  Builder.CreateCondBr(
      Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
      createProfileWeights(getProfileCount(S.getBody()), EntryCount));

  index->addIncoming(indexPlusOne, AfterBody.getBlock());
  count->addIncoming(count, AfterBody.getBlock());

  // Otherwise, we have to fetch more elements.
  EmitBlock(FetchMoreBB);

  CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                               getContext().getNSUIntegerType(),
                                               FastEnumSel, Collection, Args);

  // If we got a zero count, we're done.
  llvm::Value *refetchCount = CountRV.getScalarVal();

  // (note that the message send might split FetchMoreBB)
  index->addIncoming(zero, Builder.GetInsertBlock());
  count->addIncoming(refetchCount, Builder.GetInsertBlock());

  Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
                       EmptyBB, LoopBodyBB);

  // No more elements.
  EmitBlock(EmptyBB);

  if (!elementIsVariable) {
    // If the element was not a declaration, set it to be null.

    llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
    EmitStoreThroughLValue(RValue::get(null), elementLValue);
  }

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  ForScope.ForceCleanup();
  EmitBlock(LoopEnd.getBlock());
}

void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
  CGM.getObjCRuntime().EmitTryStmt(*this, S);
}

void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
  CGM.getObjCRuntime().EmitThrowStmt(*this, S);
}

void CodeGenFunction::EmitObjCAtSynchronizedStmt(
    const ObjCAtSynchronizedStmt &S) {
  CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}

namespace {
  struct CallObjCRelease final : EHScopeStack::Cleanup {
    CallObjCRelease(llvm::Value *object) : object(object) {}
    llvm::Value *object;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Releases at the end of the full-expression are imprecise.
      CGF.EmitARCRelease(object, ARCImpreciseLifetime);
    }
  };
}

/// Produce the code for a CK_ARCConsumeObject. Does a primitive
/// release at the end of the full-expression.
llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
                                                    llvm::Value *object) {
  // If we're in a conditional branch, we need to make the cleanup
  // conditional.
  pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
  return object;
}

llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
                                                           llvm::Value *value) {
  return EmitARCRetainAutorelease(type, value);
}

/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
  llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use;
  if (!fn)
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use);

  // This isn't really a "runtime" function, but as an intrinsic it
  // doesn't really matter as long as we align things up.
  EmitNounwindRuntimeCall(fn, values);
}

static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
  if (auto *F = dyn_cast<llvm::Function>(RTF)) {
    // If the target runtime doesn't naturally support ARC, emit weak
    // references to the runtime support library. We don't really
    // permit this to fail, but we need a particular relocation style.
    if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
        !CGM.getTriple().isOSBinFormatCOFF()) {
      F->setLinkage(llvm::Function::ExternalWeakLinkage);
    }
  }
}

static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
                                         llvm::FunctionCallee RTF) {
  setARCRuntimeFunctionLinkage(CGM, RTF.getCallee());
}

/// Perform an operation having the signature
///   i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitARCValueOperation(
    CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType,
    llvm::Function *&fn, llvm::Intrinsic::ID IntID,
    llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) {
  if (isa<llvm::ConstantPointerNull>(value))
    return value;

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  // Cast the argument to 'id'.
  llvm::Type *origType = returnType ? returnType : value->getType();
  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);

  // Call the function.
  llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
  call->setTailCallKind(tailKind);

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(call, origType);
}

/// Perform an operation having the following signature:
///   i8* (i8**)
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
                                         llvm::Function *&fn,
                                         llvm::Intrinsic::ID IntID) {
  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  // Cast the argument to 'id*'.
  llvm::Type *origType = addr.getElementType();
  addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);

  // Call the function.
  llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());

  // Cast the result back to a dereference of the original type.
  if (origType != CGF.Int8PtrTy)
    result = CGF.Builder.CreateBitCast(result, origType);

  return result;
}

/// Perform an operation having the following signature:
///   i8* (i8**, i8*)
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
                                          llvm::Value *value,
                                          llvm::Function *&fn,
                                          llvm::Intrinsic::ID IntID,
                                          bool ignored) {
  assert(addr.getElementType() == value->getType());

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  llvm::Type *origType = value->getType();

  llvm::Value *args[] = {
    CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
    CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
  };
  llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);

  if (ignored) return nullptr;

  return CGF.Builder.CreateBitCast(result, origType);
}

/// Perform an operation having the following signature:
///   void (i8**, i8**)
static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
                                 llvm::Function *&fn,
                                 llvm::Intrinsic::ID IntID) {
  assert(dst.getType() == src.getType());

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  llvm::Value *args[] = {
    CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
    CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
  };
  CGF.EmitNounwindRuntimeCall(fn, args);
}

/// Perform an operation having the signature
///   i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
                                           llvm::Value *value,
                                           llvm::Type *returnType,
                                           llvm::FunctionCallee &fn,
                                           StringRef fnName) {
  if (isa<llvm::ConstantPointerNull>(value))
    return value;

  if (!fn) {
    llvm::FunctionType *fnType =
        llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
    fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName);

    // We have native ARC, so set the nonlazybind attribute on objc_retain
    // for performance.
    if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
      if (fnName == "objc_retain")
        f->addFnAttr(llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  llvm::Type *origType = returnType ? returnType : value->getType();
  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);

  // Call the function.
  llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value);

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(Inst, origType);
}

/// Produce the code to do a retain. Based on the type, calls one of:
///   call i8* \@objc_retain(i8* %value)
///   call i8* \@objc_retainBlock(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
  if (type->isBlockPointerType())
    return EmitARCRetainBlock(value, /*mandatory*/ false);
  else
    return EmitARCRetainNonBlock(value);
}

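// For illustration (a sketch of typical -fobjc-arc output, not verbatim):
//   __strong id y = x;
// becomes, in essence,
//   %y = call i8* @objc_retain(i8* %x)
// paired with a matching @objc_release in y's cleanup.
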
/// Retain the given object, with normal retain semantics.
///   call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retain,
                               llvm::Intrinsic::objc_retain);
}

/// Retain the given block, with _Block_copy semantics.
///   call i8* \@objc_retainBlock(i8* %value)
///
/// \param mandatory - If false, emit the call with metadata
/// indicating that it's okay for the optimizer to eliminate this call
/// if it can prove that the block never escapes except down the stack.
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
                                                 bool mandatory) {
  llvm::Value *result
    = emitARCValueOperation(*this, value, nullptr,
                            CGM.getObjCEntrypoints().objc_retainBlock,
                            llvm::Intrinsic::objc_retainBlock);

  // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
  // tell the optimizer that it doesn't need to do this copy if the
  // block doesn't escape, where being passed as an argument doesn't
  // count as escaping.
  if (!mandatory && isa<llvm::Instruction>(result)) {
    llvm::CallInst *call
      = cast<llvm::CallInst>(result->stripPointerCasts());
    assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);

    call->setMetadata("clang.arc.copy_on_escape",
                      llvm::MDNode::get(Builder.getContext(), None));
  }

  return result;
}

static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
  // Fetch the void(void) inline asm which marks that we're going to
  // do something with the autoreleased return value.
  llvm::InlineAsm *&marker
    = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
  if (!marker) {
    StringRef assembly
      = CGF.CGM.getTargetCodeGenInfo()
           .getARCRetainAutoreleasedReturnValueMarker();

    // If we have an empty assembly string, there's nothing to do.
    if (assembly.empty()) {

    // Otherwise, at -O0, build an inline asm that we're going to call
    // in a moment.
    } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
      llvm::FunctionType *type =
          llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false);

      marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);

    // If we're at -O1 and above, we don't want to litter the code
    // with this marker yet, so leave a breadcrumb for the ARC
    // optimizer to pick up.
    } else {
      const char *markerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
      if (!CGF.CGM.getModule().getModuleFlag(markerKey)) {
        auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly);
        CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, markerKey, str);
      }
    }
  }

  // Call the marker asm if we made one, which we do only at -O0.
  if (marker)
    CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
}

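// On targets that define a marker string (e.g. "mov r7, r7" on some ARM
// configurations), -O0 output ends up looking roughly like:
//   %call = call i8* @foo()
//   call void asm sideeffect "mov r7, r7", ""()
//   %ret  = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
// which is the sequence the runtime recognizes to hand the result back
// at +1 without going through the autorelease pool.
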
/// Retain the given object which is the result of a function call.
///   call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// Yes, this function name is one character away from a different
/// call with completely different semantics.
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
  emitAutoreleasedReturnValueMarker(*this);
  llvm::CallInst::TailCallKind tailKind =
      CGM.getTargetCodeGenInfo()
              .shouldSuppressTailCallsOfRetainAutoreleasedReturnValue()
          ? llvm::CallInst::TCK_NoTail
          : llvm::CallInst::TCK_None;
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
      llvm::Intrinsic::objc_retainAutoreleasedReturnValue, tailKind);
}

/// Claim a possibly-autoreleased return value at +0. This is only
/// valid to do in contexts which do not rely on the retain to keep
/// the object valid for all of its uses; for example, when
/// the value is ignored, or when it is being assigned to an
/// __unsafe_unretained variable.
///
///   call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
  emitAutoreleasedReturnValueMarker(*this);
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
      llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue);
}

/// Release the given object.
///   call void \@objc_release(i8* %value)
void CodeGenFunction::EmitARCRelease(llvm::Value *value,
                                     ARCPreciseLifetime_t precise) {
  if (isa<llvm::ConstantPointerNull>(value)) return;

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(value, Int8PtrTy);

  // Call objc_release.
  llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);

  if (precise == ARCImpreciseLifetime) {
    call->setMetadata("clang.imprecise_release",
                      llvm::MDNode::get(Builder.getContext(), None));
  }
}

/// Destroy a __strong variable.
///
/// At -O0, emit a call to store 'null' into the address;
/// instrumenting tools prefer this because the address is exposed,
/// but it's relatively cumbersome to optimize.
///
/// At -O1 and above, just load and call objc_release.
///
///   call void \@objc_storeStrong(i8** %addr, i8* null)
void CodeGenFunction::EmitARCDestroyStrong(Address addr,
                                           ARCPreciseLifetime_t precise) {
  if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
    llvm::Value *null = getNullForVariable(addr);
    EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
    return;
  }

  llvm::Value *value = Builder.CreateLoad(addr);
  EmitARCRelease(value, precise);
}

/// Store into a strong object. Always calls this:
///   call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
                                                     llvm::Value *value,
                                                     bool ignored) {
  assert(addr.getElementType() == value->getType());

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  llvm::Value *args[] = {
    Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
    Builder.CreateBitCast(value, Int8PtrTy)
  };
  EmitNounwindRuntimeCall(fn, args);

  if (ignored) return nullptr;
  return value;
}

/// Store into a strong object. Sometimes calls this:
///   call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
                                                 llvm::Value *newValue,
                                                 bool ignored) {
  QualType type = dst.getType();
  bool isBlock = type->isBlockPointerType();

  // Use a store barrier at -O0 unless this is a block type or the
  // lvalue is inadequately aligned.
  if (shouldUseFusedARCCalls() &&
      !isBlock &&
      (dst.getAlignment().isZero() ||
       dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
    return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
  }

  // Otherwise, split it out.

  // Retain the new value.
  newValue = EmitARCRetain(type, newValue);

  // Read the old value.
  llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());

  // Store. We do this before the release so that any deallocs won't
  // see the old value.
  EmitStoreOfScalar(newValue, dst);

  // Finally, release the old value.
  EmitARCRelease(oldValue, dst.isARCPreciseLifetime());

  return newValue;
}

/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_autorelease,
                               llvm::Intrinsic::objc_autorelease);
}

/// Autorelease the given object.
///   call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
      llvm::Intrinsic::objc_autoreleaseReturnValue,
      llvm::CallInst::TCK_Tail);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
      llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
      llvm::CallInst::TCK_Tail);
}

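// A sketch of what the fusion buys us: for `return someStrongValue;` in a
// method returning at +0, instead of emitting separate
//   @objc_retain + @objc_autoreleaseReturnValue
// calls, a single
//   call i8* @objc_retainAutoreleaseReturnValue(i8* %v)
// is emitted, which the runtime can pair with the caller's
// objc_retainAutoreleasedReturnValue to skip the autorelease pool entirely.
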
/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
/// or
///   %retain = call i8* \@objc_retainBlock(i8* %value)
///   call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
                                                       llvm::Value *value) {
  if (!type->isBlockPointerType())
    return EmitARCRetainAutoreleaseNonBlock(value);

  if (isa<llvm::ConstantPointerNull>(value)) return value;

  llvm::Type *origType = value->getType();
  value = Builder.CreateBitCast(value, Int8PtrTy);
  value = EmitARCRetainBlock(value, /*mandatory*/ true);
  value = EmitARCAutorelease(value);
  return Builder.CreateBitCast(value, origType);
}

/// Do a fused retain/autorelease of the given object.
///   call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retainAutorelease,
                               llvm::Intrinsic::objc_retainAutorelease);
}

/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeak,
                              llvm::Intrinsic::objc_loadWeak);
}

/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeakRetained,
                              llvm::Intrinsic::objc_loadWeakRetained);
}

/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
                                               llvm::Value *value,
                                               bool ignored) {
  return emitARCStoreOperation(*this, addr, value,
                               CGM.getObjCEntrypoints().objc_storeWeak,
                               llvm::Intrinsic::objc_storeWeak, ignored);
}

/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
///   *addr = nil; objc_storeWeak(addr, value);
void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
  // If we're initializing to null, just write null to memory; no need
  // to get the runtime involved. But don't do this if optimization
  // is enabled, because accounting for this would make the optimizer
  // much more complicated.
  if (isa<llvm::ConstantPointerNull>(value) &&
      CGM.getCodeGenOpts().OptimizationLevel == 0) {
    Builder.CreateStore(value, addr);
    return;
  }

  emitARCStoreOperation(*this, addr, value,
                        CGM.getObjCEntrypoints().objc_initWeak,
                        llvm::Intrinsic::objc_initWeak, /*ignored*/ true);
}

/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  // Cast the argument to 'id*'.
  addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);

  EmitNounwindRuntimeCall(fn, addr.getPointer());
}

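// Taken together, these entry points give a __weak local its lifecycle.
// Roughly (a sketch, not verbatim IR):
//   __weak id w = x;   =>  call i8* @objc_initWeak(i8** %w, i8* %x)
//   use(w);            =>  call i8* @objc_loadWeakRetained(i8** %w)
//                          ... paired with a release of the loaded value
//   (end of scope)     =>  call void @objc_destroyWeak(i8** %w)
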
/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_moveWeak,
                       llvm::Intrinsic::objc_moveWeak);
}

/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
///   objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_copyWeak,
                       llvm::Intrinsic::objc_copyWeak);
}

void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
}

void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
  EmitARCDestroyWeak(SrcAddr);
}

/// Produce the code to do an objc_autoreleasePoolPush.
///   call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  return EmitNounwindRuntimeCall(fn);
}

/// Produce the code to do an autorelease-pool pop.
///   call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  assert(value->getType() == Int8PtrTy);

  if (getInvokeDest()) {
    // Call the runtime method, not the intrinsic, if we are handling
    // exceptions.
    llvm::FunctionCallee &fn =
        CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
    if (!fn) {
      llvm::FunctionType *fnType =
          llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
      fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop");
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    // objc_autoreleasePoolPop can throw.
    EmitRuntimeCallOrInvoke(fn, value);
  } else {
    llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
    if (!fn) {
      fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    EmitRuntimeCall(fn, value);
  }
}

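// For reference, an @autoreleasepool { ... } statement in ARC mode is
// bracketed by these two entry points, roughly:
//   %token = call i8* @objc_autoreleasePoolPush()
//   ...body...
//   call void @objc_autoreleasePoolPop(i8* %token)
// (see EmitObjCAutoreleasePoolCleanup below for how the pop is scheduled).
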
/// Produce the code to do an MRR version of objc_autoreleasePoolPush.
/// That is: [[NSAutoreleasePool alloc] init];
/// Where alloc is declared as: + (id) alloc; in NSAutoreleasePool class.
/// init is declared as: - (id) init; in its NSObject superclass.
///
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
  // [NSAutoreleasePool alloc]
  IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
  Selector AllocSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  RValue AllocRV =
      Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                  getContext().getObjCIdType(),
                                  AllocSel, Receiver, Args);

  // [Receiver init]
  Receiver = AllocRV.getScalarVal();
  II = &CGM.getContext().Idents.get("init");
  Selector InitSel = getContext().Selectors.getSelector(0, &II);
  RValue InitRV =
      Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                  getContext().getObjCIdType(),
                                  InitSel, Receiver, Args);
  return InitRV.getScalarVal();
}

/// Allocate the given objc object.
///   call i8* \@objc_alloc(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
                                            llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc,
                                "objc_alloc");
}

/// Allocate the given objc object.
///   call i8* \@objc_allocWithZone(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
                                                    llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_allocWithZone,
                                "objc_allocWithZone");
}

llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
                                                llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc_init,
                                "objc_alloc_init");
}

/// Produce the code to drain (pop) an MRR autorelease pool.
///   [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
  IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
  Selector DrainSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                           getContext().VoidTy, DrainSel, Arg,
                                           Args);
}

void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
                                              Address addr,
                                              QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}

void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
                                                Address addr,
                                                QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}

void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
                                     Address addr,
                                     QualType type) {
  CGF.EmitARCDestroyWeak(addr);
}

void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
                                          QualType type) {
  llvm::Value *value = CGF.Builder.CreateLoad(addr);
  CGF.EmitARCIntrinsicUse(value);
}

/// Autorelease the given object.
///   call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
                                                  llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
      "objc_autorelease");
}

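// Note: these EmitObjC* entry points call the runtime functions directly
// (and may use invoke), rather than going through the llvm.objc.* intrinsics
// the ARC-only paths above use. They back, e.g., the message-send
// optimization that turns
//   [SomeClass alloc]
// into (roughly)
//   call i8* @objc_alloc(i8* %cls)
// as sketched by EmitObjCAlloc above.
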
/// Retain the given object, with normal retain semantics.
///   call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
                                                     llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain");
}

/// Release the given object.
///   call void \@objc_release(i8* %value)
void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
                                      ARCPreciseLifetime_t precise) {
  if (isa<llvm::ConstantPointerNull>(value)) return;

  llvm::FunctionCallee &fn =
      CGM.getObjCEntrypoints().objc_releaseRuntimeFunction;
  if (!fn) {
    llvm::FunctionType *fnType =
        llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
    fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
    setARCRuntimeFunctionLinkage(CGM, fn);
    // We have native ARC, so set the nonlazybind attribute for performance.
    if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
      f->addFnAttr(llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(value, Int8PtrTy);

  // Call objc_release.
  llvm::CallBase *call = EmitCallOrInvoke(fn, value);

  if (precise == ARCImpreciseLifetime) {
    call->setMetadata("clang.imprecise_release",
                      llvm::MDNode::get(Builder.getContext(), None));
  }
}

namespace {
  struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCAutoreleasePoolPop(Token);
    }
  };
  struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCMRRAutoreleasePoolPop(Token);
    }
  };
}

void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
  if (CGM.getLangOpts().ObjCAutoRefCount)
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
  else
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}

static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    return true;

  case Qualifiers::OCL_Weak:
    return false;
  }

  llvm_unreachable("impossible lifetime!");
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  LValue lvalue,
                                                  QualType type) {
  llvm::Value *result;
  bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
  if (shouldRetain) {
    result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
  } else {
    assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
    result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
  }
  return TryEmitResult(result, !shouldRetain);
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  const Expr *e) {
  e = e->IgnoreParens();
  QualType type = e->getType();

  // If we're loading retained from a __strong xvalue, we can avoid
  // an extra retain/release pair by zeroing out the source of this
  // "move" operation.
  if (e->isXValue() &&
      !type.isConstQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Strong) {
    // Emit the lvalue.
    LValue lv = CGF.EmitLValue(e);

    // Load the object pointer.
    llvm::Value *result = CGF.EmitLoadOfLValue(lv,
                                               SourceLocation()).getScalarVal();

    // Set the source pointer to NULL.
    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);

    return TryEmitResult(result, true);
  }

  // As a very special optimization, in ARC++, if the l-value is the
  // result of a non-volatile assignment, do a simple retain of the
  // result of the call to objc_storeWeak instead of reloading.
  if (CGF.getLangOpts().CPlusPlus &&
      !type.isVolatileQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Weak &&
      isa<BinaryOperator>(e) &&
      cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
    return TryEmitResult(CGF.EmitScalarExpr(e), false);

  // Try to emit code for a scalar constant instead of emitting an LValue and
  // loading it, because we are not guaranteed to have an l-value. One such
  // case is a DeclRefExpr referencing a non-odr-used, constant-evaluated
  // variable.
  if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
    auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
    if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
      return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
                           !shouldRetainObjCLifetime(type.getObjCLifetime()));
  }

  return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}

typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                         llvm::Value *value)>
    ValueTransform;

/// Insert code immediately after a call.
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
                                              llvm::Value *value,
                                              ValueTransform doAfterCall,
                                              ValueTransform doFallback) {
  if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain immediately following the call.
    CGF.Builder.SetInsertPoint(call->getParent(),
                               ++llvm::BasicBlock::iterator(call));
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;
  } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain at the beginning of the normal destination block.
    llvm::BasicBlock *BB = invoke->getNormalDest();
    CGF.Builder.SetInsertPoint(BB, BB->begin());
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;

  // Bitcasts can arise because of related-result returns. Rewrite
  // the operand.
  } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
    llvm::Value *operand = bitcast->getOperand(0);
    operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
    bitcast->setOperand(0, operand);
    return bitcast;

  // Generic fall-back case.
  } else {
    // Retain using the non-block variant: we never need to do a copy
    // of a block that's been returned to us.
    return doFallback(CGF, value);
  }
}

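// Illustration (hedged, not verbatim output): for `x = [obj foo];` where x
// is __strong, the message send is emitted and then, immediately after the
// call instruction itself,
//   %r = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
// is inserted by the doAfterCall transform, keeping the marker/retain pair
// adjacent to the call boundary that the runtime's return-value handshake
// depends on.
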
/// Given that the given expression is some sort of call (which does
/// not return retained), emit a retain following it.
static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
                                            const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainNonBlock(value);
           });
}

/// Given that the given expression is some sort of call (which does
/// not return retained), perform an unsafeClaim following it.
static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
                                                 const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return value;
           });
}

llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
                                                           bool allowUnsafeClaim) {
  if (allowUnsafeClaim &&
      CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
    return emitARCUnsafeClaimCallResult(*this, E);
  } else {
    llvm::Value *value = emitARCRetainCallResult(*this, E);
    return EmitObjCConsumeObject(E->getType(), value);
  }
}

/// Determine whether it might be important to emit a separate
/// objc_retain_block on the result of the given expression, or
/// whether it's okay to just emit it in a +1 context.
static bool shouldEmitSeparateBlockRetain(const Expr *e) {
  assert(e->getType()->isBlockPointerType());
  e = e->IgnoreParens();

  // For future goodness, emit block expressions directly in +1
  // contexts if we can.
  if (isa<BlockExpr>(e))
    return false;

  if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
    switch (cast->getCastKind()) {
    // Emitting these operations in +1 contexts is goodness.
    case CK_LValueToRValue:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCConsumeObject:
    case CK_ARCProduceObject:
      return false;

    // These operations preserve a block type.
    case CK_NoOp:
    case CK_BitCast:
      return shouldEmitSeparateBlockRetain(cast->getSubExpr());

    // These operations are known to be bad (or haven't been considered).
    case CK_AnyPointerToBlockPointerCast:
    default:
      return true;
    }
  }

  return true;
}

namespace {
/// A CRTP base class for emitting expressions of retainable object
/// pointer type in ARC.
template <typename Impl, typename Result> class ARCExprEmitter {
protected:
  CodeGenFunction &CGF;
  Impl &asImpl() { return *static_cast<Impl*>(this); }

  ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}

public:
  Result visit(const Expr *e);
  Result visitCastExpr(const CastExpr *e);
  Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
  Result visitBlockExpr(const BlockExpr *e);
  Result visitBinaryOperator(const BinaryOperator *e);
  Result visitBinAssign(const BinaryOperator *e);
  Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
  Result visitBinAssignAutoreleasing(const BinaryOperator *e);
  Result visitBinAssignWeak(const BinaryOperator *e);
  Result visitBinAssignStrong(const BinaryOperator *e);

  // Minimal implementation:
  //   Result visitLValueToRValue(const Expr *e)
  //   Result visitConsumeObject(const Expr *e)
  //   Result visitExtendBlockObject(const Expr *e)
  //   Result visitReclaimReturnedObject(const Expr *e)
  //   Result visitCall(const Expr *e)
  //   Result visitExpr(const Expr *e)
  //
  //   Result emitBitCast(Result result, llvm::Type *resultType)
  //   llvm::Value *getValueOfResult(Result result)
};
}

/// Try to emit a PseudoObjectExpr under special ARC rules.
///
/// This massively duplicates emitPseudoObjectRValue.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression.
  const Expr *resultExpr = E->getResultExpr();
  assert(resultExpr);
  Result result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;

      // If this semantic is the result of the pseudo-object
      // expression, try to evaluate the source as +1.
      if (ov == resultExpr) {
        assert(!OVMA::shouldBindAsLValue(ov));
        result = asImpl().visit(ov->getSourceExpr());
        opaqueData = OVMA::bind(CGF, ov,
                                RValue::get(asImpl().getValueOfResult(result)));

      // Otherwise, just bind it.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
      }
      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      result = asImpl().visit(semantic);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
  // The default implementation just forwards the expression to visitExpr.
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
  switch (e->getCastKind()) {

  // No-op casts don't change the type, so we just ignore them.
  case CK_NoOp:
    return asImpl().visit(e->getSubExpr());

  // These casts can change the type.
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    llvm::Type *resultType = CGF.ConvertType(e->getType());
    assert(e->getSubExpr()->getType()->hasPointerRepresentation());
    Result result = asImpl().visit(e->getSubExpr());
    return asImpl().emitBitCast(result, resultType);
  }

  // Handle some casts specially.
  case CK_LValueToRValue:
    return asImpl().visitLValueToRValue(e->getSubExpr());
  case CK_ARCConsumeObject:
    return asImpl().visitConsumeObject(e->getSubExpr());
  case CK_ARCExtendBlockObject:
    return asImpl().visitExtendBlockObject(e->getSubExpr());
  case CK_ARCReclaimReturnedObject:
    return asImpl().visitReclaimReturnedObject(e->getSubExpr());

  // Otherwise, use the default logic.
  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
  switch (e->getOpcode()) {
  case BO_Comma:
    CGF.EmitIgnoredExpr(e->getLHS());
    CGF.EnsureInsertPoint();
    return asImpl().visit(e->getRHS());

  case BO_Assign:
    return asImpl().visitBinAssign(e);

  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
  switch (e->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_ExplicitNone:
    return asImpl().visitBinAssignUnsafeUnretained(e);

  case Qualifiers::OCL_Weak:
    return asImpl().visitBinAssignWeak(e);

  case Qualifiers::OCL_Autoreleasing:
    return asImpl().visitBinAssignAutoreleasing(e);

  case Qualifiers::OCL_Strong:
    return asImpl().visitBinAssignStrong(e);

  case Qualifiers::OCL_None:
    return asImpl().visitExpr(e);
  }
  llvm_unreachable("bad ObjC ownership qualifier");
}

/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
         visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
                    visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
  LValue lvalue =
    CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
  CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
                             lvalue);

  return result;
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

/// The general expression-emission logic.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
  // We should *never* see a nested full-expression here, because if
  // we fail to emit at +1, our caller must not retain after we close
  // out the full-expression.  This isn't as important in the unsafe
  // emitter.
  assert(!isa<ExprWithCleanups>(e));

  // Look through parens, __extension__, generic selection, etc.
  e = e->IgnoreParens();

  // Handle certain kinds of casts.
  if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
    return asImpl().visitCastExpr(ce);

  // Handle the comma operator.
  } else if (auto op = dyn_cast<BinaryOperator>(e)) {
    return asImpl().visitBinaryOperator(op);

  // TODO: handle conditional operators here

  // For calls and message sends, use the retained-call logic.
  // Delegate inits are a special case in that they're the only
  // returns-retained expression that *isn't* surrounded by
  // a consume.
  } else if (isa<CallExpr>(e) ||
             (isa<ObjCMessageExpr>(e) &&
              !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
    return asImpl().visitCall(e);

  // Look through pseudo-object expressions.
  } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
    return asImpl().visitPseudoObjectExpr(pseudo);
  } else if (auto *be = dyn_cast<BlockExpr>(e))
    return asImpl().visitBlockExpr(be);

  return asImpl().visitExpr(e);
}

namespace {

/// An emitter for +1 results.
struct ARCRetainExprEmitter :
  public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {

  ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(TryEmitResult result) {
    return result.getPointer();
  }

  TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
    llvm::Value *value = result.getPointer();
    value = CGF.Builder.CreateBitCast(value, resultType);
    result.setPointer(value);
    return result;
  }

  TryEmitResult visitLValueToRValue(const Expr *e) {
    return tryEmitARCRetainLoadOfScalar(CGF, e);
  }

  /// For consumptions, just emit the subexpression and thus elide
  /// the retain/release pair.
  TryEmitResult visitConsumeObject(const Expr *e) {
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, true);
  }
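  // Illustrative sketch: a CK_ARCConsumeObject cast marks a value that is
  // already produced at +1 and whose ownership is being transferred.
  // Because this emitter *wants* a +1 result, visitConsumeObject can
  // simply take that value as-is, eliding the retain/release pair that a
  // naive "emit, then retain" lowering would produce.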
  TryEmitResult visitBlockExpr(const BlockExpr *e) {
    TryEmitResult result = visitExpr(e);
    // Avoid the block-retain if this is a block literal that doesn't need
    // to be copied to the heap.
    if (e->getBlockDecl()->canAvoidCopyToHeap())
      result.setInt(true);
    return result;
  }

  /// Block extends are net +0.  Naively, we could just recurse on
  /// the subexpression, but actually we need to ensure that the
  /// value is copied as a block, so there's a little filter here.
  TryEmitResult visitExtendBlockObject(const Expr *e) {
    llvm::Value *result; // will be a +0 value

    // If we can't safely assume the sub-expression will produce a
    // block-copied value, emit the sub-expression at +0.
    if (shouldEmitSeparateBlockRetain(e)) {
      result = CGF.EmitScalarExpr(e);

    // Otherwise, try to emit the sub-expression at +1 recursively.
    } else {
      TryEmitResult subresult = asImpl().visit(e);

      // If that produced a retained value, just use that.
      if (subresult.getInt()) {
        return subresult;
      }

      // Otherwise it's +0.
      result = subresult.getPointer();
    }

    // Retain the object as a block.
    result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
    return TryEmitResult(result, true);
  }

  /// For reclaims, emit the subexpression as a retained call and
  /// skip the consumption.
  TryEmitResult visitReclaimReturnedObject(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  /// When we have an undecorated call, retroactively do a claim.
  TryEmitResult visitCall(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  // TODO: maybe special-case visitBinAssignWeak?

  TryEmitResult visitExpr(const Expr *e) {
    // We didn't find an obvious production, so emit what we've got and
    // tell the caller that we didn't manage to retain.
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, false);
  }
};
}

static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
  return ARCRetainExprEmitter(CGF).visit(e);
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue,
                                              QualType type) {
  TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = CGF.EmitARCRetain(type, value);
  return value;
}

/// EmitARCRetainScalarExpr - Semantically equivalent to
/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
/// best-effort attempt to peephole expressions that naturally produce
/// retained objects.
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return EmitARCRetainScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = EmitARCRetain(e->getType(), value);
  return value;
}
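// Illustrative sketch: given
//
//   __strong id x = makeObject();   // returns +0 autoreleased
//
// the peephole above routes the call through emitARCRetainCallResult,
// which lowers to objc_retainAutoreleasedReturnValue on the call result
// rather than a plain objc_retain.  When the callee cooperates, the
// runtime hands the object over at +1 without it ever touching an
// autorelease pool.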
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (result.getInt())
    value = EmitARCAutorelease(value);
  else
    value = EmitARCRetainAutorelease(e->getType(), value);
  return value;
}

llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
  llvm::Value *result;
  bool doRetain;

  if (shouldEmitSeparateBlockRetain(e)) {
    result = EmitScalarExpr(e);
    doRetain = true;
  } else {
    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
    result = subresult.getPointer();
    doRetain = !subresult.getInt();
  }

  if (doRetain)
    result = EmitARCRetainBlock(result, /*mandatory*/ true);
  return EmitObjCConsumeObject(e->getType(), result);
}

llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // EmitARCRetainAutoreleaseScalarExpr does this for us.
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission.  The
  // exception machinery doesn't do anything special with the
  // exception like retaining it, so there's no safety associated with
  // only running cleanups after the throw has started, and when it
  // matters it tends to be substantially inferior code.
  return EmitScalarExpr(expr);
}

namespace {

/// An emitter for assigning into an __unsafe_unretained context.
struct ARCUnsafeUnretainedExprEmitter :
  public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
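  // Illustrative sketch: where the runtime supports it, the reclaim above
  // lowers to objc_unsafeClaimAutoreleasedReturnValue, so
  //
  //   __unsafe_unretained id u = makeObject();
  //
  // takes the returned object at +0 with no retain/release traffic at
  // all, instead of pairing objc_retainAutoreleasedReturnValue with a
  // later release.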
  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
}

static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}

/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
                                              bool ignored) {
  // Evaluate the RHS first.  Whether or not the result is used, we can
  // always emit it at an unsafe +0.
  llvm::Value *value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());

  // Emit the LHS and perform the store.
  LValue lvalue = EmitLValue(e->getLHS());
  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}
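// Illustrative sketch: when the RHS of a strong assignment was emitted
// retained, the expansion in EmitARCStoreStrong above is the classic ARC
// store sequence, roughly
//
//   %old = load i8*, i8** %lhs
//   store i8* %new, i8** %lhs
//   call void @objc_release(i8* %old)
//
// The new value is stored before the old one is released, so even a
// self-assignment never leaves the l-value referring to a dead object.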
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                     const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}

/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use inline assembly.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType,
                                                   /* assembly */ "",
                                                   /* constraints */ "r",
                                                   /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  EmitNounwindRuntimeCall(extender, object);
}
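// Illustrative sketch: under a runtime with native ARC support,
//
//   @autoreleasepool { work(); }
//
// lowers to roughly
//
//   %token = call i8* @objc_autoreleasePoolPush()
//   ; ... body ...
//   call void @objc_autoreleasePoolPop(i8* %token)
//
// whereas the MRR path messages NSAutoreleasePool instead (alloc/init on
// entry, drain on exit).  In both cases the pop/drain is registered as a
// cleanup, so it also runs on exceptional exits from the body.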
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type
/// with a non-trivial copy assignment operator, produce the following
/// helper function:
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__assign_helper_atomic_property_",
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
                      SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
                      SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  Expr *Args[2] = { &DST, &SRC };
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
      C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
      VK_LValue, SourceLocation(), FPOptions());

  EmitStmt(TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
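// Illustrative sketch: for a property like
//
//   struct Value { Value &operator=(const Value &); /* ... */ };
//   @property (atomic) Value v;
//
// the function generated above is morally
//
//   static void __assign_helper_atomic_property_(Value *dst,
//                                                const Value *src) {
//     *dst = *src;
//   }
//
// and is handed to the runtime's atomic-copy entry point (e.g.
// objc_copyCppObjectAtomic on runtimes that provide an atomic copy
// helper), which runs it under a lock so the setter stays atomic.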
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return nullptr;
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II =
      &CGM.getContext().Idents.get("__copy_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn = llvm::Function::Create(
      LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
      &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
                      SourceLocation());

  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  CXXConstructExpr *CXXConstExpr =
      cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(&SRC);
  ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
                         CXXConstExpr->arg_end());

  CXXConstructExpr *TheCXXConstructExpr =
    CXXConstructExpr::Create(C, Ty, SourceLocation(),
                             CXXConstExpr->getConstructor(),
                             CXXConstExpr->isElidable(),
                             ConstructorArgs,
                             CXXConstExpr->hadMultipleCandidates(),
                             CXXConstExpr->isListInitialization(),
                             CXXConstExpr->isStdInitListInitialization(),
                             CXXConstExpr->requiresZeroInitialization(),
                             CXXConstExpr->getConstructionKind(),
                             SourceRange());

  DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
                      SourceLocation());

  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment
    = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
                                    Qualifiers(),
                                    AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased,
                                    AggValueSlot::DoesNotOverlap));

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  // Get selectors for copy/autorelease.
  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
  Selector CopySelector =
      getContext().Selectors.getNullarySelector(CopyID);
  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
  Selector AutoreleaseSelector =
      getContext().Selectors.getNullarySelector(AutoreleaseID);

  // Emit calls to copy/autorelease.
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  return Val;
}

llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
  assert(Args.size() == 3 && "Expected 3 arguments here!");

  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}
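// Illustrative sketch: with the lowering above,
//
//   if (@available(macOS 10.14, *)) { ... }
//
// becomes roughly
//
//   %ok = call i32 @__isOSVersionAtLeast(i32 10, i32 14, i32 0)
//   %cond = icmp ne i32 %ok, 0
//
// where __isOSVersionAtLeast is provided by compiler-rt.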
void CodeGenModule::emitAtAvailableLinkGuard() {
  if (!IsOSVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // Add -framework CoreFoundation to the linker commands. We still want to
  // emit the core foundation reference down below because otherwise if
  // CoreFoundation is not used in the code, the linker won't link the
  // framework.
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::FunctionCallee CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework",
      llvm::AttributeList(), /*Local=*/true);
  llvm::Function *CFLinkCheckFunc =
      cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
  if (CFLinkCheckFunc->empty()) {
    CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
    CodeGenFunction CGF(*this);
    CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
    CGF.EmitNounwindRuntimeCall(CFFunc,
                                llvm::Constant::getNullValue(VoidPtrTy));
    CGF.Builder.CreateUnreachable();
    addCompilerUsedGlobal(CFLinkCheckFunc);
  }
}

CGObjCRuntime::~CGObjCRuntime() {}