//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();

  if (E->isExpressibleAsConstantInitializer()) {
    ConstantEmitter ConstEmitter(CGM);
    return ConstEmitter.tryEmitAbstract(E, E->getType());
  }

  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
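  // (Illustrative: for @(x) with 'x' an int, the receiver is the NSNumber
  // class object and Sel is numberWithInt:; for a boxable struct, the
  // receiver is the NSValue class and Sel is valueWithBytes:objCType:.)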
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
    Args.add(RValue::get(BitCast.getPointer()), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        CGM.getModule().getMDKindID("invariant.load"),
        llvm::MDNode::get(getLLVMContext(), None));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements, nullptr,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
      (getLangOpts().ObjCAutoRefCount &&
       CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                 ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Keys, i),
                                    ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(Builder.CreateConstArrayGEP(Objects, i),
                                      ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects.getPointer()), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys.getPointer()), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
      llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
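  // (A sketch of the mechanism: EmitARCIntrinsicUse emits a call to the
  // clang.arc.use marker intrinsic, which has no effect at run time but
  // keeps the listed values live across the message send.)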
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
    const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// None and the caller can generate a msgSend instead.
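///
/// (Note on the return convention below: a present result that holds a null
/// llvm::Value* -- as in the OMF_release case -- means "a call was emitted
/// and there is nothing to return", whereas None means "fall back to an
/// ordinary message send".)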
static Optional<llvm::Value *>
tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
                                  llvm::Value *Receiver,
                                  const CallArgList& Args, Selector Sel,
                                  const ObjCMethodDecl *method,
                                  bool isClassMessage) {
  auto &CGM = CGF.CGM;
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return None;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (isClassMessage &&
        Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
      // [Foo alloc] -> objc_alloc(Foo) or
      // [self alloc] -> objc_alloc(self)
      if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
        return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
      // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo) or
      // [self allocWithZone:nil] -> objc_allocWithZone(self)
      if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
          Args.size() == 1 && Args.front().getType()->isPointerType() &&
          Sel.getNameForSlot(0) == "allocWithZone") {
        const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
        if (isa<llvm::ConstantPointerNull>(arg))
          return CGF.EmitObjCAllocWithZone(Receiver,
                                           CGF.ConvertType(ResultType));
        return None;
      }
    }
    break;

  case OMF_autorelease:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCAutorelease(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_retain:
    if (ResultType->isObjCObjectPointerType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease())
      return CGF.EmitObjCRetainNonBlock(Receiver, CGF.ConvertType(ResultType));
    break;

  case OMF_release:
    if (ResultType->isVoidType() &&
        CGM.getLangOpts().getGC() == LangOptions::NonGC &&
        Runtime.shouldUseARCFunctionsForRetainRelease()) {
      CGF.EmitObjCRelease(Receiver, ARCPreciseLifetime);
      return nullptr;
    }
    break;

  default:
    break;
  }
  return None;
}

CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
    CodeGenFunction &CGF, ReturnValueSlot Return, QualType ResultType,
    Selector Sel, llvm::Value *Receiver, const CallArgList &Args,
    const ObjCInterfaceDecl *OID, const ObjCMethodDecl *Method,
    bool isClassMessage) {
  if (Optional<llvm::Value *> SpecializedResult =
          tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args,
                                            Sel, Method, isClassMessage)) {
    return RValue::get(SpecializedResult.getValue());
  }
  return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID,
                             Method);
}

static void AppendFirstImpliedRuntimeProtocols(
    const ObjCProtocolDecl *PD,
    llvm::UniqueVector<const ObjCProtocolDecl *> &PDs) {
  if (!PD->isNonRuntimeProtocol()) {
    const auto *Can = PD->getCanonicalDecl();
    PDs.insert(Can);
    return;
  }

  for (const auto *ParentPD : PD->protocols())
    AppendFirstImpliedRuntimeProtocols(ParentPD, PDs);
}

std::vector<const ObjCProtocolDecl *>
CGObjCRuntime::GetRuntimeProtocolList(ObjCProtocolDecl::protocol_iterator begin,
                                      ObjCProtocolDecl::protocol_iterator end) {
  std::vector<const ObjCProtocolDecl *> RuntimePds;
  llvm::DenseSet<const ObjCProtocolDecl *> NonRuntimePDs;

  for (; begin != end; ++begin) {
    const auto *It = *begin;
    const auto *Can = It->getCanonicalDecl();
    if (Can->isNonRuntimeProtocol())
      NonRuntimePDs.insert(Can);
    else
      RuntimePds.push_back(Can);
  }

  // If there are no non-runtime protocols then we can just stop now.
  if (NonRuntimePDs.empty())
    return RuntimePds;

  // Else we have to search through the non-runtime protocols' inheritance
  // hierarchy DAG, stopping whenever a branch either finds a runtime protocol
  // or a non-runtime protocol without any parents. These are the
  // "first-implied" protocols from a non-runtime protocol.
  llvm::UniqueVector<const ObjCProtocolDecl *> FirstImpliedProtos;
  for (const auto *PD : NonRuntimePDs)
    AppendFirstImpliedRuntimeProtocols(PD, FirstImpliedProtos);

  // Walk the Runtime list to get all protocols implied via the inclusion of
  // this protocol, e.g. all protocols it inherits from including itself.
  llvm::DenseSet<const ObjCProtocolDecl *> AllImpliedProtocols;
  for (const auto *PD : RuntimePds) {
    const auto *Can = PD->getCanonicalDecl();
    AllImpliedProtocols.insert(Can);
    Can->getImpliedProtocols(AllImpliedProtocols);
  }

  // Similar to above, walk the list of first-implied protocols to find the
  // set of all protocols implied, excluding the listed protocols themselves
  // since they are not yet a part of the `RuntimePds` list.
  for (const auto *PD : FirstImpliedProtos) {
    PD->getImpliedProtocols(AllImpliedProtocols);
  }

  // From the first-implied list we have to finish building the final protocol
  // list. If a protocol in the first-implied list was already implied via some
  // inheritance path through some other protocols then it would be redundant
  // to add it here and so we skip over it.
  for (const auto *PD : FirstImpliedProtos) {
    if (!AllImpliedProtocols.contains(PD)) {
      RuntimePds.push_back(PD);
    }
  }

  return RuntimePds;
}

/// Instead of '[[MyClass alloc] init]', try to generate
/// 'objc_alloc_init(MyClass)'. This provides a code size improvement on the
/// caller side, as well as the optimized objc_alloc.
static Optional<llvm::Value *>
tryEmitSpecializedAllocInit(CodeGenFunction &CGF, const ObjCMessageExpr *OME) {
  auto &Runtime = CGF.getLangOpts().ObjCRuntime;
  if (!Runtime.shouldUseRuntimeFunctionForCombinedAllocInit())
    return None;

  // Match the exact pattern '[[MyClass alloc] init]'.
  Selector Sel = OME->getSelector();
  if (OME->getReceiverKind() != ObjCMessageExpr::Instance ||
      !OME->getType()->isObjCObjectPointerType() || !Sel.isUnarySelector() ||
      Sel.getNameForSlot(0) != "init")
    return None;

  // Okay, this is '[receiver init]', check if 'receiver' is '[cls alloc]'
  // with 'cls' a Class.
  auto *SubOME =
      dyn_cast<ObjCMessageExpr>(OME->getInstanceReceiver()->IgnoreParenCasts());
  if (!SubOME)
    return None;
  Selector SubSel = SubOME->getSelector();

  if (!SubOME->getType()->isObjCObjectPointerType() ||
      !SubSel.isUnarySelector() || SubSel.getNameForSlot(0) != "alloc")
    return None;

  llvm::Value *Receiver = nullptr;
  switch (SubOME->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    if (!SubOME->getInstanceReceiver()->getType()->isObjCClassType())
      return None;
    Receiver = CGF.EmitScalarExpr(SubOME->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    QualType ReceiverType = SubOME->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->castAs<ObjCObjectType>();
    const ObjCInterfaceDecl *ID = ObjTy->getInterface();
    assert(ID && "null interface should be impossible here");
    Receiver = CGF.CGM.getObjCRuntime().GetClass(CGF, ID);
    break;
  }
  case ObjCMessageExpr::SuperInstance:
  case ObjCMessageExpr::SuperClass:
    return None;
  }

  return CGF.EmitObjCAllocInit(Receiver, CGF.ConvertType(OME->getType()));
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this));
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  if (Optional<llvm::Value *> Val = tryEmitSpecializedAllocInit(*this, E))
    return AdjustObjCObjectType(*this, E->getType(), RValue::get(*Val));

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
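  // (A delegate init call is an init message sent to 'self' or 'super'
  // from inside another init method, e.g. 'self = [super init];'.)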
  bool retainSelf =
      (!isDelegateInit &&
       CGM.getLangOpts().ObjCAutoRefCount &&
       method &&
       method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    isClassMessage = ReceiverType->isObjCClassType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                     E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    OID = ReceiverType->castAs<ObjCObjectType>()->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    // Call runtime methods directly if we can.
    result = Runtime.GeneratePossiblySpecializedMessageSend(
        *this, Return, ResultType, E->getSelector(), Receiver, Args, OID,
        method, isClassMessage);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
        GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  if (OMD->isDirectMethod()) {
    Fn->setVisibility(llvm::Function::HiddenVisibility);
    CGM.SetLLVMFunctionAttributes(OMD, FI, Fn);
    CGM.SetLLVMFunctionAttributesForDefinition(OMD, Fn);
  } else {
    CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
  }

  args.push_back(OMD->getSelfDecl());
  args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  if (OMD->isDirectMethod()) {
    // This function is a direct call, it has to implement a nil check
    // on entry.
    //
    // TODO: possibly have several entry points to elide the check
    CGM.getObjCRuntime().GenerateDirectMethodPrologue(*this, Fn, OMD, CD);
  }

  // In ARC, certain methods get an extra cleanup.
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
        OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  Address src =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getAddress(CGF);

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
  args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    unsigned Kind : 8;
    unsigned IsAtomic : 1;
    unsigned IsCopy : 1;
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  auto TInfo = CGM.getContext().getTypeInfoInChars(ivarType);
  IvarSize = TInfo.Width;
  IvarAlignment = TInfo.Align;

  // If we have a copy property, we always have to use getProperty/setProperty.
  // TODO: we could actually use setProperty and an expression for non-atomics.
  if (IsCopy) {
    Kind = GetSetProperty;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
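  // (A bitfield access is a read-modify-write of its containing storage
  // unit, which cannot be expressed as a single atomic load or store.)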
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
      CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  ObjCMethodDecl *OMD = PID->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction(OMD->getEndLoc());
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = propImpl->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress(*this);
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    if (ivarSize > retTySize) {
      llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, newTy);
      bitcastType = newTy->getPointerTo();
    }
    Builder.CreateStore(ivarVal,
                        Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::FunctionCallee getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::CallBase *CallInstruction;
    RValue RV = EmitCall(getTypes().arrangeBuiltinFunctionCall(
                             getContext().getObjCIdType(), args),
                         callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
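    // (Clearing AutoreleaseResult keeps the ARC epilogue from emitting
    // a second, redundant autorelease of the returned value.)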
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, getOverlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress(*this).getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress(*this));
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the sizeof the type.
  llvm::Value *size =
      CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::FunctionCallee fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
          .getPointer(CGF);
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(CGF.getContext(), argVar, false,
                     argVar->getType().getNonReferenceType(), VK_LValue,
                     SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer(CGF);
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::FunctionCallee fn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}


static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
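  // (For example, an ivar of a POD C++ class type yields a call to the
  // class's compiler-synthesized, trivial operator=, which this check
  // accepts as trivial.)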
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = propImpl->getSetterMethodDecl();

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());

    LValue ivarLValue =
        EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    Address ivarAddr = ivarLValue.getAddress(*this);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
        llvm::Type::getIntNTy(getLLVMContext(),
                              getContext().toBits(strategy.getIvarSize()));

    // Cast both arguments to the chosen operation type.
    argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
    ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);

    // This bitcast load is likely to cause some nasty IR.
    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store. There are no memory ordering requirements.
    llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
    store->setAtomic(llvm::AtomicOrdering::Unordered);
    return;
  }

  case PropertyImplStrategy::GetSetProperty:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {

    llvm::FunctionCallee setOptimizedPropertyFn = nullptr;
    llvm::FunctionCallee setPropertyFn = nullptr;
    if (UseOptimizedSetter(CGM)) {
      // The optimized entrypoints are available on OS X 10.8 / iOS 6.0
      // and later, when GC is off.
      setOptimizedPropertyFn =
          CGM.getObjCRuntime().GetOptimizedPropertySetFunction(
              strategy.isAtomic(), strategy.isCopy());
      if (!setOptimizedPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
        return;
      }
    }
    else {
      setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
      if (!setPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
        return;
      }
    }

    // Emit objc_setProperty((id) self, _cmd, offset, arg,
    //                       <is-atomic>, <is-copy>).
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
    llvm::Value *self =
        Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);
    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
    llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
    arg = Builder.CreateBitCast(arg, VoidPtrTy);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    if (setOptimizedPropertyFn) {
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    } else {
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
               getContext().BoolTy);
      args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
               getContext().BoolTy);
      // FIXME: We shouldn't need to get the function info here, the runtime
      // already should have computed it to build the function.
      CGCallee callee = CGCallee::forDirect(setPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    }

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructSetterCall(*this, setterMethod, ivar);
    return;

  case PropertyImplStrategy::Expression:
    break;
  }

  // Otherwise, fake up some ASTs and emit a normal assignment.
  ValueDecl *selfDecl = setterMethod->getSelfDecl();
  DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
                   VK_LValue, SourceLocation());
  ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(),
                            CK_LValueToRValue, &self, VK_RValue,
                            FPOptionsOverride());
  ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
                          SourceLocation(), SourceLocation(),
                          &selfLoad, true, true);

  ParmVarDecl *argDecl = *setterMethod->param_begin();
  QualType argType = argDecl->getType().getNonReferenceType();
  DeclRefExpr arg(getContext(), argDecl, false, argType, VK_LValue,
                  SourceLocation());
  ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
                           argType.getUnqualifiedType(), CK_LValueToRValue,
                           &arg, VK_RValue, FPOptionsOverride());

  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bit-cast the RHS in these cases.
  // The following absurdity is just to ensure well-formed IR.
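  // For example, a block-typed argument assigned to an id-typed ivar takes
  // CK_BlockPointerToObjCPointerCast, while two distinct Objective-C object
  // pointer types just take a CK_BitCast.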
  CastKind argCK = CK_NoOp;
  if (ivarRef.getType()->isObjCObjectPointerType()) {
    if (argLoad.getType()->isObjCObjectPointerType())
      argCK = CK_BitCast;
    else if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BlockPointerToObjCPointerCast;
    else
      argCK = CK_CPointerToObjCPointerCast;
  } else if (ivarRef.getType()->isBlockPointerType()) {
    if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BitCast;
    else
      argCK = CK_AnyPointerToBlockPointerCast;
  } else if (ivarRef.getType()->isPointerType()) {
    argCK = CK_BitCast;
  }
  ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK,
                           &argLoad, VK_RValue, FPOptionsOverride());
  Expr *finalArg = &argLoad;
  if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
                                           argLoad.getType()))
    finalArg = &argCast;

  BinaryOperator *assign = BinaryOperator::Create(
      getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(), VK_RValue,
      OK_Ordinary, SourceLocation(), FPOptionsOverride());
  EmitStmt(assign);
}

/// Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
  ObjCMethodDecl *OMD = PID->getSetterMethodDecl();
  assert(OMD && "Invalid call to generate setter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCSetterBody(IMP, PID, AtomicHelperFn);

  FinishFunction(OMD->getEndLoc());
}

namespace {
  struct DestroyIvar final : EHScopeStack::Cleanup {
  private:
    llvm::Value *addr;
    const ObjCIvarDecl *ivar;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;
  public:
    DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), ivar(ivar), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      LValue lvalue
        = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
      CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
                                      Address addr,
                                      QualType type) {
  llvm::Value *null = getNullForVariable(addr);
  CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}

static void emitCXXDestructMethod(CodeGenFunction &CGF,
                                  ObjCImplementationDecl *impl) {
  CodeGenFunction::RunCleanupsScope scope(CGF);

  llvm::Value *self = CGF.LoadObjCSelf();

  const ObjCInterfaceDecl *iface = impl->getClassInterface();
  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
       ivar; ivar = ivar->getNextIvar()) {
    QualType type = ivar->getType();

    // Check whether the ivar is a destructible type.
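    // That is, a __strong or __weak object pointer, or a C++ ivar with a
    // non-trivial destructor; isDestructedType() returns a null kind for
    // everything else, and those ivars are skipped below.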
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    CodeGenFunction::Destroyer *destroyer = nullptr;

    // Use a call to objc_storeStrong to destroy strong ivars, for the
    // general benefit of the tools.
    if (dtorKind == QualType::DK_objc_strong_lifetime) {
      destroyer = destroyARCStrongWithStore;

    // Otherwise use the default for the destruction kind.
    } else {
      destroyer = CGF.getDestroyer(dtorKind);
    }

    CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);

    CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
                                         cleanupKind & EHCleanup);
  }

  assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}

void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                                 ObjCMethodDecl *MD,
                                                 bool ctor) {
  MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
  StartObjCMethod(MD, IMP->getClassInterface());

  // Emit .cxx_construct.
  if (ctor) {
    // Suppress the final autorelease in ARC.
    AutoreleaseResult = false;

    for (const auto *IvarInit : IMP->inits()) {
      FieldDecl *Field = IvarInit->getAnyMember();
      ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
      LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
                                    LoadObjCSelf(), Ivar, 0);
      EmitAggExpr(IvarInit->getInit(),
                  AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased,
                                          AggValueSlot::DoesNotOverlap));
    }
    // constructor returns 'self'.
    CodeGenTypes &Types = CGM.getTypes();
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Value *SelfAsId =
        Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
    EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);

  // Emit .cxx_destruct.
  } else {
    emitCXXDestructMethod(*this, IMP);
  }
  FinishFunction();
}

llvm::Value *CodeGenFunction::LoadObjCSelf() {
  VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
  DeclRefExpr DRE(getContext(), Self,
                  /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
                  Self->getType(), VK_LValue, SourceLocation());
  return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}

QualType CodeGenFunction::TypeOfSelfObject() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
  const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
      getContext().getCanonicalType(selfDecl->getType()));
  return PTy->getPointeeType();
}

void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
  llvm::FunctionCallee EnumerationMutationFnPtr =
      CGM.getObjCRuntime().EnumerationMutationFunction();
  if (!EnumerationMutationFnPtr) {
    CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
    return;
  }
  CGCallee EnumerationMutationFn =
      CGCallee::forDirect(EnumerationMutationFnPtr);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  RunCleanupsScope ForScope(*this);

  // The local variable comes into scope immediately.
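  // That is, the element variable in a loop such as
  //   for (id element in collection) { ... }
  // An arbitrary pre-existing lvalue used as the element is handled
  // further down instead.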
  AutoVarEmission variable = AutoVarEmission::invalid();
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
    variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));

  JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");

  // Fast enumeration state.
  QualType StateTy = CGM.getObjCFastEnumerationStateType();
  Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
  EmitNullInitialization(StatePtr, StateTy);

  // Number of elements in the items array.
  static const unsigned NumItems = 16;

  // Fetch the countByEnumeratingWithState:objects:count: selector.
  IdentifierInfo *II[] = {
    &CGM.getContext().Idents.get("countByEnumeratingWithState"),
    &CGM.getContext().Idents.get("objects"),
    &CGM.getContext().Idents.get("count")
  };
  Selector FastEnumSel =
      CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);

  QualType ItemsTy =
      getContext().getConstantArrayType(getContext().getObjCIdType(),
                                        llvm::APInt(32, NumItems), nullptr,
                                        ArrayType::Normal, 0);
  Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");

  // Emit the collection pointer. In ARC, we do a retain.
  llvm::Value *Collection;
  if (getLangOpts().ObjCAutoRefCount) {
    Collection = EmitARCRetainScalarExpr(S.getCollection());

    // Enter a cleanup to do the release.
    EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
  } else {
    Collection = EmitScalarExpr(S.getCollection());
  }

  // The 'continue' label needs to appear within the cleanup for the
  // collection object.
  JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");

  // Send it our message:
  CallArgList Args;

  // The first argument is a temporary of the enumeration-state type.
  Args.add(RValue::get(StatePtr.getPointer()),
           getContext().getPointerType(StateTy));

  // The second argument is a temporary array with space for NumItems
  // pointers. We'll actually be loading elements from the array
  // pointer written into the control state; this buffer is so that
  // collections that *aren't* backed by arrays can still queue up
  // batches of elements.
  Args.add(RValue::get(ItemsPtr.getPointer()),
           getContext().getPointerType(ItemsTy));

  // The third argument is the capacity of that temporary array.
  llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType());
  llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems);
  Args.add(RValue::get(Count), getContext().getNSUIntegerType());

  // Start the enumeration.
  RValue CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                               getContext().getNSUIntegerType(),
                                               FastEnumSel, Collection, Args);

  // The initial number of objects that were returned in the buffer.
  llvm::Value *initialBufferLimit = CountRV.getScalarVal();

  llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
  llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");

  llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy);

  // If the initial buffer limit was zero to begin with, the collection is
  // empty; skip all this. Set the branch weight assuming this has the same
  // probability of exiting the loop as any other loop exit.
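  // The overall control flow being built here is roughly:
  //   entry            -> forcoll.empty or forcoll.loopinit
  //   forcoll.loopinit -> forcoll.loopbody
  //   forcoll.loopbody -> ... -> forcoll.next
  //   forcoll.next     -> forcoll.loopbody or forcoll.refetch
  //   forcoll.refetch  -> forcoll.loopbody or forcoll.empty
  //   forcoll.empty    -> forcoll.end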
  uint64_t EntryCount = getCurrentProfileCount();
  Builder.CreateCondBr(
      Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB,
      LoopInitBB,
      createProfileWeights(EntryCount, getProfileCount(S.getBody())));

  // Otherwise, initialize the loop.
  EmitBlock(LoopInitBB);

  // Save the initial mutations value. This is the value at an
  // address that was written into the state object by
  // countByEnumeratingWithState:objects:count:.
  Address StateMutationsPtrPtr =
      Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
  llvm::Value *StateMutationsPtr
    = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");

  llvm::Value *initialMutations =
      Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
                                "forcoll.initial-mutations");

  // Start looping. This is the point we return to whenever we have a
  // fresh, non-empty batch of objects.
  llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
  EmitBlock(LoopBodyBB);

  // The current index into the buffer.
  llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index");
  index->addIncoming(zero, LoopInitBB);

  // The current buffer size.
  llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count");
  count->addIncoming(initialBufferLimit, LoopInitBB);

  incrementProfileCounter(&S);

  // Check whether the mutations value has changed from where it was
  // at start. StateMutationsPtr should actually be invariant between
  // refreshes.
  StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
  llvm::Value *currentMutations
    = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
                                "statemutations");

  llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
  llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");

  Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
                       WasNotMutatedBB, WasMutatedBB);

  // If the collection was mutated, call the enumeration-mutation function.
  EmitBlock(WasMutatedBB);
  llvm::Value *V =
      Builder.CreateBitCast(Collection,
                            ConvertType(getContext().getObjCIdType()));
  CallArgList Args2;
  Args2.add(RValue::get(V), getContext().getObjCIdType());
  // FIXME: We shouldn't need to get the function info here, the runtime already
  // should have computed it to build the function.
  EmitCall(
      CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2),
      EnumerationMutationFn, ReturnValueSlot(), Args2);

  // Otherwise, or if the mutation function returns, just continue.
  EmitBlock(WasNotMutatedBB);

  // Initialize the element variable.
  RunCleanupsScope elementVariableScope(*this);
  bool elementIsVariable;
  LValue elementLValue;
  QualType elementType;
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
    // Initialize the variable, in case it's a __block variable or something.
    EmitAutoVarInit(variable);

    const VarDecl *D = cast<VarDecl>(SD->getSingleDecl());
    DeclRefExpr tempDRE(getContext(), const_cast<VarDecl *>(D), false,
                        D->getType(), VK_LValue, SourceLocation());
    elementLValue = EmitLValue(&tempDRE);
    elementType = D->getType();
    elementIsVariable = true;

    if (D->isARCPseudoStrong())
      elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
  } else {
    elementLValue = LValue(); // suppress warning
    elementType = cast<Expr>(S.getElement())->getType();
    elementIsVariable = false;
  }
  llvm::Type *convertedElementType = ConvertType(elementType);

  // Fetch the buffer out of the enumeration state.
  // TODO: this pointer should actually be invariant between
  // refreshes, which would help us do certain loop optimizations.
  Address StateItemsPtr =
      Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
  llvm::Value *EnumStateItems =
      Builder.CreateLoad(StateItemsPtr, "stateitems");

  // Fetch the value at the current index from the buffer.
  llvm::Value *CurrentItemPtr =
      Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
  llvm::Value *CurrentItem =
      Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());

  if (SanOpts.has(SanitizerKind::ObjCCast)) {
    // Before using an item from the collection, check that the implicit cast
    // from id to the element type is valid. This is done with instrumentation
    // roughly corresponding to:
    //
    //   if (![item isKindOfClass:expectedCls]) { /* emit diagnostic */ }
    const ObjCObjectPointerType *ObjPtrTy =
        elementType->getAsObjCInterfacePointerType();
    const ObjCInterfaceType *InterfaceTy =
        ObjPtrTy ? ObjPtrTy->getInterfaceType() : nullptr;
    if (InterfaceTy) {
      SanitizerScope SanScope(this);
      auto &C = CGM.getContext();
      assert(InterfaceTy->getDecl() && "No decl for ObjC interface type");
      Selector IsKindOfClassSel = GetUnarySelector("isKindOfClass", C);
      CallArgList IsKindOfClassArgs;
      llvm::Value *Cls =
          CGM.getObjCRuntime().GetClass(*this, InterfaceTy->getDecl());
      IsKindOfClassArgs.add(RValue::get(Cls), C.getObjCClassType());
      llvm::Value *IsClass =
          CGM.getObjCRuntime()
              .GenerateMessageSend(*this, ReturnValueSlot(), C.BoolTy,
                                   IsKindOfClassSel, CurrentItem,
                                   IsKindOfClassArgs)
              .getScalarVal();
      llvm::Constant *StaticData[] = {
          EmitCheckSourceLocation(S.getBeginLoc()),
          EmitCheckTypeDescriptor(QualType(InterfaceTy, 0))};
      EmitCheck({{IsClass, SanitizerKind::ObjCCast}},
                SanitizerHandler::InvalidObjCCast,
                ArrayRef<llvm::Constant *>(StaticData), CurrentItem);
    }
  }

  // Cast that value to the right type.
  CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
                                      "currentitem");

  // Make sure we have an l-value. Yes, this gets evaluated every
  // time through the loop.
  if (!elementIsVariable) {
    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
    EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
  } else {
    EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
                           /*isInit*/ true);
  }

  // If we do have an element variable, this assignment is the end of
  // its initialization.
  if (elementIsVariable)
    EmitAutoVarCleanups(variable);

  // Perform the loop body, setting up break and continue labels.
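  // A 'break' in the body jumps to forcoll.end (LoopEnd); a 'continue'
  // jumps to forcoll.next (AfterBody), which re-checks the buffer index
  // before touching the collection again.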
  BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
  {
    RunCleanupsScope Scope(*this);
    EmitStmt(S.getBody());
  }
  BreakContinueStack.pop_back();

  // Destroy the element variable now.
  elementVariableScope.ForceCleanup();

  // Check whether there are more elements.
  EmitBlock(AfterBody.getBlock());

  llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");

  // First we check in the local buffer.
  llvm::Value *indexPlusOne =
      Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1));

  // If we haven't overrun the buffer yet, we can continue.
  // Set the branch weights based on the simplifying assumption that this is
  // like a while-loop, i.e., ignoring that the false branch fetches more
  // elements and then returns to the loop.
  Builder.CreateCondBr(
      Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB,
      createProfileWeights(getProfileCount(S.getBody()), EntryCount));

  index->addIncoming(indexPlusOne, AfterBody.getBlock());
  count->addIncoming(count, AfterBody.getBlock());

  // Otherwise, we have to fetch more elements.
  EmitBlock(FetchMoreBB);

  CountRV =
      CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                               getContext().getNSUIntegerType(),
                                               FastEnumSel, Collection, Args);

  // If we got a zero count, we're done.
  llvm::Value *refetchCount = CountRV.getScalarVal();

  // (note that the message send might split FetchMoreBB)
  index->addIncoming(zero, Builder.GetInsertBlock());
  count->addIncoming(refetchCount, Builder.GetInsertBlock());

  Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
                       EmptyBB, LoopBodyBB);

  // No more elements.
  EmitBlock(EmptyBB);

  if (!elementIsVariable) {
    // If the element was not a declaration, set it to be null.

    llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
    elementLValue = EmitLValue(cast<Expr>(S.getElement()));
    EmitStoreThroughLValue(RValue::get(null), elementLValue);
  }

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  ForScope.ForceCleanup();
  EmitBlock(LoopEnd.getBlock());
}

void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
  CGM.getObjCRuntime().EmitTryStmt(*this, S);
}

void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
  CGM.getObjCRuntime().EmitThrowStmt(*this, S);
}

void CodeGenFunction::EmitObjCAtSynchronizedStmt(
    const ObjCAtSynchronizedStmt &S) {
  CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}

namespace {
  struct CallObjCRelease final : EHScopeStack::Cleanup {
    CallObjCRelease(llvm::Value *object) : object(object) {}
    llvm::Value *object;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Releases at the end of the full-expression are imprecise.
      CGF.EmitARCRelease(object, ARCImpreciseLifetime);
    }
  };
}

/// Produce the code for a CK_ARCConsumeObject. Does a primitive
/// release at the end of the full-expression.
llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
                                                    llvm::Value *object) {
  // If we're in a conditional branch, we need to make the cleanup
  // conditional.
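  // pushFullExprCleanup arranges that: in a conditionally-evaluated
  // context, the release only runs if this branch was actually taken.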
  pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
  return object;
}

llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
                                                           llvm::Value *value) {
  return EmitARCRetainAutorelease(type, value);
}

/// Given a number of pointers, inform the optimizer that they're
/// being intrinsically used up until this point in the program.
void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
  llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_use;
  if (!fn)
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_use);

  // This isn't really a "runtime" function, but as an intrinsic it
  // doesn't really matter as long as we align things up.
  EmitNounwindRuntimeCall(fn, values);
}

static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
  if (auto *F = dyn_cast<llvm::Function>(RTF)) {
    // If the target runtime doesn't naturally support ARC, emit weak
    // references to the runtime support library. We don't really
    // permit this to fail, but we need a particular relocation style.
    if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
        !CGM.getTriple().isOSBinFormatCOFF()) {
      F->setLinkage(llvm::Function::ExternalWeakLinkage);
    }
  }
}

static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM,
                                         llvm::FunctionCallee RTF) {
  setARCRuntimeFunctionLinkage(CGM, RTF.getCallee());
}

/// Perform an operation having the signature
/// i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitARCValueOperation(
    CodeGenFunction &CGF, llvm::Value *value, llvm::Type *returnType,
    llvm::Function *&fn, llvm::Intrinsic::ID IntID,
    llvm::CallInst::TailCallKind tailKind = llvm::CallInst::TCK_None) {
  if (isa<llvm::ConstantPointerNull>(value))
    return value;

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  // Cast the argument to 'id'.
  llvm::Type *origType = returnType ? returnType : value->getType();
  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);

  // Call the function.
  llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value);
  call->setTailCallKind(tailKind);

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(call, origType);
}

/// Perform an operation having the following signature:
/// i8* (i8**)
static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
                                         llvm::Function *&fn,
                                         llvm::Intrinsic::ID IntID) {
  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  // Cast the argument to 'id*'.
  llvm::Type *origType = addr.getElementType();
  addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);

  // Call the function.
  llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());

  // Cast the result back to a dereference of the original type.
  if (origType != CGF.Int8PtrTy)
    result = CGF.Builder.CreateBitCast(result, origType);

  return result;
}

/// Perform an operation having the following signature:
/// i8* (i8**, i8*)
static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, Address addr,
                                          llvm::Value *value,
                                          llvm::Function *&fn,
                                          llvm::Intrinsic::ID IntID,
                                          bool ignored) {
  assert(addr.getElementType() == value->getType());

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  llvm::Type *origType = value->getType();

  llvm::Value *args[] = {
    CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy),
    CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy)
  };
  llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args);

  if (ignored) return nullptr;

  return CGF.Builder.CreateBitCast(result, origType);
}

/// Perform an operation having the following signature:
/// void (i8**, i8**)
static void emitARCCopyOperation(CodeGenFunction &CGF, Address dst, Address src,
                                 llvm::Function *&fn,
                                 llvm::Intrinsic::ID IntID) {
  assert(dst.getType() == src.getType());

  if (!fn) {
    fn = CGF.CGM.getIntrinsic(IntID);
    setARCRuntimeFunctionLinkage(CGF.CGM, fn);
  }

  llvm::Value *args[] = {
    CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy),
    CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy)
  };
  CGF.EmitNounwindRuntimeCall(fn, args);
}

/// Perform an operation having the signature
/// i8* (i8*)
/// where a null input causes a no-op and returns null.
static llvm::Value *emitObjCValueOperation(CodeGenFunction &CGF,
                                           llvm::Value *value,
                                           llvm::Type *returnType,
                                           llvm::FunctionCallee &fn,
                                           StringRef fnName) {
  if (isa<llvm::ConstantPointerNull>(value))
    return value;

  if (!fn) {
    llvm::FunctionType *fnType =
        llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false);
    fn = CGF.CGM.CreateRuntimeFunction(fnType, fnName);

    // We have Native ARC, so set the nonlazybind attribute for performance.
    if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
      if (fnName == "objc_retain")
        f->addFnAttr(llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  llvm::Type *origType = returnType ? returnType : value->getType();
  value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);

  // Call the function.
  llvm::CallBase *Inst = CGF.EmitCallOrInvoke(fn, value);

  // Cast the result back to the original type.
  return CGF.Builder.CreateBitCast(Inst, origType);
}

/// Produce the code to do a retain. Based on the type, calls one of:
/// call i8* \@objc_retain(i8* %value)
/// call i8* \@objc_retainBlock(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
  if (type->isBlockPointerType())
    return EmitARCRetainBlock(value, /*mandatory*/ false);
  else
    return EmitARCRetainNonBlock(value);
}

/// Retain the given object, with normal retain semantics.
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retain,
                               llvm::Intrinsic::objc_retain);
}

/// Retain the given block, with _Block_copy semantics.
/// call i8* \@objc_retainBlock(i8* %value)
///
/// \param mandatory - If false, emit the call with metadata
/// indicating that it's okay for the optimizer to eliminate this call
/// if it can prove that the block never escapes except down the stack.
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
                                                 bool mandatory) {
  llvm::Value *result
    = emitARCValueOperation(*this, value, nullptr,
                            CGM.getObjCEntrypoints().objc_retainBlock,
                            llvm::Intrinsic::objc_retainBlock);

  // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
  // tell the optimizer that it doesn't need to do this copy if the
  // block doesn't escape, where being passed as an argument doesn't
  // count as escaping.
  if (!mandatory && isa<llvm::Instruction>(result)) {
    llvm::CallInst *call
      = cast<llvm::CallInst>(result->stripPointerCasts());
    assert(call->getCalledOperand() ==
           CGM.getObjCEntrypoints().objc_retainBlock);

    call->setMetadata("clang.arc.copy_on_escape",
                      llvm::MDNode::get(Builder.getContext(), None));
  }

  return result;
}

static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
  // Fetch the void(void) inline asm which marks that we're going to
  // do something with the autoreleased return value.
  llvm::InlineAsm *&marker
    = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
  if (!marker) {
    StringRef assembly
      = CGF.CGM.getTargetCodeGenInfo()
           .getARCRetainAutoreleasedReturnValueMarker();

    // If we have an empty assembly string, there's nothing to do.
    if (assembly.empty()) {

    // Otherwise, at -O0, build an inline asm that we're going to call
    // in a moment.
    } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
      llvm::FunctionType *type =
          llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false);

      marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);

    // If we're at -O1 and above, we don't want to litter the code
    // with this marker yet, so leave a breadcrumb for the ARC
    // optimizer to pick up.
    } else {
      const char *markerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
      if (!CGF.CGM.getModule().getModuleFlag(markerKey)) {
        auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly);
        CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, markerKey, str);
      }
    }
  }

  // Call the marker asm if we made one, which we do only at -O0.
  if (marker)
    CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
}

/// Retain the given object which is the result of a function call.
/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// Yes, this function name is one character away from a different
/// call with completely different semantics.
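/// (This emits objc_retainAutoreleasedReturnValue, as opposed to the fused
/// objc_retainAutoreleaseReturnValue emitted further below.)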
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
  emitAutoreleasedReturnValueMarker(*this);
  llvm::CallInst::TailCallKind tailKind =
      CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
          ? llvm::CallInst::TCK_NoTail
          : llvm::CallInst::TCK_None;
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
      llvm::Intrinsic::objc_retainAutoreleasedReturnValue, tailKind);
}

/// Claim a possibly-autoreleased return value at +0. This is only
/// valid to do in contexts which do not rely on the retain to keep
/// the object valid for all of its uses; for example, when
/// the value is ignored, or when it is being assigned to an
/// __unsafe_unretained variable.
///
/// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
  emitAutoreleasedReturnValueMarker(*this);
  llvm::CallInst::TailCallKind tailKind =
      CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
          ? llvm::CallInst::TCK_NoTail
          : llvm::CallInst::TCK_None;
  return emitARCValueOperation(
      *this, value, nullptr,
      CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
      llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue, tailKind);
}

/// Release the given object.
/// call void \@objc_release(i8* %value)
void CodeGenFunction::EmitARCRelease(llvm::Value *value,
                                     ARCPreciseLifetime_t precise) {
  if (isa<llvm::ConstantPointerNull>(value)) return;

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_release;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_release);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(value, Int8PtrTy);

  // Call objc_release.
  llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);

  if (precise == ARCImpreciseLifetime) {
    call->setMetadata("clang.imprecise_release",
                      llvm::MDNode::get(Builder.getContext(), None));
  }
}

/// Destroy a __strong variable.
///
/// At -O0, emit a call to store 'null' into the address;
/// instrumenting tools prefer this because the address is exposed,
/// but it's relatively cumbersome to optimize.
///
/// At -O1 and above, just load and call objc_release.
///
/// call void \@objc_storeStrong(i8** %addr, i8* null)
void CodeGenFunction::EmitARCDestroyStrong(Address addr,
                                           ARCPreciseLifetime_t precise) {
  if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
    llvm::Value *null = getNullForVariable(addr);
    EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
    return;
  }

  llvm::Value *value = Builder.CreateLoad(addr);
  EmitARCRelease(value, precise);
}

/// Store into a strong object. Always calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr,
                                                     llvm::Value *value,
                                                     bool ignored) {
  assert(addr.getElementType() == value->getType());

  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_storeStrong;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_storeStrong);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  llvm::Value *args[] = {
    Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy),
    Builder.CreateBitCast(value, Int8PtrTy)
  };
  EmitNounwindRuntimeCall(fn, args);

  if (ignored) return nullptr;
  return value;
}

/// Store into a strong object. Sometimes calls this:
/// call void \@objc_storeStrong(i8** %addr, i8* %value)
/// Other times, breaks it down into components.
llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
                                                 llvm::Value *newValue,
                                                 bool ignored) {
  QualType type = dst.getType();
  bool isBlock = type->isBlockPointerType();

  // Use a store barrier at -O0 unless this is a block type or the
  // lvalue is inadequately aligned.
  if (shouldUseFusedARCCalls() &&
      !isBlock &&
      (dst.getAlignment().isZero() ||
       dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
    return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored);
  }

  // Otherwise, split it out.

  // Retain the new value.
  newValue = EmitARCRetain(type, newValue);

  // Read the old value.
  llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());

  // Store. We do this before the release so that any deallocs won't
  // see the old value.
  EmitStoreOfScalar(newValue, dst);

  // Finally, release the old value.
  EmitARCRelease(oldValue, dst.isARCPreciseLifetime());

  return newValue;
}

/// Autorelease the given object.
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_autorelease,
                               llvm::Intrinsic::objc_autorelease);
}

/// Autorelease the given object.
/// call i8* \@objc_autoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_autoreleaseReturnValue,
                               llvm::Intrinsic::objc_autoreleaseReturnValue,
                               llvm::CallInst::TCK_Tail);
}

/// Do a fused retain/autorelease of the given object.
/// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue,
                               llvm::Intrinsic::objc_retainAutoreleaseReturnValue,
                               llvm::CallInst::TCK_Tail);
}

/// Do a fused retain/autorelease of the given object.
/// call i8* \@objc_retainAutorelease(i8* %value)
/// or
/// %retain = call i8* \@objc_retainBlock(i8* %value)
/// call i8* \@objc_autorelease(i8* %retain)
llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
                                                       llvm::Value *value) {
  if (!type->isBlockPointerType())
    return EmitARCRetainAutoreleaseNonBlock(value);

  if (isa<llvm::ConstantPointerNull>(value)) return value;

  llvm::Type *origType = value->getType();
  value = Builder.CreateBitCast(value, Int8PtrTy);
  value = EmitARCRetainBlock(value, /*mandatory*/ true);
  value = EmitARCAutorelease(value);
  return Builder.CreateBitCast(value, origType);
}

/// Do a fused retain/autorelease of the given object.
/// call i8* \@objc_retainAutorelease(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
  return emitARCValueOperation(*this, value, nullptr,
                               CGM.getObjCEntrypoints().objc_retainAutorelease,
                               llvm::Intrinsic::objc_retainAutorelease);
}

/// i8* \@objc_loadWeak(i8** %addr)
/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeak,
                              llvm::Intrinsic::objc_loadWeak);
}

/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeakRetained,
                              llvm::Intrinsic::objc_loadWeakRetained);
}

/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
                                               llvm::Value *value,
                                               bool ignored) {
  return emitARCStoreOperation(*this, addr, value,
                               CGM.getObjCEntrypoints().objc_storeWeak,
                               llvm::Intrinsic::objc_storeWeak, ignored);
}

/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value. %addr is known to not have a current weak entry.
/// Essentially equivalent to:
/// *addr = nil; objc_storeWeak(addr, value);
void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
  // If we're initializing to null, just write null to memory; no need
  // to get the runtime involved. But don't do this if optimization
  // is enabled, because accounting for this would make the optimizer
  // much more complicated.
  if (isa<llvm::ConstantPointerNull>(value) &&
      CGM.getCodeGenOpts().OptimizationLevel == 0) {
    Builder.CreateStore(value, addr);
    return;
  }

  emitARCStoreOperation(*this, addr, value,
                        CGM.getObjCEntrypoints().objc_initWeak,
                        llvm::Intrinsic::objc_initWeak, /*ignored*/ true);
}

/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_destroyWeak);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  // Cast the argument to 'id*'.
  addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);

  EmitNounwindRuntimeCall(fn, addr.getPointer());
}

/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_moveWeak,
                       llvm::Intrinsic::objc_moveWeak);
}

/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest. Essentially
/// objc_release(objc_initWeak(dest, objc_readWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_copyWeak,
                       llvm::Intrinsic::objc_copyWeak);
}

void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
}

void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
                                            Address SrcAddr) {
  llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
  Object = EmitObjCConsumeObject(Ty, Object);
  EmitARCStoreWeak(DstAddr, Object, false);
  EmitARCDestroyWeak(SrcAddr);
}

/// Produce the code to do an objc_autoreleasepool_push.
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
  llvm::Function *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
  if (!fn) {
    fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPush);
    setARCRuntimeFunctionLinkage(CGM, fn);
  }

  return EmitNounwindRuntimeCall(fn);
}

/// Produce the code to do a primitive release.
/// call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  assert(value->getType() == Int8PtrTy);

  if (getInvokeDest()) {
    // Call the runtime method, not the intrinsic, if we are handling
    // exceptions.
    llvm::FunctionCallee &fn =
        CGM.getObjCEntrypoints().objc_autoreleasePoolPopInvoke;
    if (!fn) {
      llvm::FunctionType *fnType =
          llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
      fn = CGM.CreateRuntimeFunction(fnType, "objc_autoreleasePoolPop");
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    // objc_autoreleasePoolPop can throw.
    EmitRuntimeCallOrInvoke(fn, value);
  } else {
    llvm::FunctionCallee &fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
    if (!fn) {
      fn = CGM.getIntrinsic(llvm::Intrinsic::objc_autoreleasePoolPop);
      setARCRuntimeFunctionLinkage(CGM, fn);
    }

    EmitRuntimeCall(fn, value);
  }
}

/// Produce the code to do an MRR version of objc_autoreleasepool_push,
/// which is: [[NSAutoreleasePool alloc] init];
/// where alloc is declared as + (id)alloc in the NSAutoreleasePool class,
/// and init is declared as - (id)init in its NSObject superclass.
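/// The net effect is two ordinary message sends rather than a runtime call.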
///
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
  // [NSAutoreleasePool alloc]
  IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
  Selector AllocSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  RValue AllocRV =
      Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                  getContext().getObjCIdType(),
                                  AllocSel, Receiver, Args);

  // [Receiver init]
  Receiver = AllocRV.getScalarVal();
  II = &CGM.getContext().Idents.get("init");
  Selector InitSel = getContext().Selectors.getSelector(0, &II);
  RValue InitRV =
      Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                  getContext().getObjCIdType(),
                                  InitSel, Receiver, Args);
  return InitRV.getScalarVal();
}

/// Allocate the given objc object.
/// call i8* \@objc_alloc(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value,
                                            llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc,
                                "objc_alloc");
}

/// Allocate the given objc object.
/// call i8* \@objc_allocWithZone(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value,
                                                    llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_allocWithZone,
                                "objc_allocWithZone");
}

llvm::Value *CodeGenFunction::EmitObjCAllocInit(llvm::Value *value,
                                                llvm::Type *resultType) {
  return emitObjCValueOperation(*this, value, resultType,
                                CGM.getObjCEntrypoints().objc_alloc_init,
                                "objc_alloc_init");
}

/// Produce the code to do a primitive release.
/// [tmp drain];
void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
  IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
  Selector DrainSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
                                           getContext().VoidTy, DrainSel, Arg,
                                           Args);
}

void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
                                              Address addr,
                                              QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime);
}

void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
                                                Address addr,
                                                QualType type) {
  CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime);
}

void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
                                     Address addr,
                                     QualType type) {
  CGF.EmitARCDestroyWeak(addr);
}

void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr,
                                          QualType type) {
  llvm::Value *value = CGF.Builder.CreateLoad(addr);
  CGF.EmitARCIntrinsicUse(value);
}

/// Autorelease the given object.
/// call i8* \@objc_autorelease(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCAutorelease(llvm::Value *value,
                                                  llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_autoreleaseRuntimeFunction,
      "objc_autorelease");
}

/// Retain the given object, with normal retain semantics.
/// call i8* \@objc_retain(i8* %value)
llvm::Value *CodeGenFunction::EmitObjCRetainNonBlock(llvm::Value *value,
                                                     llvm::Type *returnType) {
  return emitObjCValueOperation(
      *this, value, returnType,
      CGM.getObjCEntrypoints().objc_retainRuntimeFunction, "objc_retain");
}

/// Release the given object.
/// call void \@objc_release(i8* %value)
void CodeGenFunction::EmitObjCRelease(llvm::Value *value,
                                      ARCPreciseLifetime_t precise) {
  if (isa<llvm::ConstantPointerNull>(value)) return;

  llvm::FunctionCallee &fn =
      CGM.getObjCEntrypoints().objc_releaseRuntimeFunction;
  if (!fn) {
    llvm::FunctionType *fnType =
        llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
    fn = CGM.CreateRuntimeFunction(fnType, "objc_release");
    setARCRuntimeFunctionLinkage(CGM, fn);
    // We have Native ARC, so set the nonlazybind attribute for performance.
    if (llvm::Function *f = dyn_cast<llvm::Function>(fn.getCallee()))
      f->addFnAttr(llvm::Attribute::NonLazyBind);
  }

  // Cast the argument to 'id'.
  value = Builder.CreateBitCast(value, Int8PtrTy);

  // Call objc_release.
  llvm::CallBase *call = EmitCallOrInvoke(fn, value);

  if (precise == ARCImpreciseLifetime) {
    call->setMetadata("clang.imprecise_release",
                      llvm::MDNode::get(Builder.getContext(), None));
  }
}

namespace {
  struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCAutoreleasePoolPop(Token);
    }
  };
  struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup {
    llvm::Value *Token;

    CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      CGF.EmitObjCMRRAutoreleasePoolPop(Token);
    }
  };
}

void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
  if (CGM.getLangOpts().ObjCAutoRefCount)
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
  else
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
}

static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
  case Qualifiers::OCL_ExplicitNone:
  case Qualifiers::OCL_Strong:
  case Qualifiers::OCL_Autoreleasing:
    return true;

  case Qualifiers::OCL_Weak:
    return false;
  }

  llvm_unreachable("impossible lifetime!");
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  LValue lvalue,
                                                  QualType type) {
  llvm::Value *result;
  bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
  if (shouldRetain) {
    result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
  } else {
    assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
    result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF));
  }
  return TryEmitResult(result, !shouldRetain);
}

static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                                  const Expr *e) {
  e = e->IgnoreParens();
  QualType type = e->getType();

  // If we're loading retained from a __strong xvalue, we can avoid
  // an extra retain/release pair by zeroing out the source of this
  // "move" operation.
  if (e->isXValue() &&
      !type.isConstQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Strong) {
    // Emit the lvalue.
    LValue lv = CGF.EmitLValue(e);

    // Load the object pointer.
    llvm::Value *result = CGF.EmitLoadOfLValue(lv,
                                               SourceLocation()).getScalarVal();

    // Set the source pointer to NULL.
    CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv);

    return TryEmitResult(result, true);
  }

  // As a very special optimization, in ARC++, if the l-value is the
  // result of a non-volatile assignment, do a simple retain of the
  // result of the call to objc_storeWeak instead of reloading.
  if (CGF.getLangOpts().CPlusPlus &&
      !type.isVolatileQualified() &&
      type.getObjCLifetime() == Qualifiers::OCL_Weak &&
      isa<BinaryOperator>(e) &&
      cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
    return TryEmitResult(CGF.EmitScalarExpr(e), false);

  // Try to emit code for a scalar constant instead of emitting an LValue
  // and loading it, because we are not guaranteed to have an l-value. One
  // such case is a DeclRefExpr referencing a non-odr-used
  // constant-evaluated variable.
  if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
    auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
    if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
      return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
                           !shouldRetainObjCLifetime(type.getObjCLifetime()));
  }

  return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
}

typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                         llvm::Value *value)>
    ValueTransform;

/// Insert code immediately after a call.
static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
                                              llvm::Value *value,
                                              ValueTransform doAfterCall,
                                              ValueTransform doFallback) {
  if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain immediately following the call.
    CGF.Builder.SetInsertPoint(call->getParent(),
                               ++llvm::BasicBlock::iterator(call));
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;
  } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
    CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();

    // Place the retain at the beginning of the normal destination block.
    llvm::BasicBlock *BB = invoke->getNormalDest();
    CGF.Builder.SetInsertPoint(BB, BB->begin());
    value = doAfterCall(CGF, value);

    CGF.Builder.restoreIP(ip);
    return value;

  // Bitcasts can arise because of related-result returns. Rewrite
  // the operand.
  } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
    llvm::Value *operand = bitcast->getOperand(0);
    operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback);
    bitcast->setOperand(0, operand);
    return bitcast;

  // Generic fall-back case.
  } else {
    // Retain using the non-block variant: we never need to do a copy
    // of a block that's been returned to us.
    return doFallback(CGF, value);
  }
}

/// Given that the given expression is some sort of call (which does
/// not return retained), emit a retain following it.
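/// For a direct call, the retain is inserted immediately after the call
/// instruction, keeping the pair adjacent so the runtime's
/// autoreleased-return-value optimization can recognize it.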
static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF,
                                            const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCRetainNonBlock(value);
           });
}

/// Given that the given expression is some sort of call (which does
/// not return retained), perform an unsafeClaim following it.
static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF,
                                                 const Expr *e) {
  llvm::Value *value = CGF.EmitScalarExpr(e);
  return emitARCOperationAfterCall(CGF, value,
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value);
           },
           [](CodeGenFunction &CGF, llvm::Value *value) {
             return value;
           });
}

llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E,
                                                           bool allowUnsafeClaim) {
  if (allowUnsafeClaim &&
      CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) {
    return emitARCUnsafeClaimCallResult(*this, E);
  } else {
    llvm::Value *value = emitARCRetainCallResult(*this, E);
    return EmitObjCConsumeObject(E->getType(), value);
  }
}

/// Determine whether it might be important to emit a separate
/// objc_retain_block on the result of the given expression, or
/// whether it's okay to just emit it in a +1 context.
static bool shouldEmitSeparateBlockRetain(const Expr *e) {
  assert(e->getType()->isBlockPointerType());
  e = e->IgnoreParens();

  // For future goodness, emit block expressions directly in +1
  // contexts if we can.
  if (isa<BlockExpr>(e))
    return false;

  if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
    switch (cast->getCastKind()) {
    // Emitting these operations in +1 contexts is goodness.
    case CK_LValueToRValue:
    case CK_ARCReclaimReturnedObject:
    case CK_ARCConsumeObject:
    case CK_ARCProduceObject:
      return false;

    // These operations preserve a block type.
    case CK_NoOp:
    case CK_BitCast:
      return shouldEmitSeparateBlockRetain(cast->getSubExpr());

    // These operations are known to be bad (or haven't been considered).
    case CK_AnyPointerToBlockPointerCast:
    default:
      return true;
    }
  }

  return true;
}

namespace {
/// A CRTP base class for emitting expressions of retainable object
/// pointer type in ARC.
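/// Subclasses supply the hooks listed under "Minimal implementation" below;
/// asImpl() dispatches to them statically, without any virtual calls.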
namespace {
/// A CRTP base class for emitting expressions of retainable object
/// pointer type in ARC.
template <typename Impl, typename Result> class ARCExprEmitter {
protected:
  CodeGenFunction &CGF;
  Impl &asImpl() { return *static_cast<Impl*>(this); }

  ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {}

public:
  Result visit(const Expr *e);
  Result visitCastExpr(const CastExpr *e);
  Result visitPseudoObjectExpr(const PseudoObjectExpr *e);
  Result visitBlockExpr(const BlockExpr *e);
  Result visitBinaryOperator(const BinaryOperator *e);
  Result visitBinAssign(const BinaryOperator *e);
  Result visitBinAssignUnsafeUnretained(const BinaryOperator *e);
  Result visitBinAssignAutoreleasing(const BinaryOperator *e);
  Result visitBinAssignWeak(const BinaryOperator *e);
  Result visitBinAssignStrong(const BinaryOperator *e);

  // Minimal implementation:
  //   Result visitLValueToRValue(const Expr *e)
  //   Result visitConsumeObject(const Expr *e)
  //   Result visitExtendBlockObject(const Expr *e)
  //   Result visitReclaimReturnedObject(const Expr *e)
  //   Result visitCall(const Expr *e)
  //   Result visitExpr(const Expr *e)
  //
  //   Result emitBitCast(Result result, llvm::Type *resultType)
  //   llvm::Value *getValueOfResult(Result result)
};
}

/// Try to emit a PseudoObjectExpr under special ARC rules.
///
/// This massively duplicates emitPseudoObjectRValue.
template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) {
  SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;

  // Find the result expression.
  const Expr *resultExpr = E->getResultExpr();
  assert(resultExpr);
  Result result;

  for (PseudoObjectExpr::const_semantics_iterator
         i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
    const Expr *semantic = *i;

    // If this semantic expression is an opaque value, bind it
    // to the result of its source expression.
    if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
      typedef CodeGenFunction::OpaqueValueMappingData OVMA;
      OVMA opaqueData;

      // If this semantic is the result of the pseudo-object
      // expression, try to evaluate the source as +1.
      if (ov == resultExpr) {
        assert(!OVMA::shouldBindAsLValue(ov));
        result = asImpl().visit(ov->getSourceExpr());
        opaqueData = OVMA::bind(CGF, ov,
                            RValue::get(asImpl().getValueOfResult(result)));

      // Otherwise, just bind it.
      } else {
        opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
      }
      opaques.push_back(opaqueData);

    // Otherwise, if the expression is the result, evaluate it
    // and remember the result.
    } else if (semantic == resultExpr) {
      result = asImpl().visit(semantic);

    // Otherwise, evaluate the expression in an ignored context.
    } else {
      CGF.EmitIgnoredExpr(semantic);
    }
  }

  // Unbind all the opaques now.
  for (unsigned i = 0, e = opaques.size(); i != e; ++i)
    opaques[i].unbind(CGF);

  return result;
}
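// Illustrative sketch (assumed source): for a property load such as
//   id x = self.name;
// the pseudo-object's result expression is the synthesized getter send;
// routing it through asImpl().visit lets a +1-producing emitter claim
// the send's result directly instead of retaining after the fact.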
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl, Result>::visitBlockExpr(const BlockExpr *e) {
  // The default implementation just forwards the expression to visitExpr.
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) {
  switch (e->getCastKind()) {

  // No-op casts don't change the type, so we just ignore them.
  case CK_NoOp:
    return asImpl().visit(e->getSubExpr());

  // These casts can change the type.
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_BitCast: {
    llvm::Type *resultType = CGF.ConvertType(e->getType());
    assert(e->getSubExpr()->getType()->hasPointerRepresentation());
    Result result = asImpl().visit(e->getSubExpr());
    return asImpl().emitBitCast(result, resultType);
  }

  // Handle some casts specially.
  case CK_LValueToRValue:
    return asImpl().visitLValueToRValue(e->getSubExpr());
  case CK_ARCConsumeObject:
    return asImpl().visitConsumeObject(e->getSubExpr());
  case CK_ARCExtendBlockObject:
    return asImpl().visitExtendBlockObject(e->getSubExpr());
  case CK_ARCReclaimReturnedObject:
    return asImpl().visitReclaimReturnedObject(e->getSubExpr());

  // Otherwise, use the default logic.
  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) {
  switch (e->getOpcode()) {
  case BO_Comma:
    CGF.EmitIgnoredExpr(e->getLHS());
    CGF.EnsureInsertPoint();
    return asImpl().visit(e->getRHS());

  case BO_Assign:
    return asImpl().visitBinAssign(e);

  default:
    return asImpl().visitExpr(e);
  }
}

template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) {
  switch (e->getLHS()->getType().getObjCLifetime()) {
  case Qualifiers::OCL_ExplicitNone:
    return asImpl().visitBinAssignUnsafeUnretained(e);

  case Qualifiers::OCL_Weak:
    return asImpl().visitBinAssignWeak(e);

  case Qualifiers::OCL_Autoreleasing:
    return asImpl().visitBinAssignAutoreleasing(e);

  case Qualifiers::OCL_Strong:
    return asImpl().visitBinAssignStrong(e);

  case Qualifiers::OCL_None:
    return asImpl().visitExpr(e);
  }
  llvm_unreachable("bad ObjC ownership qualifier");
}
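// Illustrative sketch (assumed source): the ownership dispatch above means
//   __weak id w;   w = makeObject();   // -> visitBinAssignWeak
//   __strong id s; s = makeObject();   // -> visitBinAssignStrong
// while assignments through unqualified (OCL_None) l-values fall back to
// the emitter's generic visitExpr path.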
/// The default rule for __unsafe_unretained emits the RHS recursively,
/// stores into the unsafe variable, and propagates the result outward.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::
                    visitBinAssignUnsafeUnretained(const BinaryOperator *e) {
  // Recursively emit the RHS.
  // For __block safety, do this before emitting the LHS.
  Result result = asImpl().visit(e->getRHS());

  // Perform the store.
  LValue lvalue =
    CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store);
  CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)),
                             lvalue);

  return result;
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

template <typename Impl, typename Result>
Result
ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) {
  return asImpl().visitExpr(e);
}

/// The general expression-emission logic.
template <typename Impl, typename Result>
Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) {
  // We should *never* see a nested full-expression here, because if
  // we fail to emit at +1, our caller must not retain after we close
  // out the full-expression.  This isn't as important in the unsafe
  // emitter.
  assert(!isa<ExprWithCleanups>(e));

  // Look through parens, __extension__, generic selection, etc.
  e = e->IgnoreParens();

  // Handle certain kinds of casts.
  if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
    return asImpl().visitCastExpr(ce);

  // Handle the comma operator.
  } else if (auto op = dyn_cast<BinaryOperator>(e)) {
    return asImpl().visitBinaryOperator(op);

  // TODO: handle conditional operators here

  // For calls and message sends, use the retained-call logic.
  // Delegate inits are a special case in that they're the only
  // returns-retained expression that *isn't* surrounded by
  // a consume.
  } else if (isa<CallExpr>(e) ||
             (isa<ObjCMessageExpr>(e) &&
              !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
    return asImpl().visitCall(e);

  // Look through pseudo-object expressions.
  } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
    return asImpl().visitPseudoObjectExpr(pseudo);
  } else if (auto *be = dyn_cast<BlockExpr>(e))
    return asImpl().visitBlockExpr(be);

  return asImpl().visitExpr(e);
}

namespace {

/// An emitter for +1 results.
struct ARCRetainExprEmitter :
  public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> {

  ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(TryEmitResult result) {
    return result.getPointer();
  }

  TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) {
    llvm::Value *value = result.getPointer();
    value = CGF.Builder.CreateBitCast(value, resultType);
    result.setPointer(value);
    return result;
  }

  TryEmitResult visitLValueToRValue(const Expr *e) {
    return tryEmitARCRetainLoadOfScalar(CGF, e);
  }

  /// For consumptions, just emit the subexpression and thus elide
  /// the retain/release pair.
  TryEmitResult visitConsumeObject(const Expr *e) {
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, true);
  }
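  // Illustrative sketch (assumed source): in a consumed position such as
  //   takeOwnership([Widget new]);
  // the CK_ARCConsumeObject cast wraps a +1-producing send, so emitting
  // the subexpression and reporting it as already retained elides the
  // retain/release pair a naive emission would create.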
  TryEmitResult visitBlockExpr(const BlockExpr *e) {
    TryEmitResult result = visitExpr(e);
    // Avoid the block-retain if this is a block literal that doesn't need
    // to be copied to the heap.
    if (e->getBlockDecl()->canAvoidCopyToHeap())
      result.setInt(true);
    return result;
  }

  /// Block extends are net +0.  Naively, we could just recurse on
  /// the subexpression, but actually we need to ensure that the
  /// value is copied as a block, so there's a little filter here.
  TryEmitResult visitExtendBlockObject(const Expr *e) {
    llvm::Value *result; // will be a +0 value

    // If we can't safely assume the sub-expression will produce a
    // block-copied value, emit the sub-expression at +0.
    if (shouldEmitSeparateBlockRetain(e)) {
      result = CGF.EmitScalarExpr(e);

    // Otherwise, try to emit the sub-expression at +1 recursively.
    } else {
      TryEmitResult subresult = asImpl().visit(e);

      // If that produced a retained value, just use that.
      if (subresult.getInt()) {
        return subresult;
      }

      // Otherwise it's +0.
      result = subresult.getPointer();
    }

    // Retain the object as a block.
    result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
    return TryEmitResult(result, true);
  }

  /// For reclaims, emit the subexpression as a retained call and
  /// skip the consumption.
  TryEmitResult visitReclaimReturnedObject(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  /// When we have an undecorated call, retroactively do a claim.
  TryEmitResult visitCall(const Expr *e) {
    llvm::Value *result = emitARCRetainCallResult(CGF, e);
    return TryEmitResult(result, true);
  }

  // TODO: maybe special-case visitBinAssignWeak?

  TryEmitResult visitExpr(const Expr *e) {
    // We didn't find an obvious production, so emit what we've got and
    // tell the caller that we didn't manage to retain.
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, false);
  }
};
}

static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
  return ARCRetainExprEmitter(CGF).visit(e);
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue,
                                              QualType type) {
  TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = CGF.EmitARCRetain(type, value);
  return value;
}

/// EmitARCRetainScalarExpr - Semantically equivalent to
/// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a
/// best-effort attempt to peephole expressions that naturally produce
/// retained objects.
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return EmitARCRetainScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = EmitARCRetain(e->getType(), value);
  return value;
}
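// Illustrative sketch (assumed source): for
//   __strong id x = [obj widget];
// the send returns at +0 via the autorelease pool, so the peephole emits
// objc_retainAutoreleasedReturnValue immediately after the call and
// reports the result as +1, avoiding a separate objc_retain here.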
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (result.getInt())
    value = EmitARCAutorelease(value);
  else
    value = EmitARCRetainAutorelease(e->getType(), value);
  return value;
}

llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
  llvm::Value *result;
  bool doRetain;

  if (shouldEmitSeparateBlockRetain(e)) {
    result = EmitScalarExpr(e);
    doRetain = true;
  } else {
    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
    result = subresult.getPointer();
    doRetain = !subresult.getInt();
  }

  if (doRetain)
    result = EmitARCRetainBlock(result, /*mandatory*/ true);
  return EmitObjCConsumeObject(e->getType(), result);
}

llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // EmitARCRetainAutoreleaseScalarExpr does this for us.
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission.  The
  // exception machinery doesn't do anything special with the
  // exception like retaining it, so there's no safety associated with
  // only running cleanups after the throw has started, and when it
  // matters it tends to be substantially inferior code.
  return EmitScalarExpr(expr);
}
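// Illustrative sketch (assumed source): in ARC,
//   @throw makeError();
// retains and autoreleases the operand before the full-expression's
// cleanups run, so the exception object is guaranteed to outlive the
// scope that raised it.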
namespace {

/// An emitter for assigning into an __unsafe_unretained context.
struct ARCUnsafeUnretainedExprEmitter :
  public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
}

static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}

/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
                                              bool ignored) {
  // Evaluate the RHS first.  If we're ignoring the result, assume
  // that we can emit at an unsafe +0.
  llvm::Value *value;
  if (ignored) {
    value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
  } else {
    value = EmitScalarExpr(e->getRHS());
  }

  // Emit the LHS and perform the store.
  LValue lvalue = EmitLValue(e->getLHS());
  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}
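// Illustrative sketch (assumed IR shapes): when the RHS was emitted
// retained, the strong store above expands to
//   %old = load i8*, i8** %lhs
//   store i8* %new, i8** %lhs
//   call void @objc_release(i8* %old)
// otherwise the store is delegated to EmitARCStoreStrong, which can use
// the objc_storeStrong runtime function instead.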
void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                         const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}
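// Illustrative sketch: under a native-ARC runtime,
//   @autoreleasepool { ... }
// lowers roughly to
//   %token = call i8* @objc_autoreleasePoolPush()
//   ; ...body...
//   call void @objc_autoreleasePoolPop(i8* %token)
// with the pop registered as a scope cleanup so it runs when the
// statement's body exits.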
/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use an inline assembly.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::InlineAsm *extender = llvm::InlineAsm::get(extenderType,
                                                   /* assembly */ "",
                                                   /* constraints */ "r",
                                                   /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  EmitNounwindRuntimeCall(extender, object);
}

/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type
/// with a non-trivial copy assignment function, produce the following
/// helper function:
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__assign_helper_atomic_property_",
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr DstExpr(C, &DstDecl, false, DestTy, VK_RValue, SourceLocation());
  UnaryOperator *DST = UnaryOperator::Create(
      C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  DeclRefExpr SrcExpr(C, &SrcDecl, false, SrcTy, VK_RValue, SourceLocation());
  UnaryOperator *SRC = UnaryOperator::Create(
      C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  Expr *Args[2] = {DST, SRC};
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
      C, OO_Equal, CalleeExp->getCallee(), Args, DestTy->getPointeeType(),
      VK_LValue, SourceLocation(), FPOptionsOverride());

  EmitStmt(TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}
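// Illustrative sketch (assumed C++-level shape): the matching getter
// helper generated below behaves like
//   static void __copy_helper_atomic_property_(Ty *dest, const Ty *src) {
//     new (dest) Ty(*src);
//   }
// i.e. it copy-constructs the property value into caller-provided storage
// so the runtime can perform the copy under its atomic-property lock.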
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return nullptr;
  if (!(PD->getPropertyAttributes() & ObjCPropertyAttribute::kind_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II =
      &CGM.getContext().Idents.get("__copy_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn = llvm::Function::Create(
      LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
      &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
                      SourceLocation());

  UnaryOperator *SRC = UnaryOperator::Create(
      C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
      SourceLocation(), false, FPOptionsOverride());

  CXXConstructExpr *CXXConstExpr =
      cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  SmallVector<Expr*, 4> ConstructorArgs;
  ConstructorArgs.push_back(SRC);
  ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
                         CXXConstExpr->arg_end());

  CXXConstructExpr *TheCXXConstructExpr =
      CXXConstructExpr::Create(C, Ty, SourceLocation(),
                               CXXConstExpr->getConstructor(),
                               CXXConstExpr->isElidable(),
                               ConstructorArgs,
                               CXXConstExpr->hadMultipleCandidates(),
                               CXXConstExpr->isListInitialization(),
                               CXXConstExpr->isStdInitListInitialization(),
                               CXXConstExpr->requiresZeroInitialization(),
                               CXXConstExpr->getConstructionKind(),
                               SourceRange());

  DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
                      SourceLocation());

  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment
    = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
                                    Qualifiers(),
                                    AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased,
                                    AggValueSlot::DoesNotOverlap));

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  // Get selectors for copy/autorelease.
  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
  Selector CopySelector =
      getContext().Selectors.getNullarySelector(CopyID);
  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
  Selector AutoreleaseSelector =
      getContext().Selectors.getNullarySelector(AutoreleaseID);

  // Emit calls to copy/autorelease.
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  return Val;
}

static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) {
  switch (TT.getOS()) {
  case llvm::Triple::Darwin:
  case llvm::Triple::MacOSX:
    return llvm::MachO::PLATFORM_MACOS;
  case llvm::Triple::IOS:
    return llvm::MachO::PLATFORM_IOS;
  case llvm::Triple::TvOS:
    return llvm::MachO::PLATFORM_TVOS;
  case llvm::Triple::WatchOS:
    return llvm::MachO::PLATFORM_WATCHOS;
  default:
    return /*Unknown platform*/ 0;
  }
}
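// Illustrative note (assumed encoding details): the values above follow
// the MachO PLATFORM_* numbering, e.g. PLATFORM_MACOS == 1 and
// PLATFORM_IOS == 2, which is what __isPlatformVersionAtLeast expects as
// its first argument.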
static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
                                                 const VersionTuple &Version) {
  CodeGenModule &CGM = CGF.CGM;
  // Note: we intend to support multi-platform version checks, so reserve
  // the room for a dual platform checking invocation that will be
  // implemented in the future.
  llvm::SmallVector<llvm::Value *, 8> Args;

  auto EmitArgs = [&](const VersionTuple &Version, const llvm::Triple &TT) {
    Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
    Args.push_back(
        llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
    Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
    Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0));
    Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0));
  };

  assert(!Version.empty() && "unexpected empty version");
  EmitArgs(Version, CGM.getTarget().getTriple());

  if (!CGM.IsPlatformVersionAtLeastFn) {
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        CGM.Int32Ty, {CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty},
        false);
    CGM.IsPlatformVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isPlatformVersionAtLeast");
  }

  llvm::Value *Check =
      CGF.EmitNounwindRuntimeCall(CGM.IsPlatformVersionAtLeastFn, Args);
  return CGF.Builder.CreateICmpNE(Check,
                                  llvm::Constant::getNullValue(CGM.Int32Ty));
}

llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
  // Darwin uses the new __isPlatformVersionAtLeast family of routines.
  if (CGM.getTarget().getTriple().isOSDarwin())
    return emitIsPlatformVersionAtLeast(*this, Version);

  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
  llvm::Value *Args[] = {
      llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
      llvm::ConstantInt::get(CGM.Int32Ty, Min ? *Min : 0),
      llvm::ConstantInt::get(CGM.Int32Ty, SMin ? *SMin : 0),
  };

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}

static bool isFoundationNeededForDarwinAvailabilityCheck(
    const llvm::Triple &TT, const VersionTuple &TargetVersion) {
  VersionTuple FoundationDroppedInVersion;
  switch (TT.getOS()) {
  case llvm::Triple::IOS:
  case llvm::Triple::TvOS:
    FoundationDroppedInVersion = VersionTuple(/*Major=*/13);
    break;
  case llvm::Triple::WatchOS:
    FoundationDroppedInVersion = VersionTuple(/*Major=*/6);
    break;
  case llvm::Triple::Darwin:
  case llvm::Triple::MacOSX:
    FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15);
    break;
  default:
    llvm_unreachable("Unexpected OS");
  }
  return TargetVersion < FoundationDroppedInVersion;
}
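// Illustrative sketch (assumed source and IR shapes): on Darwin,
//   if (@available(iOS 13, *)) { ... }
// lowers roughly to
//   %ok = call i32 @__isPlatformVersionAtLeast(i32 2, i32 13, i32 0, i32 0)
//   %cond = icmp ne i32 %ok, 0
// where the leading argument is the MachO platform ID for iOS.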
void CodeGenModule::emitAtAvailableLinkGuard() {
  if (!IsPlatformVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // @available doesn't need Foundation on macOS 10.15+, iOS/tvOS 13+, or
  // watchOS 6+.
  if (!isFoundationNeededForDarwinAvailabilityCheck(
          Target.getTriple(), Target.getPlatformMinVersion()))
    return;
  // Add -framework CoreFoundation to the linker commands. We still want to
  // emit the CoreFoundation reference down below because otherwise, if
  // CoreFoundation is not used in the code, the linker won't link the
  // framework.
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::FunctionCallee CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::FunctionCallee CFLinkCheckFuncRef = CreateRuntimeFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework",
      llvm::AttributeList(), /*Local=*/true);
  llvm::Function *CFLinkCheckFunc =
      cast<llvm::Function>(CFLinkCheckFuncRef.getCallee()->stripPointerCasts());
  if (CFLinkCheckFunc->empty()) {
    CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
    CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
    CodeGenFunction CGF(*this);
    CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
    CGF.EmitNounwindRuntimeCall(CFFunc,
                                llvm::Constant::getNullValue(VoidPtrTy));
    CGF.Builder.CreateUnreachable();
    addCompilerUsedGlobal(CFLinkCheckFunc);
  }
}

CGObjCRuntime::~CGObjCRuntime() {}