//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();
  assert(BoxingMethod && "BoxingMethod is null");
  assert(BoxingMethod->isClassMethod() &&
         "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();
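
  // For example (a sketch; the concrete method is chosen by Sema from the
  // sub-expression's type, and these names are illustrative):
  //   @(42)        => [NSNumber numberWithInt:42]
  //   @("abc")     => [NSString stringWithUTF8String:"abc"]
  //   @(someRange) => [NSValue valueWithBytes:&someRange objCType:<encoding>]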

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
    Args.add(RValue::get(BitCast.getPointer()), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getBeginLoc());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        CGM.getModule().getMDKindID("invariant.load"),
        llvm::MDNode::get(getLLVMContext(), None));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");
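
  // A sketch of the lowering this performs for, e.g., @[obj1, obj2]:
  //   const id objects[2] = { obj1, obj2 };
  //   [NSArray arrayWithObjects:objects count:2]
  // Dictionary literals fill parallel "keys" and "objects" buffers for
  // +dictionaryWithObjects:forKeys:count: instead.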

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
    (getLangOpts().ObjCAutoRefCount &&
     CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects.getPointer()), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys.getPointer()), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
    llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
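  // (EmitARCIntrinsicUse emits a call to the clang.arc.use intrinsic, which
  //  keeps the values formally "used" so the ARC optimizer cannot shrink
  //  their lifetimes past the message send.)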
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
    const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

/// The ObjC runtime may provide entrypoints that are likely to be faster
/// than an ordinary message send of the appropriate selector.
///
/// The entrypoints are guaranteed to be equivalent to just sending the
/// corresponding message. If the entrypoint is implemented naively as just a
/// message send, using it is a trade-off: it sacrifices a few cycles of
/// overhead to save a small amount of code. However, it's possible for
/// runtimes to detect and special-case classes that use "standard"
/// behavior; if that's dynamically a large proportion of all objects, using
/// the entrypoint will also be faster than using a message send.
///
/// If the runtime does support a required entrypoint, then this method will
/// generate a call and return the resulting value. Otherwise it will return
/// None and the caller can generate a msgSend instead.
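///
/// For example, on runtimes that support it, "[Foo alloc]" is lowered to a
/// direct call to the entrypoint objc_alloc (see the OMF_alloc case below)
/// rather than a full objc_msgSend of the 'alloc' selector.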
static Optional<llvm::Value *>
tryGenerateSpecializedMessageSend(CodeGenFunction &CGF, QualType ResultType,
                                  llvm::Value *Receiver,
                                  const CallArgList& Args, Selector Sel,
                                  const ObjCMethodDecl *method) {
  auto &CGM = CGF.CGM;
  if (!CGM.getCodeGenOpts().ObjCConvertMessagesToRuntimeCalls)
    return None;

  auto &Runtime = CGM.getLangOpts().ObjCRuntime;
  switch (Sel.getMethodFamily()) {
  case OMF_alloc:
    if (Runtime.shouldUseRuntimeFunctionsForAlloc() &&
        ResultType->isObjCObjectPointerType()) {
      // [Foo alloc] -> objc_alloc(Foo)
      if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "alloc")
        return CGF.EmitObjCAlloc(Receiver, CGF.ConvertType(ResultType));
      // [Foo allocWithZone:nil] -> objc_allocWithZone(Foo)
      if (Sel.isKeywordSelector() && Sel.getNumArgs() == 1 &&
          Args.size() == 1 && Args.front().getType()->isPointerType() &&
          Sel.getNameForSlot(0) == "allocWithZone") {
        const llvm::Value* arg = Args.front().getKnownRValue().getScalarVal();
        if (isa<llvm::ConstantPointerNull>(arg))
          return CGF.EmitObjCAllocWithZone(Receiver,
                                           CGF.ConvertType(ResultType));
        return None;
      }
    }
    break;

  default:
    break;
  }
  return None;
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
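  // (A "delegate init call" is the "self = [super init...]" or
  //  "self = [self init...]" pattern inside another init method.)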
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver.
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                     E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    assert(ObjTy && "Invalid Objective-C class message send");
    OID = ObjTy->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method.
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    // Call runtime methods directly if we can.
    if (Optional<llvm::Value *> SpecializedResult =
            tryGenerateSpecializedMessageSend(*this, ResultType, Receiver, Args,
                                              E->getSelector(), method)) {
      result = RValue::get(SpecializedResult.getValue());
    } else {
      result = Runtime.GenerateMessageSend(*this, Return, ResultType,
                                           E->getSelector(), Receiver, Args,
                                           OID, method);
    }
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getBeginLoc();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  CGM.SetInternalFunctionAttributes(OMD, Fn, FI);

  args.push_back(OMD->getSelfDecl());
  args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getEndLoc();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  // In ARC, certain methods get an extra cleanup.
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
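///
/// For example, "- (int)foo:(int)x" is emitted as a C function conceptually
/// shaped like "int <impl>(id self, SEL _cmd, int x)"; the implicit 'self'
/// and '_cmd' parameters are pushed by StartObjCMethod above, and the actual
/// symbol name is runtime-specific.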
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  Address src =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
       .getAddress();

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
  args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
class PropertyImplStrategy {
public:
  enum StrategyKind {
    /// The 'native' strategy is to use the architecture's provided
    /// reads and writes.
    Native,

    /// Use objc_setProperty and objc_getProperty.
    GetSetProperty,

    /// Use objc_setProperty for the setter, but use expression
    /// evaluation for the getter.
    SetPropertyAndExpressionGet,

    /// Use objc_copyStruct.
    CopyStruct,

    /// The 'expression' strategy is to emit normal assignment or
    /// lvalue-to-rvalue expressions.
    Expression
  };

  StrategyKind getKind() const { return StrategyKind(Kind); }

  bool hasStrongMember() const { return HasStrong; }
  bool isAtomic() const { return IsAtomic; }
  bool isCopy() const { return IsCopy; }

  CharUnits getIvarSize() const { return IvarSize; }
  CharUnits getIvarAlignment() const { return IvarAlignment; }

  PropertyImplStrategy(CodeGenModule &CGM,
                       const ObjCPropertyImplDecl *propImpl);

private:
  unsigned Kind : 8;
  unsigned IsAtomic : 1;
  unsigned IsCopy : 1;
  unsigned HasStrong : 1;

  CharUnits IvarSize;
  CharUnits IvarAlignment;
};
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  std::tie(IvarSize, IvarAlignment) =
      CGM.getContext().getTypeInfoInChars(ivarType);

  // If we have a copy property, we always have to use getProperty/setProperty.
  // TODO: we could actually use setProperty and an expression for non-atomics.
  if (IsCopy) {
    Kind = GetSetProperty;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction();
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
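///
/// A sketch for "@property (atomic) MyCxxClass v;" with a non-trivial copy
/// constructor ('MyCxxClass' is an illustrative name): the getter body
/// becomes a single runtime call that invokes the previously generated copy
/// helper while providing the atomicity guarantee.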
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                            CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Constant *copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      auto *ret = ReturnStmt::Create(getContext(), SourceLocation(),
                                     propImpl->getGetterCXXConstructor(),
                                     /* NRVOCandidate=*/nullptr);
      EmitReturnStmt(*ret);
    } else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
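    // (For example, a 4-byte struct ivar is loaded as a single atomic i32
    //  and then stored through an i32-typed view of the return slot.)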
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    if (ivarSize > retTySize) {
      llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, newTy);
      bitcastType = newTy->getPointerTo();
    }
    Builder.CreateStore(ivarVal,
                        Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::Constant *getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::Instruction *CallInstruction;
    RValue RV = EmitCall(
        getTypes().arrangeBuiltinFunctionCall(propType, args),
        callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate: {
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(/* Dest= */ MakeAddrLValue(ReturnValue, ivarType),
                        /* Src= */ LV, ivarType, overlapForReturnValue());
      return;
    }
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress().getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                                                CGF.LoadObjCSelf(), ivar, 0)
                              .getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the sizeof the type.
  llvm::Value *size =
      CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
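///
/// This mirrors emitCPPObjectAtomicGetterCall above, with the ivar as the
/// destination and the parameter as the source; the helper runs the
/// non-trivial C++ copy assignment.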
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
      CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                            CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Constant *fn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());

    LValue ivarLValue =
        EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    Address ivarAddr = ivarLValue.getAddress();

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
        llvm::Type::getIntNTy(getLLVMContext(),
                              getContext().toBits(strategy.getIvarSize()));

    // Cast both arguments to the chosen operation type.
    argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
    ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);

    // This bitcast load is likely to cause some nasty IR.
    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store. There are no memory ordering requirements.
    llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
    store->setAtomic(llvm::AtomicOrdering::Unordered);
    return;
  }

  case PropertyImplStrategy::GetSetProperty:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    llvm::Constant *setOptimizedPropertyFn = nullptr;
    llvm::Constant *setPropertyFn = nullptr;
    if (UseOptimizedSetter(CGM)) {
      // The optimized objc_setProperty entrypoints are available on
      // OS X 10.8 / iOS 6.0 and later, and only when GC is off.
      setOptimizedPropertyFn =
          CGM.getObjCRuntime()
             .GetOptimizedPropertySetFunction(strategy.isAtomic(),
                                              strategy.isCopy());
      if (!setOptimizedPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
        return;
      }
    } else {
      setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
      if (!setPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
        return;
      }
    }

    // Emit objc_setProperty((id) self, _cmd, offset, arg,
    //                       <is-atomic>, <is-copy>).
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
    llvm::Value *self =
        Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);
    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
    llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
    arg = Builder.CreateBitCast(arg, VoidPtrTy);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    if (setOptimizedPropertyFn) {
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    } else {
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
               getContext().BoolTy);
      args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
               getContext().BoolTy);
      // FIXME: We shouldn't need to get the function info here, the runtime
      // already should have computed it to build the function.
      CGCallee callee = CGCallee::forDirect(setPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    }

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructSetterCall(*this, setterMethod, ivar);
    return;

  case PropertyImplStrategy::Expression:
    break;
  }

  // Otherwise, fake up some ASTs and emit a normal assignment.
  ValueDecl *selfDecl = setterMethod->getSelfDecl();
  DeclRefExpr self(selfDecl, false, selfDecl->getType(),
                   VK_LValue, SourceLocation());
  ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
                            selfDecl->getType(), CK_LValueToRValue, &self,
                            VK_RValue);
  ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
                          SourceLocation(), SourceLocation(),
                          &selfLoad, true, true);

  ParmVarDecl *argDecl = *setterMethod->param_begin();
  QualType argType = argDecl->getType().getNonReferenceType();
  DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
  ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
                           argType.getUnqualifiedType(), CK_LValueToRValue,
                           &arg, VK_RValue);

  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bit cast the RHS in these cases.
  // The following absurdity is just to ensure well-formed IR.
  CastKind argCK = CK_NoOp;
  if (ivarRef.getType()->isObjCObjectPointerType()) {
    if (argLoad.getType()->isObjCObjectPointerType())
      argCK = CK_BitCast;
    else if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BlockPointerToObjCPointerCast;
    else
      argCK = CK_CPointerToObjCPointerCast;
  } else if (ivarRef.getType()->isBlockPointerType()) {
    if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BitCast;
    else
      argCK = CK_AnyPointerToBlockPointerCast;
  } else if (ivarRef.getType()->isPointerType()) {
    argCK = CK_BitCast;
  }
  ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
                           ivarRef.getType(), argCK, &argLoad,
                           VK_RValue);
  Expr *finalArg = &argLoad;
  if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
                                           argLoad.getType()))
    finalArg = &argCast;

  BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
                        ivarRef.getType(), VK_RValue, OK_Ordinary,
                        SourceLocation(), FPOptions());
  EmitStmt(&assign);
}

/// Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
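///
/// For a simple case such as "@property (nonatomic, strong) id x;" under
/// ARC ('x' is an illustrative name), the synthesized body reduces to the
/// faked-up assignment emitted by generateObjCSetterBody above, which
/// expression emission in turn lowers to an objc_storeStrong of the ivar.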
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
  assert(OMD && "Invalid call to generate setter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCSetterBody(IMP, PID, AtomicHelperFn);

  FinishFunction();
}

namespace {
struct DestroyIvar final : EHScopeStack::Cleanup {
private:
  llvm::Value *addr;
  const ObjCIvarDecl *ivar;
  CodeGenFunction::Destroyer *destroyer;
  bool useEHCleanupForArray;
public:
  DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
              CodeGenFunction::Destroyer *destroyer,
              bool useEHCleanupForArray)
    : addr(addr), ivar(ivar), destroyer(destroyer),
      useEHCleanupForArray(useEHCleanupForArray) {}

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    LValue lvalue
      = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
    CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
                    flags.isForNormalCleanup() && useEHCleanupForArray);
  }
};
}

/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
                                      Address addr,
                                      QualType type) {
  llvm::Value *null = getNullForVariable(addr);
  CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}

static void emitCXXDestructMethod(CodeGenFunction &CGF,
                                  ObjCImplementationDecl *impl) {
  CodeGenFunction::RunCleanupsScope scope(CGF);

  llvm::Value *self = CGF.LoadObjCSelf();

  const ObjCInterfaceDecl *iface = impl->getClassInterface();
  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
       ivar; ivar = ivar->getNextIvar()) {
    QualType type = ivar->getType();

    // Check whether the ivar is a destructible type.
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    CodeGenFunction::Destroyer *destroyer = nullptr;

    // Use a call to objc_storeStrong to destroy strong ivars, for the
    // general benefit of the tools.
    if (dtorKind == QualType::DK_objc_strong_lifetime) {
      destroyer = destroyARCStrongWithStore;

    // Otherwise use the default for the destruction kind.
    } else {
      destroyer = CGF.getDestroyer(dtorKind);
    }

    CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);

    CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
                                         cleanupKind & EHCleanup);
  }

  assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}

void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                                 ObjCMethodDecl *MD,
                                                 bool ctor) {
  MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
  StartObjCMethod(MD, IMP->getClassInterface());

  // Emit .cxx_construct.
  if (ctor) {
    // Suppress the final autorelease in ARC.
1490 AutoreleaseResult = false; 1491 1492 for (const auto *IvarInit : IMP->inits()) { 1493 FieldDecl *Field = IvarInit->getAnyMember(); 1494 ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field); 1495 LValue LV = EmitLValueForIvar(TypeOfSelfObject(), 1496 LoadObjCSelf(), Ivar, 0); 1497 EmitAggExpr(IvarInit->getInit(), 1498 AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed, 1499 AggValueSlot::DoesNotNeedGCBarriers, 1500 AggValueSlot::IsNotAliased, 1501 AggValueSlot::DoesNotOverlap)); 1502 } 1503 // constructor returns 'self'. 1504 CodeGenTypes &Types = CGM.getTypes(); 1505 QualType IdTy(CGM.getContext().getObjCIdType()); 1506 llvm::Value *SelfAsId = 1507 Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy)); 1508 EmitReturnOfRValue(RValue::get(SelfAsId), IdTy); 1509 1510 // Emit .cxx_destruct. 1511 } else { 1512 emitCXXDestructMethod(*this, IMP); 1513 } 1514 FinishFunction(); 1515 } 1516 1517 llvm::Value *CodeGenFunction::LoadObjCSelf() { 1518 VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl(); 1519 DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl), 1520 Self->getType(), VK_LValue, SourceLocation()); 1521 return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation()); 1522 } 1523 1524 QualType CodeGenFunction::TypeOfSelfObject() { 1525 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl); 1526 ImplicitParamDecl *selfDecl = OMD->getSelfDecl(); 1527 const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>( 1528 getContext().getCanonicalType(selfDecl->getType())); 1529 return PTy->getPointeeType(); 1530 } 1531 1532 void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){ 1533 llvm::Constant *EnumerationMutationFnPtr = 1534 CGM.getObjCRuntime().EnumerationMutationFunction(); 1535 if (!EnumerationMutationFnPtr) { 1536 CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime"); 1537 return; 1538 } 1539 CGCallee EnumerationMutationFn = 1540 CGCallee::forDirect(EnumerationMutationFnPtr); 1541 1542 CGDebugInfo *DI = getDebugInfo(); 1543 if (DI) 1544 DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin()); 1545 1546 RunCleanupsScope ForScope(*this); 1547 1548 // The local variable comes into scope immediately. 1549 AutoVarEmission variable = AutoVarEmission::invalid(); 1550 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) 1551 variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl())); 1552 1553 JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end"); 1554 1555 // Fast enumeration state. 1556 QualType StateTy = CGM.getObjCFastEnumerationStateType(); 1557 Address StatePtr = CreateMemTemp(StateTy, "state.ptr"); 1558 EmitNullInitialization(StatePtr, StateTy); 1559 1560 // Number of elements in the items array. 1561 static const unsigned NumItems = 16; 1562 1563 // Fetch the countByEnumeratingWithState:objects:count: selector. 1564 IdentifierInfo *II[] = { 1565 &CGM.getContext().Idents.get("countByEnumeratingWithState"), 1566 &CGM.getContext().Idents.get("objects"), 1567 &CGM.getContext().Idents.get("count") 1568 }; 1569 Selector FastEnumSel = 1570 CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]); 1571 1572 QualType ItemsTy = 1573 getContext().getConstantArrayType(getContext().getObjCIdType(), 1574 llvm::APInt(32, NumItems), 1575 ArrayType::Normal, 0); 1576 Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr"); 1577 1578 // Emit the collection pointer. In ARC, we do a retain. 
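// The retain keeps the collection alive for the duration of the loop;
// the matching release is pushed as a cleanup immediately below.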
1579 llvm::Value *Collection; 1580 if (getLangOpts().ObjCAutoRefCount) { 1581 Collection = EmitARCRetainScalarExpr(S.getCollection()); 1582 1583 // Enter a cleanup to do the release. 1584 EmitObjCConsumeObject(S.getCollection()->getType(), Collection); 1585 } else { 1586 Collection = EmitScalarExpr(S.getCollection()); 1587 } 1588 1589 // The 'continue' label needs to appear within the cleanup for the 1590 // collection object. 1591 JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next"); 1592 1593 // Send it our message: 1594 CallArgList Args; 1595 1596 // The first argument is a temporary of the enumeration-state type. 1597 Args.add(RValue::get(StatePtr.getPointer()), 1598 getContext().getPointerType(StateTy)); 1599 1600 // The second argument is a temporary array with space for NumItems 1601 // pointers. We'll actually be loading elements from the array 1602 // pointer written into the control state; this buffer is so that 1603 // collections that *aren't* backed by arrays can still queue up 1604 // batches of elements. 1605 Args.add(RValue::get(ItemsPtr.getPointer()), 1606 getContext().getPointerType(ItemsTy)); 1607 1608 // The third argument is the capacity of that temporary array. 1609 llvm::Type *NSUIntegerTy = ConvertType(getContext().getNSUIntegerType()); 1610 llvm::Constant *Count = llvm::ConstantInt::get(NSUIntegerTy, NumItems); 1611 Args.add(RValue::get(Count), getContext().getNSUIntegerType()); 1612 1613 // Start the enumeration. 1614 RValue CountRV = 1615 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1616 getContext().getNSUIntegerType(), 1617 FastEnumSel, Collection, Args); 1618 1619 // The initial number of objects that were returned in the buffer. 1620 llvm::Value *initialBufferLimit = CountRV.getScalarVal(); 1621 1622 llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty"); 1623 llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit"); 1624 1625 llvm::Value *zero = llvm::Constant::getNullValue(NSUIntegerTy); 1626 1627 // If the limit pointer was zero to begin with, the collection is 1628 // empty; skip all this. Set the branch weight assuming this has the same 1629 // probability of exiting the loop as any other loop exit. 1630 uint64_t EntryCount = getCurrentProfileCount(); 1631 Builder.CreateCondBr( 1632 Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, 1633 LoopInitBB, 1634 createProfileWeights(EntryCount, getProfileCount(S.getBody()))); 1635 1636 // Otherwise, initialize the loop. 1637 EmitBlock(LoopInitBB); 1638 1639 // Save the initial mutations value. This is the value at an 1640 // address that was written into the state object by 1641 // countByEnumeratingWithState:objects:count:. 1642 Address StateMutationsPtrPtr = Builder.CreateStructGEP( 1643 StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr"); 1644 llvm::Value *StateMutationsPtr 1645 = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1646 1647 llvm::Value *initialMutations = 1648 Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(), 1649 "forcoll.initial-mutations"); 1650 1651 // Start looping. This is the point we return to whenever we have a 1652 // fresh, non-empty batch of objects. 1653 llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody"); 1654 EmitBlock(LoopBodyBB); 1655 1656 // The current index into the buffer. 1657 llvm::PHINode *index = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.index"); 1658 index->addIncoming(zero, LoopInitBB); 1659 1660 // The current buffer size. 
1661 llvm::PHINode *count = Builder.CreatePHI(NSUIntegerTy, 3, "forcoll.count"); 1662 count->addIncoming(initialBufferLimit, LoopInitBB); 1663 1664 incrementProfileCounter(&S); 1665 1666 // Check whether the mutations value has changed from where it was 1667 // at start. StateMutationsPtr should actually be invariant between 1668 // refreshes. 1669 StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1670 llvm::Value *currentMutations 1671 = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(), 1672 "statemutations"); 1673 1674 llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated"); 1675 llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated"); 1676 1677 Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations), 1678 WasNotMutatedBB, WasMutatedBB); 1679 1680 // If so, call the enumeration-mutation function. 1681 EmitBlock(WasMutatedBB); 1682 llvm::Value *V = 1683 Builder.CreateBitCast(Collection, 1684 ConvertType(getContext().getObjCIdType())); 1685 CallArgList Args2; 1686 Args2.add(RValue::get(V), getContext().getObjCIdType()); 1687 // FIXME: We shouldn't need to get the function info here, the runtime already 1688 // should have computed it to build the function. 1689 EmitCall( 1690 CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2), 1691 EnumerationMutationFn, ReturnValueSlot(), Args2); 1692 1693 // Otherwise, or if the mutation function returns, just continue. 1694 EmitBlock(WasNotMutatedBB); 1695 1696 // Initialize the element variable. 1697 RunCleanupsScope elementVariableScope(*this); 1698 bool elementIsVariable; 1699 LValue elementLValue; 1700 QualType elementType; 1701 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { 1702 // Initialize the variable, in case it's a __block variable or something. 1703 EmitAutoVarInit(variable); 1704 1705 const VarDecl* D = cast<VarDecl>(SD->getSingleDecl()); 1706 DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(), 1707 VK_LValue, SourceLocation()); 1708 elementLValue = EmitLValue(&tempDRE); 1709 elementType = D->getType(); 1710 elementIsVariable = true; 1711 1712 if (D->isARCPseudoStrong()) 1713 elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); 1714 } else { 1715 elementLValue = LValue(); // suppress warning 1716 elementType = cast<Expr>(S.getElement())->getType(); 1717 elementIsVariable = false; 1718 } 1719 llvm::Type *convertedElementType = ConvertType(elementType); 1720 1721 // Fetch the buffer out of the enumeration state. 1722 // TODO: this pointer should actually be invariant between 1723 // refreshes, which would help us do certain loop optimizations. 1724 Address StateItemsPtr = Builder.CreateStructGEP( 1725 StatePtr, 1, getPointerSize(), "stateitems.ptr"); 1726 llvm::Value *EnumStateItems = 1727 Builder.CreateLoad(StateItemsPtr, "stateitems"); 1728 1729 // Fetch the value at the current index from the buffer. 1730 llvm::Value *CurrentItemPtr = 1731 Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr"); 1732 llvm::Value *CurrentItem = 1733 Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign()); 1734 1735 // Cast that value to the right type. 1736 CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType, 1737 "currentitem"); 1738 1739 // Make sure we have an l-value. Yes, this gets evaluated every 1740 // time through the loop. 
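// (elementIsVariable distinguishes 'for (id x in collection)', which
// declares the element variable, from 'for (x in collection)', which
// assigns through a pre-existing l-value; both forms are illustrative.)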
1741 if (!elementIsVariable) { 1742 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 1743 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue); 1744 } else { 1745 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, 1746 /*isInit*/ true); 1747 } 1748 1749 // If we do have an element variable, this assignment is the end of 1750 // its initialization. 1751 if (elementIsVariable) 1752 EmitAutoVarCleanups(variable); 1753 1754 // Perform the loop body, setting up break and continue labels. 1755 BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); 1756 { 1757 RunCleanupsScope Scope(*this); 1758 EmitStmt(S.getBody()); 1759 } 1760 BreakContinueStack.pop_back(); 1761 1762 // Destroy the element variable now. 1763 elementVariableScope.ForceCleanup(); 1764 1765 // Check whether there are more elements. 1766 EmitBlock(AfterBody.getBlock()); 1767 1768 llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch"); 1769 1770 // First we check in the local buffer. 1771 llvm::Value *indexPlusOne = 1772 Builder.CreateAdd(index, llvm::ConstantInt::get(NSUIntegerTy, 1)); 1773 1774 // If we haven't overrun the buffer yet, we can continue. 1775 // Set the branch weights based on the simplifying assumption that this is 1776 // like a while-loop, i.e., ignoring that the false branch fetches more 1777 // elements and then returns to the loop. 1778 Builder.CreateCondBr( 1779 Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, 1780 createProfileWeights(getProfileCount(S.getBody()), EntryCount)); 1781 1782 index->addIncoming(indexPlusOne, AfterBody.getBlock()); 1783 count->addIncoming(count, AfterBody.getBlock()); 1784 1785 // Otherwise, we have to fetch more elements. 1786 EmitBlock(FetchMoreBB); 1787 1788 CountRV = 1789 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1790 getContext().getNSUIntegerType(), 1791 FastEnumSel, Collection, Args); 1792 1793 // If we got a zero count, we're done. 1794 llvm::Value *refetchCount = CountRV.getScalarVal(); 1795 1796 // (note that the message send might split FetchMoreBB) 1797 index->addIncoming(zero, Builder.GetInsertBlock()); 1798 count->addIncoming(refetchCount, Builder.GetInsertBlock()); 1799 1800 Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero), 1801 EmptyBB, LoopBodyBB); 1802 1803 // No more elements. 1804 EmitBlock(EmptyBB); 1805 1806 if (!elementIsVariable) { 1807 // If the element was not a declaration, set it to be null. 
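// For example, after 'for (x in collection) { ... }' exhausts the
// collection, x is left as nil (an illustrative source form).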
1808 1809 llvm::Value *null = llvm::Constant::getNullValue(convertedElementType); 1810 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 1811 EmitStoreThroughLValue(RValue::get(null), elementLValue); 1812 } 1813 1814 if (DI) 1815 DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); 1816 1817 ForScope.ForceCleanup(); 1818 EmitBlock(LoopEnd.getBlock()); 1819 } 1820 1821 void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { 1822 CGM.getObjCRuntime().EmitTryStmt(*this, S); 1823 } 1824 1825 void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { 1826 CGM.getObjCRuntime().EmitThrowStmt(*this, S); 1827 } 1828 1829 void CodeGenFunction::EmitObjCAtSynchronizedStmt( 1830 const ObjCAtSynchronizedStmt &S) { 1831 CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); 1832 } 1833 1834 namespace { 1835 struct CallObjCRelease final : EHScopeStack::Cleanup { 1836 CallObjCRelease(llvm::Value *object) : object(object) {} 1837 llvm::Value *object; 1838 1839 void Emit(CodeGenFunction &CGF, Flags flags) override { 1840 // Releases at the end of the full-expression are imprecise. 1841 CGF.EmitARCRelease(object, ARCImpreciseLifetime); 1842 } 1843 }; 1844 } 1845 1846 /// Produce the code for a CK_ARCConsumeObject. Does a primitive 1847 /// release at the end of the full-expression. 1848 llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, 1849 llvm::Value *object) { 1850 // If we're in a conditional branch, we need to make the cleanup 1851 // conditional. 1852 pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object); 1853 return object; 1854 } 1855 1856 llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, 1857 llvm::Value *value) { 1858 return EmitARCRetainAutorelease(type, value); 1859 } 1860 1861 /// Given a number of pointers, inform the optimizer that they're 1862 /// being intrinsically used up until this point in the program. 1863 void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { 1864 llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use; 1865 if (!fn) { 1866 llvm::FunctionType *fnType = 1867 llvm::FunctionType::get(CGM.VoidTy, None, true); 1868 fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use"); 1869 } 1870 1871 // This isn't really a "runtime" function, but as an intrinsic it 1872 // doesn't really matter as long as we align things up. 1873 EmitNounwindRuntimeCall(fn, values); 1874 } 1875 1876 1877 static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM, 1878 llvm::FunctionType *FTy, 1879 StringRef Name) { 1880 llvm::Constant *RTF = CGM.CreateRuntimeFunction(FTy, Name); 1881 1882 if (auto *F = dyn_cast<llvm::Function>(RTF)) { 1883 // If the target runtime doesn't naturally support ARC, emit weak 1884 // references to the runtime support library. We don't really 1885 // permit this to fail, but we need a particular relocation style. 1886 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && 1887 !CGM.getTriple().isOSBinFormatCOFF()) { 1888 F->setLinkage(llvm::Function::ExternalWeakLinkage); 1889 } else if (Name == "objc_retain" || Name == "objc_release") { 1890 // If we have Native ARC, set nonlazybind attribute for these APIs for 1891 // performance. 1892 F->addFnAttr(llvm::Attribute::NonLazyBind); 1893 } 1894 } 1895 1896 return RTF; 1897 } 1898 1899 /// Perform an operation having the signature 1900 /// i8* (i8*) 1901 /// where a null input causes a no-op and returns null. 
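/// For instance, with fnName == "objc_retain" the emitted IR is roughly:
///   %0 = bitcast %SomeTy* %value to i8*
///   %1 = call i8* \@objc_retain(i8* %0)
///   %2 = bitcast i8* %1 to %SomeTy*
/// (an illustrative sketch; %SomeTy stands in for any pointer type).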
1902 static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF, 1903 llvm::Value *value, 1904 llvm::Type *returnType, 1905 llvm::Constant *&fn, 1906 StringRef fnName, 1907 bool isTailCall = false) { 1908 if (isa<llvm::ConstantPointerNull>(value)) 1909 return value; 1910 1911 if (!fn) { 1912 llvm::FunctionType *fnType = 1913 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false); 1914 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1915 } 1916 1917 // Cast the argument to 'id'. 1918 llvm::Type *origType = returnType ? returnType : value->getType(); 1919 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); 1920 1921 // Call the function. 1922 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value); 1923 if (isTailCall) 1924 call->setTailCall(); 1925 1926 // Cast the result back to the original type. 1927 return CGF.Builder.CreateBitCast(call, origType); 1928 } 1929 1930 /// Perform an operation having the following signature: 1931 /// i8* (i8**) 1932 static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, 1933 Address addr, 1934 llvm::Constant *&fn, 1935 StringRef fnName) { 1936 if (!fn) { 1937 llvm::FunctionType *fnType = 1938 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false); 1939 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1940 } 1941 1942 // Cast the argument to 'id*'. 1943 llvm::Type *origType = addr.getElementType(); 1944 addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy); 1945 1946 // Call the function. 1947 llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer()); 1948 1949 // Cast the result back to a dereference of the original type. 1950 if (origType != CGF.Int8PtrTy) 1951 result = CGF.Builder.CreateBitCast(result, origType); 1952 1953 return result; 1954 } 1955 1956 /// Perform an operation having the following signature: 1957 /// i8* (i8**, i8*) 1958 static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, 1959 Address addr, 1960 llvm::Value *value, 1961 llvm::Constant *&fn, 1962 StringRef fnName, 1963 bool ignored) { 1964 assert(addr.getElementType() == value->getType()); 1965 1966 if (!fn) { 1967 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy }; 1968 1969 llvm::FunctionType *fnType 1970 = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false); 1971 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1972 } 1973 1974 llvm::Type *origType = value->getType(); 1975 1976 llvm::Value *args[] = { 1977 CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy), 1978 CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy) 1979 }; 1980 llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); 1981 1982 if (ignored) return nullptr; 1983 1984 return CGF.Builder.CreateBitCast(result, origType); 1985 } 1986 1987 /// Perform an operation having the following signature: 1988 /// void (i8**, i8**) 1989 static void emitARCCopyOperation(CodeGenFunction &CGF, 1990 Address dst, 1991 Address src, 1992 llvm::Constant *&fn, 1993 StringRef fnName) { 1994 assert(dst.getType() == src.getType()); 1995 1996 if (!fn) { 1997 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy }; 1998 1999 llvm::FunctionType *fnType 2000 = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false); 2001 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 2002 } 2003 2004 llvm::Value *args[] = { 2005 CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy), 2006 CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy) 2007 }; 2008 CGF.EmitNounwindRuntimeCall(fn, args); 2009 } 2010 2011 
/// Produce the code to do a retain. Based on the type, calls one of: 2012 /// call i8* \@objc_retain(i8* %value) 2013 /// call i8* \@objc_retainBlock(i8* %value) 2014 llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { 2015 if (type->isBlockPointerType()) 2016 return EmitARCRetainBlock(value, /*mandatory*/ false); 2017 else 2018 return EmitARCRetainNonBlock(value); 2019 } 2020 2021 /// Retain the given object, with normal retain semantics. 2022 /// call i8* \@objc_retain(i8* %value) 2023 llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { 2024 return emitARCValueOperation(*this, value, nullptr, 2025 CGM.getObjCEntrypoints().objc_retain, 2026 "objc_retain"); 2027 } 2028 2029 /// Retain the given block, with _Block_copy semantics. 2030 /// call i8* \@objc_retainBlock(i8* %value) 2031 /// 2032 /// \param mandatory - If false, emit the call with metadata 2033 /// indicating that it's okay for the optimizer to eliminate this call 2034 /// if it can prove that the block never escapes except down the stack. 2035 llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value, 2036 bool mandatory) { 2037 llvm::Value *result 2038 = emitARCValueOperation(*this, value, nullptr, 2039 CGM.getObjCEntrypoints().objc_retainBlock, 2040 "objc_retainBlock"); 2041 2042 // If the copy isn't mandatory, add !clang.arc.copy_on_escape to 2043 // tell the optimizer that it doesn't need to do this copy if the 2044 // block doesn't escape, where being passed as an argument doesn't 2045 // count as escaping. 2046 if (!mandatory && isa<llvm::Instruction>(result)) { 2047 llvm::CallInst *call 2048 = cast<llvm::CallInst>(result->stripPointerCasts()); 2049 assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock); 2050 2051 call->setMetadata("clang.arc.copy_on_escape", 2052 llvm::MDNode::get(Builder.getContext(), None)); 2053 } 2054 2055 return result; 2056 } 2057 2058 static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) { 2059 // Fetch the void(void) inline asm which marks that we're going to 2060 // do something with the autoreleased return value. 2061 llvm::InlineAsm *&marker 2062 = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker; 2063 if (!marker) { 2064 StringRef assembly 2065 = CGF.CGM.getTargetCodeGenInfo() 2066 .getARCRetainAutoreleasedReturnValueMarker(); 2067 2068 // If we have an empty assembly string, there's nothing to do. 2069 if (assembly.empty()) { 2070 2071 // Otherwise, at -O0, build an inline asm that we're going to call 2072 // in a moment. 2073 } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) { 2074 llvm::FunctionType *type = 2075 llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false); 2076 2077 marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true); 2078 2079 // If we're at -O1 and above, we don't want to litter the code 2080 // with this marker yet, so leave a breadcrumb for the ARC 2081 // optimizer to pick up. 2082 } else { 2083 llvm::NamedMDNode *metadata = 2084 CGF.CGM.getModule().getOrInsertNamedMetadata( 2085 "clang.arc.retainAutoreleasedReturnValueMarker"); 2086 assert(metadata->getNumOperands() <= 1); 2087 if (metadata->getNumOperands() == 0) { 2088 auto &ctx = CGF.getLLVMContext(); 2089 metadata->addOperand(llvm::MDNode::get(ctx, 2090 llvm::MDString::get(ctx, assembly))); 2091 } 2092 } 2093 } 2094 2095 // Call the marker asm if we made one, which we do only at -O0. 
2096 if (marker) 2097 CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker)); 2098 } 2099 2100 /// Retain the given object which is the result of a function call. 2101 /// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value) 2102 /// 2103 /// Yes, this function name is one character away from a different 2104 /// call with completely different semantics. 2105 llvm::Value * 2106 CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) { 2107 emitAutoreleasedReturnValueMarker(*this); 2108 return emitARCValueOperation(*this, value, nullptr, 2109 CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue, 2110 "objc_retainAutoreleasedReturnValue"); 2111 } 2112 2113 /// Claim a possibly-autoreleased return value at +0. This is only 2114 /// valid to do in contexts which do not rely on the retain to keep 2115 /// the object valid for all of its uses; for example, when 2116 /// the value is ignored, or when it is being assigned to an 2117 /// __unsafe_unretained variable. 2118 /// 2119 /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) 2120 llvm::Value * 2121 CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { 2122 emitAutoreleasedReturnValueMarker(*this); 2123 return emitARCValueOperation(*this, value, nullptr, 2124 CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue, 2125 "objc_unsafeClaimAutoreleasedReturnValue"); 2126 } 2127 2128 /// Release the given object. 2129 /// call void \@objc_release(i8* %value) 2130 void CodeGenFunction::EmitARCRelease(llvm::Value *value, 2131 ARCPreciseLifetime_t precise) { 2132 if (isa<llvm::ConstantPointerNull>(value)) return; 2133 2134 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release; 2135 if (!fn) { 2136 llvm::FunctionType *fnType = 2137 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); 2138 fn = createARCRuntimeFunction(CGM, fnType, "objc_release"); 2139 } 2140 2141 // Cast the argument to 'id'. 2142 value = Builder.CreateBitCast(value, Int8PtrTy); 2143 2144 // Call objc_release. 2145 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value); 2146 2147 if (precise == ARCImpreciseLifetime) { 2148 call->setMetadata("clang.imprecise_release", 2149 llvm::MDNode::get(Builder.getContext(), None)); 2150 } 2151 } 2152 2153 /// Destroy a __strong variable. 2154 /// 2155 /// At -O0, emit a call to store 'null' into the address; 2156 /// instrumenting tools prefer this because the address is exposed, 2157 /// but it's relatively cumbersome to optimize. 2158 /// 2159 /// At -O1 and above, just load and call objc_release. 2160 /// 2161 /// call void \@objc_storeStrong(i8** %addr, i8* null) 2162 void CodeGenFunction::EmitARCDestroyStrong(Address addr, 2163 ARCPreciseLifetime_t precise) { 2164 if (CGM.getCodeGenOpts().OptimizationLevel == 0) { 2165 llvm::Value *null = getNullForVariable(addr); 2166 EmitARCStoreStrongCall(addr, null, /*ignored*/ true); 2167 return; 2168 } 2169 2170 llvm::Value *value = Builder.CreateLoad(addr); 2171 EmitARCRelease(value, precise); 2172 } 2173 2174 /// Store into a strong object. 
Always calls this: 2175 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2176 llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, 2177 llvm::Value *value, 2178 bool ignored) { 2179 assert(addr.getElementType() == value->getType()); 2180 2181 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong; 2182 if (!fn) { 2183 llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy }; 2184 llvm::FunctionType *fnType 2185 = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false); 2186 fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong"); 2187 } 2188 2189 llvm::Value *args[] = { 2190 Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy), 2191 Builder.CreateBitCast(value, Int8PtrTy) 2192 }; 2193 EmitNounwindRuntimeCall(fn, args); 2194 2195 if (ignored) return nullptr; 2196 return value; 2197 } 2198 2199 /// Store into a strong object. Sometimes calls this: 2200 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2201 /// Other times, breaks it down into components. 2202 llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, 2203 llvm::Value *newValue, 2204 bool ignored) { 2205 QualType type = dst.getType(); 2206 bool isBlock = type->isBlockPointerType(); 2207 2208 // Use a store barrier at -O0 unless this is a block type or the 2209 // lvalue is inadequately aligned. 2210 if (shouldUseFusedARCCalls() && 2211 !isBlock && 2212 (dst.getAlignment().isZero() || 2213 dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { 2214 return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored); 2215 } 2216 2217 // Otherwise, split it out. 2218 2219 // Retain the new value. 2220 newValue = EmitARCRetain(type, newValue); 2221 2222 // Read the old value. 2223 llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation()); 2224 2225 // Store. We do this before the release so that any deallocs won't 2226 // see the old value. 2227 EmitStoreOfScalar(newValue, dst); 2228 2229 // Finally, release the old value. 2230 EmitARCRelease(oldValue, dst.isARCPreciseLifetime()); 2231 2232 return newValue; 2233 } 2234 2235 /// Autorelease the given object. 2236 /// call i8* \@objc_autorelease(i8* %value) 2237 llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) { 2238 return emitARCValueOperation(*this, value, nullptr, 2239 CGM.getObjCEntrypoints().objc_autorelease, 2240 "objc_autorelease"); 2241 } 2242 2243 /// Autorelease the given object. 2244 /// call i8* \@objc_autoreleaseReturnValue(i8* %value) 2245 llvm::Value * 2246 CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) { 2247 return emitARCValueOperation(*this, value, nullptr, 2248 CGM.getObjCEntrypoints().objc_autoreleaseReturnValue, 2249 "objc_autoreleaseReturnValue", 2250 /*isTailCall*/ true); 2251 } 2252 2253 /// Do a fused retain/autorelease of the given object. 2254 /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value) 2255 llvm::Value * 2256 CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) { 2257 return emitARCValueOperation(*this, value, nullptr, 2258 CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue, 2259 "objc_retainAutoreleaseReturnValue", 2260 /*isTailCall*/ true); 2261 } 2262 2263 /// Do a fused retain/autorelease of the given object. 
2264 /// call i8* \@objc_retainAutorelease(i8* %value) 2265 /// or 2266 /// %retain = call i8* \@objc_retainBlock(i8* %value) 2267 /// call i8* \@objc_autorelease(i8* %retain) 2268 llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type, 2269 llvm::Value *value) { 2270 if (!type->isBlockPointerType()) 2271 return EmitARCRetainAutoreleaseNonBlock(value); 2272 2273 if (isa<llvm::ConstantPointerNull>(value)) return value; 2274 2275 llvm::Type *origType = value->getType(); 2276 value = Builder.CreateBitCast(value, Int8PtrTy); 2277 value = EmitARCRetainBlock(value, /*mandatory*/ true); 2278 value = EmitARCAutorelease(value); 2279 return Builder.CreateBitCast(value, origType); 2280 } 2281 2282 /// Do a fused retain/autorelease of the given object. 2283 /// call i8* \@objc_retainAutorelease(i8* %value) 2284 llvm::Value * 2285 CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) { 2286 return emitARCValueOperation(*this, value, nullptr, 2287 CGM.getObjCEntrypoints().objc_retainAutorelease, 2288 "objc_retainAutorelease"); 2289 } 2290 2291 /// i8* \@objc_loadWeak(i8** %addr) 2292 /// Essentially objc_autorelease(objc_loadWeakRetained(addr)). 2293 llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) { 2294 return emitARCLoadOperation(*this, addr, 2295 CGM.getObjCEntrypoints().objc_loadWeak, 2296 "objc_loadWeak"); 2297 } 2298 2299 /// i8* \@objc_loadWeakRetained(i8** %addr) 2300 llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) { 2301 return emitARCLoadOperation(*this, addr, 2302 CGM.getObjCEntrypoints().objc_loadWeakRetained, 2303 "objc_loadWeakRetained"); 2304 } 2305 2306 /// i8* \@objc_storeWeak(i8** %addr, i8* %value) 2307 /// Returns %value. 2308 llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr, 2309 llvm::Value *value, 2310 bool ignored) { 2311 return emitARCStoreOperation(*this, addr, value, 2312 CGM.getObjCEntrypoints().objc_storeWeak, 2313 "objc_storeWeak", ignored); 2314 } 2315 2316 /// i8* \@objc_initWeak(i8** %addr, i8* %value) 2317 /// Returns %value. %addr is known to not have a current weak entry. 2318 /// Essentially equivalent to: 2319 /// *addr = nil; objc_storeWeak(addr, value); 2320 void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) { 2321 // If we're initializing to null, just write null to memory; no need 2322 // to get the runtime involved. But don't do this if optimization 2323 // is enabled, because accounting for this would make the optimizer 2324 // much more complicated. 2325 if (isa<llvm::ConstantPointerNull>(value) && 2326 CGM.getCodeGenOpts().OptimizationLevel == 0) { 2327 Builder.CreateStore(value, addr); 2328 return; 2329 } 2330 2331 emitARCStoreOperation(*this, addr, value, 2332 CGM.getObjCEntrypoints().objc_initWeak, 2333 "objc_initWeak", /*ignored*/ true); 2334 } 2335 2336 /// void \@objc_destroyWeak(i8** %addr) 2337 /// Essentially objc_storeWeak(addr, nil). 2338 void CodeGenFunction::EmitARCDestroyWeak(Address addr) { 2339 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak; 2340 if (!fn) { 2341 llvm::FunctionType *fnType = 2342 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false); 2343 fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak"); 2344 } 2345 2346 // Cast the argument to 'id*'. 2347 addr = Builder.CreateBitCast(addr, Int8PtrPtrTy); 2348 2349 EmitNounwindRuntimeCall(fn, addr.getPointer()); 2350 } 2351 2352 /// void \@objc_moveWeak(i8** %dest, i8** %src) 2353 /// Disregards the current value in %dest. Leaves %src pointing to nothing. 
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
2355 void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
2356 emitARCCopyOperation(*this, dst, src,
2357 CGM.getObjCEntrypoints().objc_moveWeak,
2358 "objc_moveWeak");
2359 }
2360
2361 /// void \@objc_copyWeak(i8** %dest, i8** %src)
2362 /// Disregards the current value in %dest. Essentially
2363 /// objc_release(objc_initWeak(dest, objc_loadWeakRetained(src)))
2364 void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
2365 emitARCCopyOperation(*this, dst, src,
2366 CGM.getObjCEntrypoints().objc_copyWeak,
2367 "objc_copyWeak");
2368 }
2369
2370 void CodeGenFunction::emitARCCopyAssignWeak(QualType Ty, Address DstAddr,
2371 Address SrcAddr) {
2372 llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
2373 Object = EmitObjCConsumeObject(Ty, Object);
2374 EmitARCStoreWeak(DstAddr, Object, false);
2375 }
2376
2377 void CodeGenFunction::emitARCMoveAssignWeak(QualType Ty, Address DstAddr,
2378 Address SrcAddr) {
2379 llvm::Value *Object = EmitARCLoadWeakRetained(SrcAddr);
2380 Object = EmitObjCConsumeObject(Ty, Object);
2381 EmitARCStoreWeak(DstAddr, Object, false);
2382 EmitARCDestroyWeak(SrcAddr);
2383 }
2384
2385 /// Produce the code to do an objc_autoreleasepool_push.
2386 /// call i8* \@objc_autoreleasePoolPush(void)
2387 llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
2388 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
2389 if (!fn) {
2390 llvm::FunctionType *fnType =
2391 llvm::FunctionType::get(Int8PtrTy, false);
2392 fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
2393 }
2394
2395 return EmitNounwindRuntimeCall(fn);
2396 }
2397
2398 /// Produce the code to pop an autorelease pool.
2399 /// call void \@objc_autoreleasePoolPop(i8* %ptr)
2400 void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
2401 assert(value->getType() == Int8PtrTy);
2402
2403 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
2404 if (!fn) {
2405 llvm::FunctionType *fnType =
2406 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);
2407
2408 // We don't want to use a weak import here; instead we should not
2409 // fall into this path.
2410 fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
2411 }
2412
2413 // objc_autoreleasePoolPop can throw.
2414 EmitRuntimeCallOrInvoke(fn, value);
2415 }
2416
2417 /// Produce the code for the MRR version of objc_autoreleasepool_push,
2418 /// namely [[NSAutoreleasePool alloc] init],
2419 /// where alloc is declared as + (id)alloc in the NSAutoreleasePool class
2420 /// and init is declared as - (id)init in its NSObject superclass.
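/// Under MRR, an \@autoreleasepool statement therefore lowers roughly to
/// this illustrative source form:
///   id tmp = [[NSAutoreleasePool alloc] init];
///   ... body ...
///   [tmp drain];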
2421 /// 2422 llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() { 2423 CGObjCRuntime &Runtime = CGM.getObjCRuntime(); 2424 llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this); 2425 // [NSAutoreleasePool alloc] 2426 IdentifierInfo *II = &CGM.getContext().Idents.get("alloc"); 2427 Selector AllocSel = getContext().Selectors.getSelector(0, &II); 2428 CallArgList Args; 2429 RValue AllocRV = 2430 Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 2431 getContext().getObjCIdType(), 2432 AllocSel, Receiver, Args); 2433 2434 // [Receiver init] 2435 Receiver = AllocRV.getScalarVal(); 2436 II = &CGM.getContext().Idents.get("init"); 2437 Selector InitSel = getContext().Selectors.getSelector(0, &II); 2438 RValue InitRV = 2439 Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 2440 getContext().getObjCIdType(), 2441 InitSel, Receiver, Args); 2442 return InitRV.getScalarVal(); 2443 } 2444 2445 /// Allocate the given objc object. 2446 /// call i8* \@objc_alloc(i8* %value) 2447 llvm::Value *CodeGenFunction::EmitObjCAlloc(llvm::Value *value, 2448 llvm::Type *resultType) { 2449 return emitARCValueOperation(*this, value, resultType, 2450 CGM.getObjCEntrypoints().objc_alloc, 2451 "objc_alloc"); 2452 } 2453 2454 /// Allocate the given objc object. 2455 /// call i8* \@objc_allocWithZone(i8* %value) 2456 llvm::Value *CodeGenFunction::EmitObjCAllocWithZone(llvm::Value *value, 2457 llvm::Type *resultType) { 2458 return emitARCValueOperation(*this, value, resultType, 2459 CGM.getObjCEntrypoints().objc_allocWithZone, 2460 "objc_allocWithZone"); 2461 } 2462 2463 /// Produce the code to do a primitive release. 2464 /// [tmp drain]; 2465 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { 2466 IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); 2467 Selector DrainSel = getContext().Selectors.getSelector(0, &II); 2468 CallArgList Args; 2469 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 2470 getContext().VoidTy, DrainSel, Arg, Args); 2471 } 2472 2473 void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, 2474 Address addr, 2475 QualType type) { 2476 CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime); 2477 } 2478 2479 void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, 2480 Address addr, 2481 QualType type) { 2482 CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime); 2483 } 2484 2485 void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, 2486 Address addr, 2487 QualType type) { 2488 CGF.EmitARCDestroyWeak(addr); 2489 } 2490 2491 void CodeGenFunction::emitARCIntrinsicUse(CodeGenFunction &CGF, Address addr, 2492 QualType type) { 2493 llvm::Value *value = CGF.Builder.CreateLoad(addr); 2494 CGF.EmitARCIntrinsicUse(value); 2495 } 2496 2497 namespace { 2498 struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup { 2499 llvm::Value *Token; 2500 2501 CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2502 2503 void Emit(CodeGenFunction &CGF, Flags flags) override { 2504 CGF.EmitObjCAutoreleasePoolPop(Token); 2505 } 2506 }; 2507 struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup { 2508 llvm::Value *Token; 2509 2510 CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2511 2512 void Emit(CodeGenFunction &CGF, Flags flags) override { 2513 CGF.EmitObjCMRRAutoreleasePoolPop(Token); 2514 } 2515 }; 2516 } 2517 2518 void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) { 2519 if (CGM.getLangOpts().ObjCAutoRefCount) 2520 
EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
2521 else
2522 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
2523 }
2524
2525 static bool shouldRetainObjCLifetime(Qualifiers::ObjCLifetime lifetime) {
2526 switch (lifetime) {
2527 case Qualifiers::OCL_None:
2528 case Qualifiers::OCL_ExplicitNone:
2529 case Qualifiers::OCL_Strong:
2530 case Qualifiers::OCL_Autoreleasing:
2531 return true;
2532
2533 case Qualifiers::OCL_Weak:
2534 return false;
2535 }
2536
2537 llvm_unreachable("impossible lifetime!");
2538 }
2539
2540 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2541 LValue lvalue,
2542 QualType type) {
2543 llvm::Value *result;
2544 bool shouldRetain = shouldRetainObjCLifetime(type.getObjCLifetime());
2545 if (shouldRetain) {
2546 result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
2547 } else {
2548 assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
2549 result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
2550 }
2551 return TryEmitResult(result, !shouldRetain);
2552 }
2553
2554 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
2555 const Expr *e) {
2556 e = e->IgnoreParens();
2557 QualType type = e->getType();
2558
2559 // If we're loading retained from a __strong xvalue, we can avoid
2560 // an extra retain/release pair by zeroing out the source of this
2561 // "move" operation.
2562 if (e->isXValue() &&
2563 !type.isConstQualified() &&
2564 type.getObjCLifetime() == Qualifiers::OCL_Strong) {
2565 // Emit the lvalue.
2566 LValue lv = CGF.EmitLValue(e);
2567
2568 // Load the object pointer.
2569 llvm::Value *result = CGF.EmitLoadOfLValue(lv,
2570 SourceLocation()).getScalarVal();
2571
2572 // Set the source pointer to NULL.
2573 CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
2574
2575 return TryEmitResult(result, true);
2576 }
2577
2578 // As a very special optimization, in ARC++, if the l-value is the
2579 // result of a non-volatile assignment, do a simple retain of the
2580 // result of the call to objc_storeWeak instead of reloading.
2581 if (CGF.getLangOpts().CPlusPlus &&
2582 !type.isVolatileQualified() &&
2583 type.getObjCLifetime() == Qualifiers::OCL_Weak &&
2584 isa<BinaryOperator>(e) &&
2585 cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
2586 return TryEmitResult(CGF.EmitScalarExpr(e), false);
2587
2588 // Try to emit the scalar constant directly instead of emitting an LValue
2589 // and loading it, since we are not guaranteed to have an l-value: e.g., a
2590 // DeclRefExpr referencing a non-odr-used, constant-evaluated variable.
2591 if (const auto *decl_expr = dyn_cast<DeclRefExpr>(e)) {
2592 auto *DRE = const_cast<DeclRefExpr *>(decl_expr);
2593 if (CodeGenFunction::ConstantEmission constant = CGF.tryEmitAsConstant(DRE))
2594 return TryEmitResult(CGF.emitScalarConstant(constant, DRE),
2595 !shouldRetainObjCLifetime(type.getObjCLifetime()));
2596 }
2597
2598 return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
2599 }
2600
2601 typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
2602 llvm::Value *value)>
2603 ValueTransform;
2604
2605 /// Insert code immediately after a call.
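/// For a CallInst the new code is placed directly after the call; for an
/// InvokeInst it is placed at the top of the normal destination block. An
/// illustrative retain-after-call result (the callee \@getObject is
/// hypothetical):
///   %call = call i8* \@getObject()
///   %1 = call i8* \@objc_retainAutoreleasedReturnValue(i8* %call)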
2606 static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF, 2607 llvm::Value *value, 2608 ValueTransform doAfterCall, 2609 ValueTransform doFallback) { 2610 if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) { 2611 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); 2612 2613 // Place the retain immediately following the call. 2614 CGF.Builder.SetInsertPoint(call->getParent(), 2615 ++llvm::BasicBlock::iterator(call)); 2616 value = doAfterCall(CGF, value); 2617 2618 CGF.Builder.restoreIP(ip); 2619 return value; 2620 } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) { 2621 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); 2622 2623 // Place the retain at the beginning of the normal destination block. 2624 llvm::BasicBlock *BB = invoke->getNormalDest(); 2625 CGF.Builder.SetInsertPoint(BB, BB->begin()); 2626 value = doAfterCall(CGF, value); 2627 2628 CGF.Builder.restoreIP(ip); 2629 return value; 2630 2631 // Bitcasts can arise because of related-result returns. Rewrite 2632 // the operand. 2633 } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) { 2634 llvm::Value *operand = bitcast->getOperand(0); 2635 operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback); 2636 bitcast->setOperand(0, operand); 2637 return bitcast; 2638 2639 // Generic fall-back case. 2640 } else { 2641 // Retain using the non-block variant: we never need to do a copy 2642 // of a block that's been returned to us. 2643 return doFallback(CGF, value); 2644 } 2645 } 2646 2647 /// Given that the given expression is some sort of call (which does 2648 /// not return retained), emit a retain following it. 2649 static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF, 2650 const Expr *e) { 2651 llvm::Value *value = CGF.EmitScalarExpr(e); 2652 return emitARCOperationAfterCall(CGF, value, 2653 [](CodeGenFunction &CGF, llvm::Value *value) { 2654 return CGF.EmitARCRetainAutoreleasedReturnValue(value); 2655 }, 2656 [](CodeGenFunction &CGF, llvm::Value *value) { 2657 return CGF.EmitARCRetainNonBlock(value); 2658 }); 2659 } 2660 2661 /// Given that the given expression is some sort of call (which does 2662 /// not return retained), perform an unsafeClaim following it. 2663 static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF, 2664 const Expr *e) { 2665 llvm::Value *value = CGF.EmitScalarExpr(e); 2666 return emitARCOperationAfterCall(CGF, value, 2667 [](CodeGenFunction &CGF, llvm::Value *value) { 2668 return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value); 2669 }, 2670 [](CodeGenFunction &CGF, llvm::Value *value) { 2671 return value; 2672 }); 2673 } 2674 2675 llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E, 2676 bool allowUnsafeClaim) { 2677 if (allowUnsafeClaim && 2678 CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) { 2679 return emitARCUnsafeClaimCallResult(*this, E); 2680 } else { 2681 llvm::Value *value = emitARCRetainCallResult(*this, E); 2682 return EmitObjCConsumeObject(E->getType(), value); 2683 } 2684 } 2685 2686 /// Determine whether it might be important to emit a separate 2687 /// objc_retain_block on the result of the given expression, or 2688 /// whether it's okay to just emit it in a +1 context. 2689 static bool shouldEmitSeparateBlockRetain(const Expr *e) { 2690 assert(e->getType()->isBlockPointerType()); 2691 e = e->IgnoreParens(); 2692 2693 // For future goodness, emit block expressions directly in +1 2694 // contexts if we can. 
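// (A block literal emitted directly into a +1 context can be produced as
// an already-copied block, making a separate objc_retainBlock redundant;
// a rationale sketch rather than a guarantee.)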
2695 if (isa<BlockExpr>(e)) 2696 return false; 2697 2698 if (const CastExpr *cast = dyn_cast<CastExpr>(e)) { 2699 switch (cast->getCastKind()) { 2700 // Emitting these operations in +1 contexts is goodness. 2701 case CK_LValueToRValue: 2702 case CK_ARCReclaimReturnedObject: 2703 case CK_ARCConsumeObject: 2704 case CK_ARCProduceObject: 2705 return false; 2706 2707 // These operations preserve a block type. 2708 case CK_NoOp: 2709 case CK_BitCast: 2710 return shouldEmitSeparateBlockRetain(cast->getSubExpr()); 2711 2712 // These operations are known to be bad (or haven't been considered). 2713 case CK_AnyPointerToBlockPointerCast: 2714 default: 2715 return true; 2716 } 2717 } 2718 2719 return true; 2720 } 2721 2722 namespace { 2723 /// A CRTP base class for emitting expressions of retainable object 2724 /// pointer type in ARC. 2725 template <typename Impl, typename Result> class ARCExprEmitter { 2726 protected: 2727 CodeGenFunction &CGF; 2728 Impl &asImpl() { return *static_cast<Impl*>(this); } 2729 2730 ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {} 2731 2732 public: 2733 Result visit(const Expr *e); 2734 Result visitCastExpr(const CastExpr *e); 2735 Result visitPseudoObjectExpr(const PseudoObjectExpr *e); 2736 Result visitBinaryOperator(const BinaryOperator *e); 2737 Result visitBinAssign(const BinaryOperator *e); 2738 Result visitBinAssignUnsafeUnretained(const BinaryOperator *e); 2739 Result visitBinAssignAutoreleasing(const BinaryOperator *e); 2740 Result visitBinAssignWeak(const BinaryOperator *e); 2741 Result visitBinAssignStrong(const BinaryOperator *e); 2742 2743 // Minimal implementation: 2744 // Result visitLValueToRValue(const Expr *e) 2745 // Result visitConsumeObject(const Expr *e) 2746 // Result visitExtendBlockObject(const Expr *e) 2747 // Result visitReclaimReturnedObject(const Expr *e) 2748 // Result visitCall(const Expr *e) 2749 // Result visitExpr(const Expr *e) 2750 // 2751 // Result emitBitCast(Result result, llvm::Type *resultType) 2752 // llvm::Value *getValueOfResult(Result result) 2753 }; 2754 } 2755 2756 /// Try to emit a PseudoObjectExpr under special ARC rules. 2757 /// 2758 /// This massively duplicates emitPseudoObjectRValue. 2759 template <typename Impl, typename Result> 2760 Result 2761 ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) { 2762 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 2763 2764 // Find the result expression. 2765 const Expr *resultExpr = E->getResultExpr(); 2766 assert(resultExpr); 2767 Result result; 2768 2769 for (PseudoObjectExpr::const_semantics_iterator 2770 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 2771 const Expr *semantic = *i; 2772 2773 // If this semantic expression is an opaque value, bind it 2774 // to the result of its source expression. 2775 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 2776 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 2777 OVMA opaqueData; 2778 2779 // If this semantic is the result of the pseudo-object 2780 // expression, try to evaluate the source as +1. 2781 if (ov == resultExpr) { 2782 assert(!OVMA::shouldBindAsLValue(ov)); 2783 result = asImpl().visit(ov->getSourceExpr()); 2784 opaqueData = OVMA::bind(CGF, ov, 2785 RValue::get(asImpl().getValueOfResult(result))); 2786 2787 // Otherwise, just bind it. 
2788 } else { 2789 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 2790 } 2791 opaques.push_back(opaqueData); 2792 2793 // Otherwise, if the expression is the result, evaluate it 2794 // and remember the result. 2795 } else if (semantic == resultExpr) { 2796 result = asImpl().visit(semantic); 2797 2798 // Otherwise, evaluate the expression in an ignored context. 2799 } else { 2800 CGF.EmitIgnoredExpr(semantic); 2801 } 2802 } 2803 2804 // Unbind all the opaques now. 2805 for (unsigned i = 0, e = opaques.size(); i != e; ++i) 2806 opaques[i].unbind(CGF); 2807 2808 return result; 2809 } 2810 2811 template <typename Impl, typename Result> 2812 Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) { 2813 switch (e->getCastKind()) { 2814 2815 // No-op casts don't change the type, so we just ignore them. 2816 case CK_NoOp: 2817 return asImpl().visit(e->getSubExpr()); 2818 2819 // These casts can change the type. 2820 case CK_CPointerToObjCPointerCast: 2821 case CK_BlockPointerToObjCPointerCast: 2822 case CK_AnyPointerToBlockPointerCast: 2823 case CK_BitCast: { 2824 llvm::Type *resultType = CGF.ConvertType(e->getType()); 2825 assert(e->getSubExpr()->getType()->hasPointerRepresentation()); 2826 Result result = asImpl().visit(e->getSubExpr()); 2827 return asImpl().emitBitCast(result, resultType); 2828 } 2829 2830 // Handle some casts specially. 2831 case CK_LValueToRValue: 2832 return asImpl().visitLValueToRValue(e->getSubExpr()); 2833 case CK_ARCConsumeObject: 2834 return asImpl().visitConsumeObject(e->getSubExpr()); 2835 case CK_ARCExtendBlockObject: 2836 return asImpl().visitExtendBlockObject(e->getSubExpr()); 2837 case CK_ARCReclaimReturnedObject: 2838 return asImpl().visitReclaimReturnedObject(e->getSubExpr()); 2839 2840 // Otherwise, use the default logic. 2841 default: 2842 return asImpl().visitExpr(e); 2843 } 2844 } 2845 2846 template <typename Impl, typename Result> 2847 Result 2848 ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) { 2849 switch (e->getOpcode()) { 2850 case BO_Comma: 2851 CGF.EmitIgnoredExpr(e->getLHS()); 2852 CGF.EnsureInsertPoint(); 2853 return asImpl().visit(e->getRHS()); 2854 2855 case BO_Assign: 2856 return asImpl().visitBinAssign(e); 2857 2858 default: 2859 return asImpl().visitExpr(e); 2860 } 2861 } 2862 2863 template <typename Impl, typename Result> 2864 Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) { 2865 switch (e->getLHS()->getType().getObjCLifetime()) { 2866 case Qualifiers::OCL_ExplicitNone: 2867 return asImpl().visitBinAssignUnsafeUnretained(e); 2868 2869 case Qualifiers::OCL_Weak: 2870 return asImpl().visitBinAssignWeak(e); 2871 2872 case Qualifiers::OCL_Autoreleasing: 2873 return asImpl().visitBinAssignAutoreleasing(e); 2874 2875 case Qualifiers::OCL_Strong: 2876 return asImpl().visitBinAssignStrong(e); 2877 2878 case Qualifiers::OCL_None: 2879 return asImpl().visitExpr(e); 2880 } 2881 llvm_unreachable("bad ObjC ownership qualifier"); 2882 } 2883 2884 /// The default rule for __unsafe_unretained emits the RHS recursively, 2885 /// stores into the unsafe variable, and propagates the result outward. 2886 template <typename Impl, typename Result> 2887 Result ARCExprEmitter<Impl,Result>:: 2888 visitBinAssignUnsafeUnretained(const BinaryOperator *e) { 2889 // Recursively emit the RHS. 2890 // For __block safety, do this before emitting the LHS. 2891 Result result = asImpl().visit(e->getRHS()); 2892 2893 // Perform the store. 
2894 LValue lvalue = 2895 CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store); 2896 CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)), 2897 lvalue); 2898 2899 return result; 2900 } 2901 2902 template <typename Impl, typename Result> 2903 Result 2904 ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) { 2905 return asImpl().visitExpr(e); 2906 } 2907 2908 template <typename Impl, typename Result> 2909 Result 2910 ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) { 2911 return asImpl().visitExpr(e); 2912 } 2913 2914 template <typename Impl, typename Result> 2915 Result 2916 ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) { 2917 return asImpl().visitExpr(e); 2918 } 2919 2920 /// The general expression-emission logic. 2921 template <typename Impl, typename Result> 2922 Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) { 2923 // We should *never* see a nested full-expression here, because if 2924 // we fail to emit at +1, our caller must not retain after we close 2925 // out the full-expression. This isn't as important in the unsafe 2926 // emitter. 2927 assert(!isa<ExprWithCleanups>(e)); 2928 2929 // Look through parens, __extension__, generic selection, etc. 2930 e = e->IgnoreParens(); 2931 2932 // Handle certain kinds of casts. 2933 if (const CastExpr *ce = dyn_cast<CastExpr>(e)) { 2934 return asImpl().visitCastExpr(ce); 2935 2936 // Handle the comma operator. 2937 } else if (auto op = dyn_cast<BinaryOperator>(e)) { 2938 return asImpl().visitBinaryOperator(op); 2939 2940 // TODO: handle conditional operators here 2941 2942 // For calls and message sends, use the retained-call logic. 2943 // Delegate inits are a special case in that they're the only 2944 // returns-retained expression that *isn't* surrounded by 2945 // a consume. 2946 } else if (isa<CallExpr>(e) || 2947 (isa<ObjCMessageExpr>(e) && 2948 !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) { 2949 return asImpl().visitCall(e); 2950 2951 // Look through pseudo-object expressions. 2952 } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 2953 return asImpl().visitPseudoObjectExpr(pseudo); 2954 } 2955 2956 return asImpl().visitExpr(e); 2957 } 2958 2959 namespace { 2960 2961 /// An emitter for +1 results. 2962 struct ARCRetainExprEmitter : 2963 public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> { 2964 2965 ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} 2966 2967 llvm::Value *getValueOfResult(TryEmitResult result) { 2968 return result.getPointer(); 2969 } 2970 2971 TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) { 2972 llvm::Value *value = result.getPointer(); 2973 value = CGF.Builder.CreateBitCast(value, resultType); 2974 result.setPointer(value); 2975 return result; 2976 } 2977 2978 TryEmitResult visitLValueToRValue(const Expr *e) { 2979 return tryEmitARCRetainLoadOfScalar(CGF, e); 2980 } 2981 2982 /// For consumptions, just emit the subexpression and thus elide 2983 /// the retain/release pair. 2984 TryEmitResult visitConsumeObject(const Expr *e) { 2985 llvm::Value *result = CGF.EmitScalarExpr(e); 2986 return TryEmitResult(result, true); 2987 } 2988 2989 /// Block extends are net +0. Naively, we could just recurse on 2990 /// the subexpression, but actually we need to ensure that the 2991 /// value is copied as a block, so there's a little filter here. 
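/// (Block extension arises, for example, when a block value is converted
/// to a non-block object-pointer type such as 'id'; an illustrative case,
/// not an exhaustive list.)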
2992 TryEmitResult visitExtendBlockObject(const Expr *e) {
2993 llvm::Value *result; // will be a +0 value
2994
2995 // If we can't safely assume the sub-expression will produce a
2996 // block-copied value, emit the sub-expression at +0.
2997 if (shouldEmitSeparateBlockRetain(e)) {
2998 result = CGF.EmitScalarExpr(e);
2999
3000 // Otherwise, try to emit the sub-expression at +1 recursively.
3001 } else {
3002 TryEmitResult subresult = asImpl().visit(e);
3003
3004 // If that produced a retained value, just use that.
3005 if (subresult.getInt()) {
3006 return subresult;
3007 }
3008
3009 // Otherwise it's +0.
3010 result = subresult.getPointer();
3011 }
3012
3013 // Retain the object as a block.
3014 result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
3015 return TryEmitResult(result, true);
3016 }
3017
3018 /// For reclaims, emit the subexpression as a retained call and
3019 /// skip the consumption.
3020 TryEmitResult visitReclaimReturnedObject(const Expr *e) {
3021 llvm::Value *result = emitARCRetainCallResult(CGF, e);
3022 return TryEmitResult(result, true);
3023 }
3024
3025 /// When we have an undecorated call, retroactively do a claim.
3026 TryEmitResult visitCall(const Expr *e) {
3027 llvm::Value *result = emitARCRetainCallResult(CGF, e);
3028 return TryEmitResult(result, true);
3029 }
3030
3031 // TODO: maybe special-case visitBinAssignWeak?
3032
3033 TryEmitResult visitExpr(const Expr *e) {
3034 // We didn't find an obvious production, so emit what we've got and
3035 // tell the caller that we didn't manage to retain.
3036 llvm::Value *result = CGF.EmitScalarExpr(e);
3037 return TryEmitResult(result, false);
3038 }
3039 };
3040 }
3041
3042 static TryEmitResult
3043 tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
3044 return ARCRetainExprEmitter(CGF).visit(e);
3045 }
3046
3047 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
3048 LValue lvalue,
3049 QualType type) {
3050 TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
3051 llvm::Value *value = result.getPointer();
3052 if (!result.getInt())
3053 value = CGF.EmitARCRetain(type, value);
3054 return value;
3055 }
3056
3057 /// EmitARCRetainScalarExpr - Semantically equivalent to
3058 /// EmitARCRetain(e->getType(), EmitScalarExpr(e)), but making a
3059 /// best-effort attempt to peephole expressions that naturally produce
3060 /// retained objects.
3061 llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
3062 // The retain needs to happen within the full-expression.
3063 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
3064 enterFullExpression(cleanups);
3065 RunCleanupsScope scope(*this);
3066 return EmitARCRetainScalarExpr(cleanups->getSubExpr());
3067 }
3068
3069 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
3070 llvm::Value *value = result.getPointer();
3071 if (!result.getInt())
3072 value = EmitARCRetain(e->getType(), value);
3073 return value;
3074 }
3075
3076 llvm::Value *
3077 CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
3078 // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (result.getInt())
    value = EmitARCAutorelease(value);
  else
    value = EmitARCRetainAutorelease(e->getType(), value);
  return value;
}

llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
  llvm::Value *result;
  bool doRetain;

  if (shouldEmitSeparateBlockRetain(e)) {
    result = EmitScalarExpr(e);
    doRetain = true;
  } else {
    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
    result = subresult.getPointer();
    doRetain = !subresult.getInt();
  }

  if (doRetain)
    result = EmitARCRetainBlock(result, /*mandatory*/ true);
  return EmitObjCConsumeObject(e->getType(), result);
}

llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // EmitARCRetainAutoreleaseScalarExpr does this for us.
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission.  The
  // exception machinery doesn't do anything special with the
  // exception like retaining it, so there's no safety associated with
  // only running cleanups after the throw has started, and when it
  // matters it tends to be substantially inferior code.
  return EmitScalarExpr(expr);
}

namespace {

/// An emitter for assigning into an __unsafe_unretained context.
struct ARCUnsafeUnretainedExprEmitter :
  public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
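  /// (An expression with no special ARC structure is simply evaluated;
  /// an __unsafe_unretained destination imposes no ownership transfer,
  /// so no retain or claim is needed.)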
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
}

static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}

/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
                                              bool ignored) {
  // Evaluate the RHS first.  If we're ignoring the result, assume
  // that we can emit at an unsafe +0.
  llvm::Value *value;
  if (ignored) {
    value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
  } else {
    value = EmitScalarExpr(e->getRHS());
  }

  // Emit the LHS and perform the store.
  LValue lvalue = EmitLValue(e->getLHS());
  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                         const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
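  // Under a native-ARC runtime the push/pop pair below lowers to roughly:
  //   %token = call i8* @objc_autoreleasePoolPush()
  //   ...body...
  //   call void @objc_autoreleasePoolPop(i8* %token)   ; via the cleanup
  // Otherwise an NSAutoreleasePool object is used (the MRR path below).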
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}

/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use inline assembly.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::Value *extender
    = llvm::InlineAsm::get(extenderType,
                           /* assembly */ "",
                           /* constraints */ "r",
                           /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  EmitNounwindRuntimeCall(extender, object);
}

/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy-assignment function, produce the following helper
/// function:
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
      llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                             "__assign_helper_atomic_property_",
                             &CGM.getModule());

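  // Synthesize the helper body: dereference the two implicit parameters
  // and invoke the property type's copy-assignment operator, i.e.
  // `*dest = *source`.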
  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr DstExpr(&DstDecl, false, DestTy,
                      VK_RValue, SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  Expr *Args[2] = { &DST, &SRC };
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
                              Args, DestTy->getPointeeType(),
                              VK_LValue, SourceLocation(), FPOptions());

  EmitStmt(&TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return nullptr;
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
  if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II =
      &CGM.getContext().Idents.get("__copy_helper_atomic_property_");

  QualType ReturnTy = C.VoidTy;
  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  SmallVector<QualType, 2> ArgTys;
  ArgTys.push_back(DestTy);
  ArgTys.push_back(SrcTy);
  QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});

  FunctionDecl *FD = FunctionDecl::Create(
      C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_Static, false, false);

  FunctionArgList args;
  ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
                            ImplicitParamDecl::Other);
  args.push_back(&DstDecl);
  ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
                            ImplicitParamDecl::Other);
  args.push_back(&SrcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn = llvm::Function::Create(
      LTy, llvm::GlobalValue::InternalLinkage, "__copy_helper_atomic_property_",
      &CGM.getModule());

  CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  StartFunction(FD, ReturnTy, Fn, FI, args);

  DeclRefExpr SrcExpr(&SrcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());

  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation(), false);

  CXXConstructExpr *CXXConstExpr =
      cast<CXXConstructExpr>(PID->getGetterCXXConstructor());

  SmallVector<Expr*, 4> ConstructorArgs;
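  // Rebuild the constructor arguments: the first argument becomes the
  // dereferenced source parameter, and any remaining (e.g. defaulted)
  // arguments are reused from the original getter construct expression.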
  ConstructorArgs.push_back(&SRC);
  ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()),
                         CXXConstExpr->arg_end());

  CXXConstructExpr *TheCXXConstructExpr =
      CXXConstructExpr::Create(C, Ty, SourceLocation(),
                               CXXConstExpr->getConstructor(),
                               CXXConstExpr->isElidable(),
                               ConstructorArgs,
                               CXXConstExpr->hadMultipleCandidates(),
                               CXXConstExpr->isListInitialization(),
                               CXXConstExpr->isStdInitListInitialization(),
                               CXXConstExpr->requiresZeroInitialization(),
                               CXXConstExpr->getConstructionKind(),
                               SourceRange());

  DeclRefExpr DstExpr(&DstDecl, false, DestTy,
                      VK_RValue, SourceLocation());

  RValue DV = EmitAnyExpr(&DstExpr);
  CharUnits Alignment
    = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
  EmitAggExpr(TheCXXConstructExpr,
              AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
                                    Qualifiers(),
                                    AggValueSlot::IsDestructed,
                                    AggValueSlot::DoesNotNeedGCBarriers,
                                    AggValueSlot::IsNotAliased,
                                    AggValueSlot::DoesNotOverlap));

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  // Get selectors for copy/autorelease.
  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
  Selector CopySelector =
      getContext().Selectors.getNullarySelector(CopyID);
  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
  Selector AutoreleaseSelector =
      getContext().Selectors.getNullarySelector(AutoreleaseID);

  // Emit calls to copy/autorelease.
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  return Val;
}

/// Lower an @available check to a call to
/// __isOSVersionAtLeast(major, minor, subminor).
llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
  assert(Args.size() == 3 && "Expected 3 arguments here!");

  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}

void CodeGenModule::emitAtAvailableLinkGuard() {
  if (!IsOSVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // Add -framework CoreFoundation to the linker commands.  We still want to
  // emit the CoreFoundation reference below, because without any symbol
  // reference the linker won't actually link the framework when nothing
  // else in the code uses it.
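  // The resulting module-level metadata looks roughly like:
  //   !llvm.linker.options = !{!0}
  //   !0 = !{!"-framework", !"CoreFoundation"}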
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::Constant *CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::Function *CFLinkCheckFunc = cast<llvm::Function>(CreateBuiltinFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework"));
  CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
  CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CodeGenFunction CGF(*this);
  CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
  CGF.EmitNounwindRuntimeCall(CFFunc, llvm::Constant::getNullValue(VoidPtrTy));
  CGF.Builder.CreateUnreachable();
  addCompilerUsedGlobal(CFLinkCheckFunc);
}

CGObjCRuntime::~CGObjCRuntime() {}