//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();
  assert(BoxingMethod && "BoxingMethod is null");
  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
    Args.add(RValue::get(BitCast.getPointer()), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    LValue LV = MakeNaturalAlignAddrLValue(Constant, IdTy);
    llvm::Value *Ptr = EmitLoadOfScalar(LV, E->getLocStart());
    cast<llvm::LoadInst>(Ptr)->setMetadata(
        CGM.getModule().getMDKindID("invariant.load"),
        llvm::MDNode::get(getLLVMContext(), None));
    return Builder.CreateBitCast(Ptr, ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
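  // (The objects are passed through a raw id[] buffer, which ARC treats as
  // __unsafe_unretained; at -O0 the values are rooted in allocas anyway, so
  // the keep-alive bookkeeping below is only needed when optimizing.)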
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
      (getLangOpts().ObjCAutoRefCount &&
       CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects.getPointer()), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys.getPointer()), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
      llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
    const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// \brief Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
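  // Note that only methods marked with ns_consumes_self (under ARC, the
  // init family) take ownership of their receiver in the first place.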
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                     E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    assert(ObjTy && "Invalid Objective-C class message send");
    OID = ObjTy->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    result = Runtime.GenerateMessageSend(*this, Return, ResultType,
                                         E->getSelector(),
                                         Receiver, Args, OID,
                                         method);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getLocStart();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  CGM.SetInternalFunctionAttributes(OMD, Fn, FI);

  args.push_back(OMD->getSelfDecl());
  args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getLocEnd();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  // In ARC, certain methods get an extra cleanup.
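  // Specifically, under ARC an explicit [super dealloc] is forbidden, so
  // -dealloc implicitly chains to the superclass via the FinishARCDealloc
  // cleanup pushed below.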
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  Address src =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
       .getAddress();

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
  args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    unsigned Kind : 8;
    unsigned IsAtomic : 1;
    unsigned IsCopy : 1;
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  std::tie(IvarSize, IvarAlignment) =
      CGM.getContext().getTypeInfoInChars(ivarType);

  // If we have a copy property, we always have to use getProperty/setProperty.
  // TODO: we could actually use setProperty and an expression for non-atomics.
  if (IsCopy) {
    Kind = GetSetProperty;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// \brief Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction();
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                          CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Constant *copyCppAtomicObjectFn =
    CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
                     /*nrvo*/ nullptr);
      EmitReturnStmt(ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
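    // The atomic load above is as wide as the ivar's storage, which may be
    // wider than the value the getter returns (e.g. a _Bool ivar is stored
    // as i8 but returned as i1), so truncate before storing to the return
    // slot if necessary.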
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    if (ivarSize > retTySize) {
      llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, newTy);
      bitcastType = newTy->getPointerTo();
    }
    Builder.CreateStore(ivarVal,
                        Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::Constant *getPropertyFn =
      CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
      Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
      EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::Instruction *CallInstruction;
    RValue RV = EmitCall(
        getTypes().arrangeBuiltinFunctionCall(propType, args),
        callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
      return;
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress().getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                                                CGF.LoadObjCSelf(), ivar, 0)
                              .getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the size of the type.
  llvm::Value *size =
    CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                          CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Constant *fn =
    CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}


static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());

    LValue ivarLValue =
      EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    Address ivarAddr = ivarLValue.getAddress();

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
      llvm::Type::getIntNTy(getLLVMContext(),
                            getContext().toBits(strategy.getIvarSize()));

    // Cast both arguments to the chosen operation type.
    argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
    ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);

    // This bitcast load is likely to cause some nasty IR.
    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store. There are no memory ordering requirements.
    llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
    store->setAtomic(llvm::AtomicOrdering::Unordered);
    return;
  }

  case PropertyImplStrategy::GetSetProperty:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {

    llvm::Constant *setOptimizedPropertyFn = nullptr;
    llvm::Constant *setPropertyFn = nullptr;
    if (UseOptimizedSetter(CGM)) {
      // Targeting 10.8 / iOS 6.0 or later, with GC off.
      setOptimizedPropertyFn =
        CGM.getObjCRuntime()
           .GetOptimizedPropertySetFunction(strategy.isAtomic(),
                                            strategy.isCopy());
      if (!setOptimizedPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
        return;
      }
    }
    else {
      setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
      if (!setPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
        return;
      }
    }

    // Emit objc_setProperty((id) self, _cmd, offset, arg,
    //                       <is-atomic>, <is-copy>).
    llvm::Value *cmd =
      Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
    llvm::Value *self =
      Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
      EmitIvarOffset(classImpl->getClassInterface(), ivar);
    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
    llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
    arg = Builder.CreateBitCast(arg, VoidPtrTy);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    if (setOptimizedPropertyFn) {
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    } else {
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
               getContext().BoolTy);
      args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
               getContext().BoolTy);
      // FIXME: We shouldn't need to get the function info here, the runtime
      // already should have computed it to build the function.
      CGCallee callee = CGCallee::forDirect(setPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    }

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructSetterCall(*this, setterMethod, ivar);
    return;

  case PropertyImplStrategy::Expression:
    break;
  }

  // Otherwise, fake up some ASTs and emit a normal assignment.
  ValueDecl *selfDecl = setterMethod->getSelfDecl();
  DeclRefExpr self(selfDecl, false, selfDecl->getType(),
                   VK_LValue, SourceLocation());
  ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
                            selfDecl->getType(), CK_LValueToRValue, &self,
                            VK_RValue);
  ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
                          SourceLocation(), SourceLocation(),
                          &selfLoad, true, true);

  ParmVarDecl *argDecl = *setterMethod->param_begin();
  QualType argType = argDecl->getType().getNonReferenceType();
  DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
  ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
                           argType.getUnqualifiedType(), CK_LValueToRValue,
                           &arg, VK_RValue);

  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bit cast the RHS in these cases.
  // The following absurdity is just to ensure well-formed IR.
  CastKind argCK = CK_NoOp;
  if (ivarRef.getType()->isObjCObjectPointerType()) {
    if (argLoad.getType()->isObjCObjectPointerType())
      argCK = CK_BitCast;
    else if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BlockPointerToObjCPointerCast;
    else
      argCK = CK_CPointerToObjCPointerCast;
  } else if (ivarRef.getType()->isBlockPointerType()) {
    if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BitCast;
    else
      argCK = CK_AnyPointerToBlockPointerCast;
  } else if (ivarRef.getType()->isPointerType()) {
    argCK = CK_BitCast;
  }
  ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
                           ivarRef.getType(), argCK, &argLoad,
                           VK_RValue);
  Expr *finalArg = &argLoad;
  if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
                                           argLoad.getType()))
    finalArg = &argCast;


  BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
                        ivarRef.getType(), VK_RValue, OK_Ordinary,
                        SourceLocation(), FPOptions());
  EmitStmt(&assign);
}

/// \brief Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
  assert(OMD && "Invalid call to generate setter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCSetterBody(IMP, PID, AtomicHelperFn);

  FinishFunction();
}

namespace {
  struct DestroyIvar final : EHScopeStack::Cleanup {
  private:
    llvm::Value *addr;
    const ObjCIvarDecl *ivar;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;
  public:
    DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), ivar(ivar), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      LValue lvalue
        = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
      CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
                                      Address addr,
                                      QualType type) {
  llvm::Value *null = getNullForVariable(addr);
  CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}

static void emitCXXDestructMethod(CodeGenFunction &CGF,
                                  ObjCImplementationDecl *impl) {
  CodeGenFunction::RunCleanupsScope scope(CGF);

  llvm::Value *self = CGF.LoadObjCSelf();

  const ObjCInterfaceDecl *iface = impl->getClassInterface();
  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
       ivar; ivar = ivar->getNextIvar()) {
    QualType type = ivar->getType();

    // Check whether the ivar is a destructible type.
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    CodeGenFunction::Destroyer *destroyer = nullptr;

    // Use a call to objc_storeStrong to destroy strong ivars, for the
    // general benefit of the tools.
    if (dtorKind == QualType::DK_objc_strong_lifetime) {
      destroyer = destroyARCStrongWithStore;

    // Otherwise use the default for the destruction kind.
    } else {
      destroyer = CGF.getDestroyer(dtorKind);
    }

    CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);

    CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
                                         cleanupKind & EHCleanup);
  }

  assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}

void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                                 ObjCMethodDecl *MD,
                                                 bool ctor) {
  MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
  StartObjCMethod(MD, IMP->getClassInterface());

  // Emit .cxx_construct.
  if (ctor) {
    // Suppress the final autorelease in ARC.
    AutoreleaseResult = false;

    for (const auto *IvarInit : IMP->inits()) {
      FieldDecl *Field = IvarInit->getAnyMember();
      ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
      LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
                                    LoadObjCSelf(), Ivar, 0);
      EmitAggExpr(IvarInit->getInit(),
                  AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased));
    }
    // constructor returns 'self'.
    CodeGenTypes &Types = CGM.getTypes();
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Value *SelfAsId =
      Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
    EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);

  // Emit .cxx_destruct.
  } else {
    emitCXXDestructMethod(*this, IMP);
  }
  FinishFunction();
}

llvm::Value *CodeGenFunction::LoadObjCSelf() {
  VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
  DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
                  Self->getType(), VK_LValue, SourceLocation());
  return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}

QualType CodeGenFunction::TypeOfSelfObject() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
  const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
    getContext().getCanonicalType(selfDecl->getType()));
  return PTy->getPointeeType();
}

void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
  llvm::Constant *EnumerationMutationFnPtr =
    CGM.getObjCRuntime().EnumerationMutationFunction();
  if (!EnumerationMutationFnPtr) {
    CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
    return;
  }
  CGCallee EnumerationMutationFn =
    CGCallee::forDirect(EnumerationMutationFnPtr);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  RunCleanupsScope ForScope(*this);

  // The local variable comes into scope immediately.
  AutoVarEmission variable = AutoVarEmission::invalid();
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
    variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));

  JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");

  // Fast enumeration state.
  QualType StateTy = CGM.getObjCFastEnumerationStateType();
  Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
  EmitNullInitialization(StatePtr, StateTy);

  // Number of elements in the items array.
  static const unsigned NumItems = 16;

  // Fetch the countByEnumeratingWithState:objects:count: selector.
  IdentifierInfo *II[] = {
    &CGM.getContext().Idents.get("countByEnumeratingWithState"),
    &CGM.getContext().Idents.get("objects"),
    &CGM.getContext().Idents.get("count")
  };
  Selector FastEnumSel =
    CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);

  QualType ItemsTy =
    getContext().getConstantArrayType(getContext().getObjCIdType(),
                                      llvm::APInt(32, NumItems),
                                      ArrayType::Normal, 0);
  Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");

  // Emit the collection pointer. In ARC, we do a retain.
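  // The retain protects against the collection being deallocated (and the
  // loop reading freed memory) if the body happens to clear the only strong
  // reference to it.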
1519 llvm::Value *Collection; 1520 if (getLangOpts().ObjCAutoRefCount) { 1521 Collection = EmitARCRetainScalarExpr(S.getCollection()); 1522 1523 // Enter a cleanup to do the release. 1524 EmitObjCConsumeObject(S.getCollection()->getType(), Collection); 1525 } else { 1526 Collection = EmitScalarExpr(S.getCollection()); 1527 } 1528 1529 // The 'continue' label needs to appear within the cleanup for the 1530 // collection object. 1531 JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next"); 1532 1533 // Send it our message: 1534 CallArgList Args; 1535 1536 // The first argument is a temporary of the enumeration-state type. 1537 Args.add(RValue::get(StatePtr.getPointer()), 1538 getContext().getPointerType(StateTy)); 1539 1540 // The second argument is a temporary array with space for NumItems 1541 // pointers. We'll actually be loading elements from the array 1542 // pointer written into the control state; this buffer is so that 1543 // collections that *aren't* backed by arrays can still queue up 1544 // batches of elements. 1545 Args.add(RValue::get(ItemsPtr.getPointer()), 1546 getContext().getPointerType(ItemsTy)); 1547 1548 // The third argument is the capacity of that temporary array. 1549 llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy); 1550 llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems); 1551 Args.add(RValue::get(Count), getContext().UnsignedLongTy); 1552 1553 // Start the enumeration. 1554 RValue CountRV = 1555 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1556 getContext().UnsignedLongTy, 1557 FastEnumSel, 1558 Collection, Args); 1559 1560 // The initial number of objects that were returned in the buffer. 1561 llvm::Value *initialBufferLimit = CountRV.getScalarVal(); 1562 1563 llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty"); 1564 llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit"); 1565 1566 llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy); 1567 1568 // If the limit pointer was zero to begin with, the collection is 1569 // empty; skip all this. Set the branch weight assuming this has the same 1570 // probability of exiting the loop as any other loop exit. 1571 uint64_t EntryCount = getCurrentProfileCount(); 1572 Builder.CreateCondBr( 1573 Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, 1574 LoopInitBB, 1575 createProfileWeights(EntryCount, getProfileCount(S.getBody()))); 1576 1577 // Otherwise, initialize the loop. 1578 EmitBlock(LoopInitBB); 1579 1580 // Save the initial mutations value. This is the value at an 1581 // address that was written into the state object by 1582 // countByEnumeratingWithState:objects:count:. 1583 Address StateMutationsPtrPtr = Builder.CreateStructGEP( 1584 StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr"); 1585 llvm::Value *StateMutationsPtr 1586 = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1587 1588 llvm::Value *initialMutations = 1589 Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(), 1590 "forcoll.initial-mutations"); 1591 1592 // Start looping. This is the point we return to whenever we have a 1593 // fresh, non-empty batch of objects. 1594 llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody"); 1595 EmitBlock(LoopBodyBB); 1596 1597 // The current index into the buffer. 1598 llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index"); 1599 index->addIncoming(zero, LoopInitBB); 1600 1601 // The current buffer size. 
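  // Like the index, this is carried in a phi: it starts as the batch size
  // returned by the initial message send and is refreshed by the refetch
  // block below whenever a new batch is fetched.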
1602 llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count"); 1603 count->addIncoming(initialBufferLimit, LoopInitBB); 1604 1605 incrementProfileCounter(&S); 1606 1607 // Check whether the mutations value has changed from where it was 1608 // at start. StateMutationsPtr should actually be invariant between 1609 // refreshes. 1610 StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1611 llvm::Value *currentMutations 1612 = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(), 1613 "statemutations"); 1614 1615 llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated"); 1616 llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated"); 1617 1618 Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations), 1619 WasNotMutatedBB, WasMutatedBB); 1620 1621 // If so, call the enumeration-mutation function. 1622 EmitBlock(WasMutatedBB); 1623 llvm::Value *V = 1624 Builder.CreateBitCast(Collection, 1625 ConvertType(getContext().getObjCIdType())); 1626 CallArgList Args2; 1627 Args2.add(RValue::get(V), getContext().getObjCIdType()); 1628 // FIXME: We shouldn't need to get the function info here, the runtime already 1629 // should have computed it to build the function. 1630 EmitCall( 1631 CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2), 1632 EnumerationMutationFn, ReturnValueSlot(), Args2); 1633 1634 // Otherwise, or if the mutation function returns, just continue. 1635 EmitBlock(WasNotMutatedBB); 1636 1637 // Initialize the element variable. 1638 RunCleanupsScope elementVariableScope(*this); 1639 bool elementIsVariable; 1640 LValue elementLValue; 1641 QualType elementType; 1642 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { 1643 // Initialize the variable, in case it's a __block variable or something. 1644 EmitAutoVarInit(variable); 1645 1646 const VarDecl* D = cast<VarDecl>(SD->getSingleDecl()); 1647 DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(), 1648 VK_LValue, SourceLocation()); 1649 elementLValue = EmitLValue(&tempDRE); 1650 elementType = D->getType(); 1651 elementIsVariable = true; 1652 1653 if (D->isARCPseudoStrong()) 1654 elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); 1655 } else { 1656 elementLValue = LValue(); // suppress warning 1657 elementType = cast<Expr>(S.getElement())->getType(); 1658 elementIsVariable = false; 1659 } 1660 llvm::Type *convertedElementType = ConvertType(elementType); 1661 1662 // Fetch the buffer out of the enumeration state. 1663 // TODO: this pointer should actually be invariant between 1664 // refreshes, which would help us do certain loop optimizations. 1665 Address StateItemsPtr = Builder.CreateStructGEP( 1666 StatePtr, 1, getPointerSize(), "stateitems.ptr"); 1667 llvm::Value *EnumStateItems = 1668 Builder.CreateLoad(StateItemsPtr, "stateitems"); 1669 1670 // Fetch the value at the current index from the buffer. 1671 llvm::Value *CurrentItemPtr = 1672 Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr"); 1673 llvm::Value *CurrentItem = 1674 Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign()); 1675 1676 // Cast that value to the right type. 1677 CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType, 1678 "currentitem"); 1679 1680 // Make sure we have an l-value. Yes, this gets evaluated every 1681 // time through the loop. 
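  // (The element of 'for (element in collection)' may be an arbitrary
  // l-value expression rather than a declaration, and that expression may
  // have side effects.)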
1682 if (!elementIsVariable) { 1683 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 1684 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue); 1685 } else { 1686 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, 1687 /*isInit*/ true); 1688 } 1689 1690 // If we do have an element variable, this assignment is the end of 1691 // its initialization. 1692 if (elementIsVariable) 1693 EmitAutoVarCleanups(variable); 1694 1695 // Perform the loop body, setting up break and continue labels. 1696 BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); 1697 { 1698 RunCleanupsScope Scope(*this); 1699 EmitStmt(S.getBody()); 1700 } 1701 BreakContinueStack.pop_back(); 1702 1703 // Destroy the element variable now. 1704 elementVariableScope.ForceCleanup(); 1705 1706 // Check whether there are more elements. 1707 EmitBlock(AfterBody.getBlock()); 1708 1709 llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch"); 1710 1711 // First we check in the local buffer. 1712 llvm::Value *indexPlusOne 1713 = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1)); 1714 1715 // If we haven't overrun the buffer yet, we can continue. 1716 // Set the branch weights based on the simplifying assumption that this is 1717 // like a while-loop, i.e., ignoring that the false branch fetches more 1718 // elements and then returns to the loop. 1719 Builder.CreateCondBr( 1720 Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, 1721 createProfileWeights(getProfileCount(S.getBody()), EntryCount)); 1722 1723 index->addIncoming(indexPlusOne, AfterBody.getBlock()); 1724 count->addIncoming(count, AfterBody.getBlock()); 1725 1726 // Otherwise, we have to fetch more elements. 1727 EmitBlock(FetchMoreBB); 1728 1729 CountRV = 1730 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1731 getContext().UnsignedLongTy, 1732 FastEnumSel, 1733 Collection, Args); 1734 1735 // If we got a zero count, we're done. 1736 llvm::Value *refetchCount = CountRV.getScalarVal(); 1737 1738 // (note that the message send might split FetchMoreBB) 1739 index->addIncoming(zero, Builder.GetInsertBlock()); 1740 count->addIncoming(refetchCount, Builder.GetInsertBlock()); 1741 1742 Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero), 1743 EmptyBB, LoopBodyBB); 1744 1745 // No more elements. 1746 EmitBlock(EmptyBB); 1747 1748 if (!elementIsVariable) { 1749 // If the element was not a declaration, set it to be null. 
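    // (This matches the documented fast-enumeration behavior: when the
    // loop terminates by exhausting the collection, the element is nil.)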
1750 1751 llvm::Value *null = llvm::Constant::getNullValue(convertedElementType); 1752 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 1753 EmitStoreThroughLValue(RValue::get(null), elementLValue); 1754 } 1755 1756 if (DI) 1757 DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); 1758 1759 ForScope.ForceCleanup(); 1760 EmitBlock(LoopEnd.getBlock()); 1761 } 1762 1763 void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { 1764 CGM.getObjCRuntime().EmitTryStmt(*this, S); 1765 } 1766 1767 void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { 1768 CGM.getObjCRuntime().EmitThrowStmt(*this, S); 1769 } 1770 1771 void CodeGenFunction::EmitObjCAtSynchronizedStmt( 1772 const ObjCAtSynchronizedStmt &S) { 1773 CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); 1774 } 1775 1776 namespace { 1777 struct CallObjCRelease final : EHScopeStack::Cleanup { 1778 CallObjCRelease(llvm::Value *object) : object(object) {} 1779 llvm::Value *object; 1780 1781 void Emit(CodeGenFunction &CGF, Flags flags) override { 1782 // Releases at the end of the full-expression are imprecise. 1783 CGF.EmitARCRelease(object, ARCImpreciseLifetime); 1784 } 1785 }; 1786 } 1787 1788 /// Produce the code for a CK_ARCConsumeObject. Does a primitive 1789 /// release at the end of the full-expression. 1790 llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, 1791 llvm::Value *object) { 1792 // If we're in a conditional branch, we need to make the cleanup 1793 // conditional. 1794 pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object); 1795 return object; 1796 } 1797 1798 llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, 1799 llvm::Value *value) { 1800 return EmitARCRetainAutorelease(type, value); 1801 } 1802 1803 /// Given a number of pointers, inform the optimizer that they're 1804 /// being intrinsically used up until this point in the program. 1805 void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { 1806 llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use; 1807 if (!fn) { 1808 llvm::FunctionType *fnType = 1809 llvm::FunctionType::get(CGM.VoidTy, None, true); 1810 fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use"); 1811 } 1812 1813 // This isn't really a "runtime" function, but as an intrinsic it 1814 // doesn't really matter as long as we align things up. 
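  // The ARC optimizer treats clang.arc.use as a use of its operands and
  // then deletes the call, so it should never survive to final codegen.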
1815 EmitNounwindRuntimeCall(fn, values); 1816 } 1817 1818 1819 static bool IsForwarding(StringRef Name) { 1820 return llvm::StringSwitch<bool>(Name) 1821 .Cases("objc_autoreleaseReturnValue", // ARCInstKind::AutoreleaseRV 1822 "objc_autorelease", // ARCInstKind::Autorelease 1823 "objc_retainAutoreleaseReturnValue", // ARCInstKind::FusedRetainAutoreleaseRV 1824 "objc_retainAutoreleasedReturnValue", // ARCInstKind::RetainRV 1825 "objc_retainAutorelease", // ARCInstKind::FusedRetainAutorelease 1826 "objc_retainedObject", // ARCInstKind::NoopCast 1827 "objc_retain", // ARCInstKind::Retain 1828 "objc_unretainedObject", // ARCInstKind::NoopCast 1829 "objc_unretainedPointer", // ARCInstKind::NoopCast 1830 "objc_unsafeClaimAutoreleasedReturnValue", // ARCInstKind::ClaimRV 1831 true) 1832 .Default(false); 1833 } 1834 1835 static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM, 1836 llvm::FunctionType *FTy, 1837 StringRef Name) { 1838 llvm::Constant *RTF = CGM.CreateRuntimeFunction(FTy, Name); 1839 1840 if (auto *F = dyn_cast<llvm::Function>(RTF)) { 1841 // If the target runtime doesn't naturally support ARC, emit weak 1842 // references to the runtime support library. We don't really 1843 // permit this to fail, but we need a particular relocation style. 1844 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && 1845 !CGM.getTriple().isOSBinFormatCOFF()) { 1846 F->setLinkage(llvm::Function::ExternalWeakLinkage); 1847 } else if (Name == "objc_retain" || Name == "objc_release") { 1848 // If we have Native ARC, set nonlazybind attribute for these APIs for 1849 // performance. 1850 F->addFnAttr(llvm::Attribute::NonLazyBind); 1851 } 1852 1853 if (IsForwarding(Name)) { 1854 llvm::AttrBuilder B; 1855 B.addAttribute(llvm::Attribute::Returned); 1856 1857 F->arg_begin()->addAttr(llvm::AttributeList::get(F->getContext(), 1, B)); 1858 } 1859 } 1860 1861 return RTF; 1862 } 1863 1864 /// Perform an operation having the signature 1865 /// i8* (i8*) 1866 /// where a null input causes a no-op and returns null. 1867 static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF, 1868 llvm::Value *value, 1869 llvm::Constant *&fn, 1870 StringRef fnName, 1871 bool isTailCall = false) { 1872 if (isa<llvm::ConstantPointerNull>(value)) 1873 return value; 1874 1875 if (!fn) { 1876 llvm::FunctionType *fnType = 1877 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false); 1878 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1879 } 1880 1881 // Cast the argument to 'id'. 1882 llvm::Type *origType = value->getType(); 1883 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); 1884 1885 // Call the function. 1886 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value); 1887 if (isTailCall) 1888 call->setTailCall(); 1889 1890 // Cast the result back to the original type. 1891 return CGF.Builder.CreateBitCast(call, origType); 1892 } 1893 1894 /// Perform an operation having the following signature: 1895 /// i8* (i8**) 1896 static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, 1897 Address addr, 1898 llvm::Constant *&fn, 1899 StringRef fnName) { 1900 if (!fn) { 1901 llvm::FunctionType *fnType = 1902 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false); 1903 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1904 } 1905 1906 // Cast the argument to 'id*'. 1907 llvm::Type *origType = addr.getElementType(); 1908 addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy); 1909 1910 // Call the function. 
1911 llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer()); 1912 1913 // Cast the result back to a dereference of the original type. 1914 if (origType != CGF.Int8PtrTy) 1915 result = CGF.Builder.CreateBitCast(result, origType); 1916 1917 return result; 1918 } 1919 1920 /// Perform an operation having the following signature: 1921 /// i8* (i8**, i8*) 1922 static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, 1923 Address addr, 1924 llvm::Value *value, 1925 llvm::Constant *&fn, 1926 StringRef fnName, 1927 bool ignored) { 1928 assert(addr.getElementType() == value->getType()); 1929 1930 if (!fn) { 1931 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy }; 1932 1933 llvm::FunctionType *fnType 1934 = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false); 1935 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1936 } 1937 1938 llvm::Type *origType = value->getType(); 1939 1940 llvm::Value *args[] = { 1941 CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy), 1942 CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy) 1943 }; 1944 llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); 1945 1946 if (ignored) return nullptr; 1947 1948 return CGF.Builder.CreateBitCast(result, origType); 1949 } 1950 1951 /// Perform an operation having the following signature: 1952 /// void (i8**, i8**) 1953 static void emitARCCopyOperation(CodeGenFunction &CGF, 1954 Address dst, 1955 Address src, 1956 llvm::Constant *&fn, 1957 StringRef fnName) { 1958 assert(dst.getType() == src.getType()); 1959 1960 if (!fn) { 1961 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy }; 1962 1963 llvm::FunctionType *fnType 1964 = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false); 1965 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1966 } 1967 1968 llvm::Value *args[] = { 1969 CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy), 1970 CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy) 1971 }; 1972 CGF.EmitNounwindRuntimeCall(fn, args); 1973 } 1974 1975 /// Produce the code to do a retain. Based on the type, calls one of: 1976 /// call i8* \@objc_retain(i8* %value) 1977 /// call i8* \@objc_retainBlock(i8* %value) 1978 llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { 1979 if (type->isBlockPointerType()) 1980 return EmitARCRetainBlock(value, /*mandatory*/ false); 1981 else 1982 return EmitARCRetainNonBlock(value); 1983 } 1984 1985 /// Retain the given object, with normal retain semantics. 1986 /// call i8* \@objc_retain(i8* %value) 1987 llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { 1988 return emitARCValueOperation(*this, value, 1989 CGM.getObjCEntrypoints().objc_retain, 1990 "objc_retain"); 1991 } 1992 1993 /// Retain the given block, with _Block_copy semantics. 1994 /// call i8* \@objc_retainBlock(i8* %value) 1995 /// 1996 /// \param mandatory - If false, emit the call with metadata 1997 /// indicating that it's okay for the optimizer to eliminate this call 1998 /// if it can prove that the block never escapes except down the stack. 
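/// For example (illustrative), a block literal passed directly as a call
/// argument typically need not be copied, while one stored into a __strong
/// variable must be.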
llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
                                                 bool mandatory) {
  llvm::Value *result
    = emitARCValueOperation(*this, value,
                            CGM.getObjCEntrypoints().objc_retainBlock,
                            "objc_retainBlock");

  // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
  // tell the optimizer that it doesn't need to do this copy if the
  // block doesn't escape, where being passed as an argument doesn't
  // count as escaping.
  if (!mandatory && isa<llvm::Instruction>(result)) {
    llvm::CallInst *call
      = cast<llvm::CallInst>(result->stripPointerCasts());
    assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);

    call->setMetadata("clang.arc.copy_on_escape",
                      llvm::MDNode::get(Builder.getContext(), None));
  }

  return result;
}

static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
  // Fetch the void(void) inline asm which marks that we're going to
  // do something with the autoreleased return value.
  llvm::InlineAsm *&marker
    = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
  if (!marker) {
    StringRef assembly
      = CGF.CGM.getTargetCodeGenInfo()
           .getARCRetainAutoreleasedReturnValueMarker();

    // If we have an empty assembly string, there's nothing to do.
    if (assembly.empty()) {

    // Otherwise, at -O0, build an inline asm that we're going to call
    // in a moment.
    } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
      llvm::FunctionType *type =
        llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false);

      marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);

    // If we're at -O1 and above, we don't want to litter the code
    // with this marker yet, so leave a breadcrumb for the ARC
    // optimizer to pick up.
    } else {
      llvm::NamedMDNode *metadata =
        CGF.CGM.getModule().getOrInsertNamedMetadata(
            "clang.arc.retainAutoreleasedReturnValueMarker");
      assert(metadata->getNumOperands() <= 1);
      if (metadata->getNumOperands() == 0) {
        auto &ctx = CGF.getLLVMContext();
        metadata->addOperand(llvm::MDNode::get(ctx,
                                 llvm::MDString::get(ctx, assembly)));
      }
    }
  }

  // Call the marker asm if we made one, which we do only at -O0.
  if (marker)
    CGF.Builder.CreateCall(marker);
}

/// Retain the given object which is the result of a function call.
/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
/// Yes, this function name is one character away from a different
/// call with completely different semantics.
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
  emitAutoreleasedReturnValueMarker(*this);
  return emitARCValueOperation(*this, value,
              CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
                               "objc_retainAutoreleasedReturnValue");
}

/// Claim a possibly-autoreleased return value at +0.  This is only
/// valid to do in contexts which do not rely on the retain to keep
/// the object valid for all of its uses; for example, when
/// the value is ignored, or when it is being assigned to an
/// __unsafe_unretained variable.
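/// For example (illustrative only), in
///   __unsafe_unretained id x = [dict objectForKey:key];
/// the +0 autoreleased result can simply be claimed instead of being
/// retained and then released again.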
2082 /// 2083 /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) 2084 llvm::Value * 2085 CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { 2086 emitAutoreleasedReturnValueMarker(*this); 2087 return emitARCValueOperation(*this, value, 2088 CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue, 2089 "objc_unsafeClaimAutoreleasedReturnValue"); 2090 } 2091 2092 /// Release the given object. 2093 /// call void \@objc_release(i8* %value) 2094 void CodeGenFunction::EmitARCRelease(llvm::Value *value, 2095 ARCPreciseLifetime_t precise) { 2096 if (isa<llvm::ConstantPointerNull>(value)) return; 2097 2098 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release; 2099 if (!fn) { 2100 llvm::FunctionType *fnType = 2101 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); 2102 fn = createARCRuntimeFunction(CGM, fnType, "objc_release"); 2103 } 2104 2105 // Cast the argument to 'id'. 2106 value = Builder.CreateBitCast(value, Int8PtrTy); 2107 2108 // Call objc_release. 2109 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value); 2110 2111 if (precise == ARCImpreciseLifetime) { 2112 call->setMetadata("clang.imprecise_release", 2113 llvm::MDNode::get(Builder.getContext(), None)); 2114 } 2115 } 2116 2117 /// Destroy a __strong variable. 2118 /// 2119 /// At -O0, emit a call to store 'null' into the address; 2120 /// instrumenting tools prefer this because the address is exposed, 2121 /// but it's relatively cumbersome to optimize. 2122 /// 2123 /// At -O1 and above, just load and call objc_release. 2124 /// 2125 /// call void \@objc_storeStrong(i8** %addr, i8* null) 2126 void CodeGenFunction::EmitARCDestroyStrong(Address addr, 2127 ARCPreciseLifetime_t precise) { 2128 if (CGM.getCodeGenOpts().OptimizationLevel == 0) { 2129 llvm::Value *null = getNullForVariable(addr); 2130 EmitARCStoreStrongCall(addr, null, /*ignored*/ true); 2131 return; 2132 } 2133 2134 llvm::Value *value = Builder.CreateLoad(addr); 2135 EmitARCRelease(value, precise); 2136 } 2137 2138 /// Store into a strong object. Always calls this: 2139 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2140 llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, 2141 llvm::Value *value, 2142 bool ignored) { 2143 assert(addr.getElementType() == value->getType()); 2144 2145 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong; 2146 if (!fn) { 2147 llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy }; 2148 llvm::FunctionType *fnType 2149 = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false); 2150 fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong"); 2151 } 2152 2153 llvm::Value *args[] = { 2154 Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy), 2155 Builder.CreateBitCast(value, Int8PtrTy) 2156 }; 2157 EmitNounwindRuntimeCall(fn, args); 2158 2159 if (ignored) return nullptr; 2160 return value; 2161 } 2162 2163 /// Store into a strong object. Sometimes calls this: 2164 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2165 /// Other times, breaks it down into components. 2166 llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, 2167 llvm::Value *newValue, 2168 bool ignored) { 2169 QualType type = dst.getType(); 2170 bool isBlock = type->isBlockPointerType(); 2171 2172 // Use a store barrier at -O0 unless this is a block type or the 2173 // lvalue is inadequately aligned. 
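  // (shouldUseFusedARCCalls() is true at -O0; as with EmitARCDestroyStrong
  // above, the fused objc_storeStrong form exposes the address, which is
  // friendlier to instrumenting tools.)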
2174 if (shouldUseFusedARCCalls() && 2175 !isBlock && 2176 (dst.getAlignment().isZero() || 2177 dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { 2178 return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored); 2179 } 2180 2181 // Otherwise, split it out. 2182 2183 // Retain the new value. 2184 newValue = EmitARCRetain(type, newValue); 2185 2186 // Read the old value. 2187 llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation()); 2188 2189 // Store. We do this before the release so that any deallocs won't 2190 // see the old value. 2191 EmitStoreOfScalar(newValue, dst); 2192 2193 // Finally, release the old value. 2194 EmitARCRelease(oldValue, dst.isARCPreciseLifetime()); 2195 2196 return newValue; 2197 } 2198 2199 /// Autorelease the given object. 2200 /// call i8* \@objc_autorelease(i8* %value) 2201 llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) { 2202 return emitARCValueOperation(*this, value, 2203 CGM.getObjCEntrypoints().objc_autorelease, 2204 "objc_autorelease"); 2205 } 2206 2207 /// Autorelease the given object. 2208 /// call i8* \@objc_autoreleaseReturnValue(i8* %value) 2209 llvm::Value * 2210 CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) { 2211 return emitARCValueOperation(*this, value, 2212 CGM.getObjCEntrypoints().objc_autoreleaseReturnValue, 2213 "objc_autoreleaseReturnValue", 2214 /*isTailCall*/ true); 2215 } 2216 2217 /// Do a fused retain/autorelease of the given object. 2218 /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value) 2219 llvm::Value * 2220 CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) { 2221 return emitARCValueOperation(*this, value, 2222 CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue, 2223 "objc_retainAutoreleaseReturnValue", 2224 /*isTailCall*/ true); 2225 } 2226 2227 /// Do a fused retain/autorelease of the given object. 2228 /// call i8* \@objc_retainAutorelease(i8* %value) 2229 /// or 2230 /// %retain = call i8* \@objc_retainBlock(i8* %value) 2231 /// call i8* \@objc_autorelease(i8* %retain) 2232 llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type, 2233 llvm::Value *value) { 2234 if (!type->isBlockPointerType()) 2235 return EmitARCRetainAutoreleaseNonBlock(value); 2236 2237 if (isa<llvm::ConstantPointerNull>(value)) return value; 2238 2239 llvm::Type *origType = value->getType(); 2240 value = Builder.CreateBitCast(value, Int8PtrTy); 2241 value = EmitARCRetainBlock(value, /*mandatory*/ true); 2242 value = EmitARCAutorelease(value); 2243 return Builder.CreateBitCast(value, origType); 2244 } 2245 2246 /// Do a fused retain/autorelease of the given object. 2247 /// call i8* \@objc_retainAutorelease(i8* %value) 2248 llvm::Value * 2249 CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) { 2250 return emitARCValueOperation(*this, value, 2251 CGM.getObjCEntrypoints().objc_retainAutorelease, 2252 "objc_retainAutorelease"); 2253 } 2254 2255 /// i8* \@objc_loadWeak(i8** %addr) 2256 /// Essentially objc_autorelease(objc_loadWeakRetained(addr)). 
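/// Used for ordinary reads of __weak l-values; the autoreleased result is
/// kept alive by the enclosing autorelease pool rather than by an explicit
/// retain.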
llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeak,
                              "objc_loadWeak");
}

/// i8* \@objc_loadWeakRetained(i8** %addr)
llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) {
  return emitARCLoadOperation(*this, addr,
                              CGM.getObjCEntrypoints().objc_loadWeakRetained,
                              "objc_loadWeakRetained");
}

/// i8* \@objc_storeWeak(i8** %addr, i8* %value)
/// Returns %value.
llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr,
                                               llvm::Value *value,
                                               bool ignored) {
  return emitARCStoreOperation(*this, addr, value,
                               CGM.getObjCEntrypoints().objc_storeWeak,
                               "objc_storeWeak", ignored);
}

/// i8* \@objc_initWeak(i8** %addr, i8* %value)
/// Returns %value.  %addr is known to not have a current weak entry.
/// Essentially equivalent to:
///   *addr = nil; objc_storeWeak(addr, value);
void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) {
  // If we're initializing to null, just write null to memory; no need
  // to get the runtime involved.  But don't do this if optimization
  // is enabled, because accounting for this would make the optimizer
  // much more complicated.
  if (isa<llvm::ConstantPointerNull>(value) &&
      CGM.getCodeGenOpts().OptimizationLevel == 0) {
    Builder.CreateStore(value, addr);
    return;
  }

  emitARCStoreOperation(*this, addr, value,
                        CGM.getObjCEntrypoints().objc_initWeak,
                        "objc_initWeak", /*ignored*/ true);
}

/// void \@objc_destroyWeak(i8** %addr)
/// Essentially objc_storeWeak(addr, nil).
void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
  llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak;
  if (!fn) {
    llvm::FunctionType *fnType =
      llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false);
    fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
  }

  // Cast the argument to 'id*'.
  addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);

  EmitNounwindRuntimeCall(fn, addr.getPointer());
}

/// void \@objc_moveWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest.  Leaves %src pointing to nothing.
/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_moveWeak,
                       "objc_moveWeak");
}

/// void \@objc_copyWeak(i8** %dest, i8** %src)
/// Disregards the current value in %dest.  Essentially
///   objc_release(objc_initWeak(dest, objc_loadWeakRetained(src)))
void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) {
  emitARCCopyOperation(*this, dst, src,
                       CGM.getObjCEntrypoints().objc_copyWeak,
                       "objc_copyWeak");
}

/// Produce the code to do an objc_autoreleasePoolPush.
/// call i8* \@objc_autoreleasePoolPush(void)
llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
  llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush;
  if (!fn) {
    llvm::FunctionType *fnType =
      llvm::FunctionType::get(Int8PtrTy, false);
    fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
  }

  return EmitNounwindRuntimeCall(fn);
}

/// Produce the code to do a primitive pop of an autorelease pool.
/// call void \@objc_autoreleasePoolPop(i8* %ptr)
void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
  assert(value->getType() == Int8PtrTy);

  llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop;
  if (!fn) {
    llvm::FunctionType *fnType =
      llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false);

    // We don't want to use a weak import here; instead we should not
    // fall into this path.
    fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
  }

  // objc_autoreleasePoolPop can throw.
  EmitRuntimeCallOrInvoke(fn, value);
}

/// Produce the code to do the MRR version of an autorelease-pool push,
/// which is just [[NSAutoreleasePool alloc] init], where +alloc is
/// declared on NSAutoreleasePool and -init on its superclass, NSObject.
///
llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this);
  // [NSAutoreleasePool alloc]
  IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
  Selector AllocSel = getContext().Selectors.getSelector(0, &II);
  CallArgList Args;
  RValue AllocRV =
    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                getContext().getObjCIdType(),
                                AllocSel, Receiver, Args);

  // [Receiver init]
  Receiver = AllocRV.getScalarVal();
  II = &CGM.getContext().Idents.get("init");
  Selector InitSel = getContext().Selectors.getSelector(0, &II);
  RValue InitRV =
    Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                getContext().getObjCIdType(),
                                InitSel, Receiver, Args);
  return InitRV.getScalarVal();
}

/// Produce the code to do the MRR version of an autorelease-pool pop:
2395 /// [tmp drain]; 2396 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { 2397 IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); 2398 Selector DrainSel = getContext().Selectors.getSelector(0, &II); 2399 CallArgList Args; 2400 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 2401 getContext().VoidTy, DrainSel, Arg, Args); 2402 } 2403 2404 void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, 2405 Address addr, 2406 QualType type) { 2407 CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime); 2408 } 2409 2410 void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, 2411 Address addr, 2412 QualType type) { 2413 CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime); 2414 } 2415 2416 void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, 2417 Address addr, 2418 QualType type) { 2419 CGF.EmitARCDestroyWeak(addr); 2420 } 2421 2422 namespace { 2423 struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup { 2424 llvm::Value *Token; 2425 2426 CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2427 2428 void Emit(CodeGenFunction &CGF, Flags flags) override { 2429 CGF.EmitObjCAutoreleasePoolPop(Token); 2430 } 2431 }; 2432 struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup { 2433 llvm::Value *Token; 2434 2435 CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2436 2437 void Emit(CodeGenFunction &CGF, Flags flags) override { 2438 CGF.EmitObjCMRRAutoreleasePoolPop(Token); 2439 } 2440 }; 2441 } 2442 2443 void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) { 2444 if (CGM.getLangOpts().ObjCAutoRefCount) 2445 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr); 2446 else 2447 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr); 2448 } 2449 2450 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2451 LValue lvalue, 2452 QualType type) { 2453 switch (type.getObjCLifetime()) { 2454 case Qualifiers::OCL_None: 2455 case Qualifiers::OCL_ExplicitNone: 2456 case Qualifiers::OCL_Strong: 2457 case Qualifiers::OCL_Autoreleasing: 2458 return TryEmitResult(CGF.EmitLoadOfLValue(lvalue, 2459 SourceLocation()).getScalarVal(), 2460 false); 2461 2462 case Qualifiers::OCL_Weak: 2463 return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()), 2464 true); 2465 } 2466 2467 llvm_unreachable("impossible lifetime!"); 2468 } 2469 2470 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2471 const Expr *e) { 2472 e = e->IgnoreParens(); 2473 QualType type = e->getType(); 2474 2475 // If we're loading retained from a __strong xvalue, we can avoid 2476 // an extra retain/release pair by zeroing out the source of this 2477 // "move" operation. 2478 if (e->isXValue() && 2479 !type.isConstQualified() && 2480 type.getObjCLifetime() == Qualifiers::OCL_Strong) { 2481 // Emit the lvalue. 2482 LValue lv = CGF.EmitLValue(e); 2483 2484 // Load the object pointer. 2485 llvm::Value *result = CGF.EmitLoadOfLValue(lv, 2486 SourceLocation()).getScalarVal(); 2487 2488 // Set the source pointer to NULL. 2489 CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv); 2490 2491 return TryEmitResult(result, true); 2492 } 2493 2494 // As a very special optimization, in ARC++, if the l-value is the 2495 // result of a non-volatile assignment, do a simple retain of the 2496 // result of the call to objc_storeWeak instead of reloading. 
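  // (objc_storeWeak already returns the value it stored, so reloading it
  // through the weak reference would be redundant.)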
2497 if (CGF.getLangOpts().CPlusPlus && 2498 !type.isVolatileQualified() && 2499 type.getObjCLifetime() == Qualifiers::OCL_Weak && 2500 isa<BinaryOperator>(e) && 2501 cast<BinaryOperator>(e)->getOpcode() == BO_Assign) 2502 return TryEmitResult(CGF.EmitScalarExpr(e), false); 2503 2504 return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type); 2505 } 2506 2507 typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, 2508 llvm::Value *value)> 2509 ValueTransform; 2510 2511 /// Insert code immediately after a call. 2512 static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF, 2513 llvm::Value *value, 2514 ValueTransform doAfterCall, 2515 ValueTransform doFallback) { 2516 if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) { 2517 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); 2518 2519 // Place the retain immediately following the call. 2520 CGF.Builder.SetInsertPoint(call->getParent(), 2521 ++llvm::BasicBlock::iterator(call)); 2522 value = doAfterCall(CGF, value); 2523 2524 CGF.Builder.restoreIP(ip); 2525 return value; 2526 } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) { 2527 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); 2528 2529 // Place the retain at the beginning of the normal destination block. 2530 llvm::BasicBlock *BB = invoke->getNormalDest(); 2531 CGF.Builder.SetInsertPoint(BB, BB->begin()); 2532 value = doAfterCall(CGF, value); 2533 2534 CGF.Builder.restoreIP(ip); 2535 return value; 2536 2537 // Bitcasts can arise because of related-result returns. Rewrite 2538 // the operand. 2539 } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) { 2540 llvm::Value *operand = bitcast->getOperand(0); 2541 operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback); 2542 bitcast->setOperand(0, operand); 2543 return bitcast; 2544 2545 // Generic fall-back case. 2546 } else { 2547 // Retain using the non-block variant: we never need to do a copy 2548 // of a block that's been returned to us. 2549 return doFallback(CGF, value); 2550 } 2551 } 2552 2553 /// Given that the given expression is some sort of call (which does 2554 /// not return retained), emit a retain following it. 2555 static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF, 2556 const Expr *e) { 2557 llvm::Value *value = CGF.EmitScalarExpr(e); 2558 return emitARCOperationAfterCall(CGF, value, 2559 [](CodeGenFunction &CGF, llvm::Value *value) { 2560 return CGF.EmitARCRetainAutoreleasedReturnValue(value); 2561 }, 2562 [](CodeGenFunction &CGF, llvm::Value *value) { 2563 return CGF.EmitARCRetainNonBlock(value); 2564 }); 2565 } 2566 2567 /// Given that the given expression is some sort of call (which does 2568 /// not return retained), perform an unsafeClaim following it. 
2569 static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF, 2570 const Expr *e) { 2571 llvm::Value *value = CGF.EmitScalarExpr(e); 2572 return emitARCOperationAfterCall(CGF, value, 2573 [](CodeGenFunction &CGF, llvm::Value *value) { 2574 return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value); 2575 }, 2576 [](CodeGenFunction &CGF, llvm::Value *value) { 2577 return value; 2578 }); 2579 } 2580 2581 llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E, 2582 bool allowUnsafeClaim) { 2583 if (allowUnsafeClaim && 2584 CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) { 2585 return emitARCUnsafeClaimCallResult(*this, E); 2586 } else { 2587 llvm::Value *value = emitARCRetainCallResult(*this, E); 2588 return EmitObjCConsumeObject(E->getType(), value); 2589 } 2590 } 2591 2592 /// Determine whether it might be important to emit a separate 2593 /// objc_retain_block on the result of the given expression, or 2594 /// whether it's okay to just emit it in a +1 context. 2595 static bool shouldEmitSeparateBlockRetain(const Expr *e) { 2596 assert(e->getType()->isBlockPointerType()); 2597 e = e->IgnoreParens(); 2598 2599 // For future goodness, emit block expressions directly in +1 2600 // contexts if we can. 2601 if (isa<BlockExpr>(e)) 2602 return false; 2603 2604 if (const CastExpr *cast = dyn_cast<CastExpr>(e)) { 2605 switch (cast->getCastKind()) { 2606 // Emitting these operations in +1 contexts is goodness. 2607 case CK_LValueToRValue: 2608 case CK_ARCReclaimReturnedObject: 2609 case CK_ARCConsumeObject: 2610 case CK_ARCProduceObject: 2611 return false; 2612 2613 // These operations preserve a block type. 2614 case CK_NoOp: 2615 case CK_BitCast: 2616 return shouldEmitSeparateBlockRetain(cast->getSubExpr()); 2617 2618 // These operations are known to be bad (or haven't been considered). 2619 case CK_AnyPointerToBlockPointerCast: 2620 default: 2621 return true; 2622 } 2623 } 2624 2625 return true; 2626 } 2627 2628 namespace { 2629 /// A CRTP base class for emitting expressions of retainable object 2630 /// pointer type in ARC. 2631 template <typename Impl, typename Result> class ARCExprEmitter { 2632 protected: 2633 CodeGenFunction &CGF; 2634 Impl &asImpl() { return *static_cast<Impl*>(this); } 2635 2636 ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {} 2637 2638 public: 2639 Result visit(const Expr *e); 2640 Result visitCastExpr(const CastExpr *e); 2641 Result visitPseudoObjectExpr(const PseudoObjectExpr *e); 2642 Result visitBinaryOperator(const BinaryOperator *e); 2643 Result visitBinAssign(const BinaryOperator *e); 2644 Result visitBinAssignUnsafeUnretained(const BinaryOperator *e); 2645 Result visitBinAssignAutoreleasing(const BinaryOperator *e); 2646 Result visitBinAssignWeak(const BinaryOperator *e); 2647 Result visitBinAssignStrong(const BinaryOperator *e); 2648 2649 // Minimal implementation: 2650 // Result visitLValueToRValue(const Expr *e) 2651 // Result visitConsumeObject(const Expr *e) 2652 // Result visitExtendBlockObject(const Expr *e) 2653 // Result visitReclaimReturnedObject(const Expr *e) 2654 // Result visitCall(const Expr *e) 2655 // Result visitExpr(const Expr *e) 2656 // 2657 // Result emitBitCast(Result result, llvm::Type *resultType) 2658 // llvm::Value *getValueOfResult(Result result) 2659 }; 2660 } 2661 2662 /// Try to emit a PseudoObjectExpr under special ARC rules. 2663 /// 2664 /// This massively duplicates emitPseudoObjectRValue. 
2665 template <typename Impl, typename Result> 2666 Result 2667 ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) { 2668 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 2669 2670 // Find the result expression. 2671 const Expr *resultExpr = E->getResultExpr(); 2672 assert(resultExpr); 2673 Result result; 2674 2675 for (PseudoObjectExpr::const_semantics_iterator 2676 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 2677 const Expr *semantic = *i; 2678 2679 // If this semantic expression is an opaque value, bind it 2680 // to the result of its source expression. 2681 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 2682 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 2683 OVMA opaqueData; 2684 2685 // If this semantic is the result of the pseudo-object 2686 // expression, try to evaluate the source as +1. 2687 if (ov == resultExpr) { 2688 assert(!OVMA::shouldBindAsLValue(ov)); 2689 result = asImpl().visit(ov->getSourceExpr()); 2690 opaqueData = OVMA::bind(CGF, ov, 2691 RValue::get(asImpl().getValueOfResult(result))); 2692 2693 // Otherwise, just bind it. 2694 } else { 2695 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 2696 } 2697 opaques.push_back(opaqueData); 2698 2699 // Otherwise, if the expression is the result, evaluate it 2700 // and remember the result. 2701 } else if (semantic == resultExpr) { 2702 result = asImpl().visit(semantic); 2703 2704 // Otherwise, evaluate the expression in an ignored context. 2705 } else { 2706 CGF.EmitIgnoredExpr(semantic); 2707 } 2708 } 2709 2710 // Unbind all the opaques now. 2711 for (unsigned i = 0, e = opaques.size(); i != e; ++i) 2712 opaques[i].unbind(CGF); 2713 2714 return result; 2715 } 2716 2717 template <typename Impl, typename Result> 2718 Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) { 2719 switch (e->getCastKind()) { 2720 2721 // No-op casts don't change the type, so we just ignore them. 2722 case CK_NoOp: 2723 return asImpl().visit(e->getSubExpr()); 2724 2725 // These casts can change the type. 2726 case CK_CPointerToObjCPointerCast: 2727 case CK_BlockPointerToObjCPointerCast: 2728 case CK_AnyPointerToBlockPointerCast: 2729 case CK_BitCast: { 2730 llvm::Type *resultType = CGF.ConvertType(e->getType()); 2731 assert(e->getSubExpr()->getType()->hasPointerRepresentation()); 2732 Result result = asImpl().visit(e->getSubExpr()); 2733 return asImpl().emitBitCast(result, resultType); 2734 } 2735 2736 // Handle some casts specially. 2737 case CK_LValueToRValue: 2738 return asImpl().visitLValueToRValue(e->getSubExpr()); 2739 case CK_ARCConsumeObject: 2740 return asImpl().visitConsumeObject(e->getSubExpr()); 2741 case CK_ARCExtendBlockObject: 2742 return asImpl().visitExtendBlockObject(e->getSubExpr()); 2743 case CK_ARCReclaimReturnedObject: 2744 return asImpl().visitReclaimReturnedObject(e->getSubExpr()); 2745 2746 // Otherwise, use the default logic. 
2747 default: 2748 return asImpl().visitExpr(e); 2749 } 2750 } 2751 2752 template <typename Impl, typename Result> 2753 Result 2754 ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) { 2755 switch (e->getOpcode()) { 2756 case BO_Comma: 2757 CGF.EmitIgnoredExpr(e->getLHS()); 2758 CGF.EnsureInsertPoint(); 2759 return asImpl().visit(e->getRHS()); 2760 2761 case BO_Assign: 2762 return asImpl().visitBinAssign(e); 2763 2764 default: 2765 return asImpl().visitExpr(e); 2766 } 2767 } 2768 2769 template <typename Impl, typename Result> 2770 Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) { 2771 switch (e->getLHS()->getType().getObjCLifetime()) { 2772 case Qualifiers::OCL_ExplicitNone: 2773 return asImpl().visitBinAssignUnsafeUnretained(e); 2774 2775 case Qualifiers::OCL_Weak: 2776 return asImpl().visitBinAssignWeak(e); 2777 2778 case Qualifiers::OCL_Autoreleasing: 2779 return asImpl().visitBinAssignAutoreleasing(e); 2780 2781 case Qualifiers::OCL_Strong: 2782 return asImpl().visitBinAssignStrong(e); 2783 2784 case Qualifiers::OCL_None: 2785 return asImpl().visitExpr(e); 2786 } 2787 llvm_unreachable("bad ObjC ownership qualifier"); 2788 } 2789 2790 /// The default rule for __unsafe_unretained emits the RHS recursively, 2791 /// stores into the unsafe variable, and propagates the result outward. 2792 template <typename Impl, typename Result> 2793 Result ARCExprEmitter<Impl,Result>:: 2794 visitBinAssignUnsafeUnretained(const BinaryOperator *e) { 2795 // Recursively emit the RHS. 2796 // For __block safety, do this before emitting the LHS. 2797 Result result = asImpl().visit(e->getRHS()); 2798 2799 // Perform the store. 2800 LValue lvalue = 2801 CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store); 2802 CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)), 2803 lvalue); 2804 2805 return result; 2806 } 2807 2808 template <typename Impl, typename Result> 2809 Result 2810 ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) { 2811 return asImpl().visitExpr(e); 2812 } 2813 2814 template <typename Impl, typename Result> 2815 Result 2816 ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) { 2817 return asImpl().visitExpr(e); 2818 } 2819 2820 template <typename Impl, typename Result> 2821 Result 2822 ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) { 2823 return asImpl().visitExpr(e); 2824 } 2825 2826 /// The general expression-emission logic. 2827 template <typename Impl, typename Result> 2828 Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) { 2829 // We should *never* see a nested full-expression here, because if 2830 // we fail to emit at +1, our caller must not retain after we close 2831 // out the full-expression. This isn't as important in the unsafe 2832 // emitter. 2833 assert(!isa<ExprWithCleanups>(e)); 2834 2835 // Look through parens, __extension__, generic selection, etc. 2836 e = e->IgnoreParens(); 2837 2838 // Handle certain kinds of casts. 2839 if (const CastExpr *ce = dyn_cast<CastExpr>(e)) { 2840 return asImpl().visitCastExpr(ce); 2841 2842 // Handle the comma operator. 2843 } else if (auto op = dyn_cast<BinaryOperator>(e)) { 2844 return asImpl().visitBinaryOperator(op); 2845 2846 // TODO: handle conditional operators here 2847 2848 // For calls and message sends, use the retained-call logic. 
2849 // Delegate inits are a special case in that they're the only 2850 // returns-retained expression that *isn't* surrounded by 2851 // a consume. 2852 } else if (isa<CallExpr>(e) || 2853 (isa<ObjCMessageExpr>(e) && 2854 !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) { 2855 return asImpl().visitCall(e); 2856 2857 // Look through pseudo-object expressions. 2858 } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 2859 return asImpl().visitPseudoObjectExpr(pseudo); 2860 } 2861 2862 return asImpl().visitExpr(e); 2863 } 2864 2865 namespace { 2866 2867 /// An emitter for +1 results. 2868 struct ARCRetainExprEmitter : 2869 public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> { 2870 2871 ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} 2872 2873 llvm::Value *getValueOfResult(TryEmitResult result) { 2874 return result.getPointer(); 2875 } 2876 2877 TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) { 2878 llvm::Value *value = result.getPointer(); 2879 value = CGF.Builder.CreateBitCast(value, resultType); 2880 result.setPointer(value); 2881 return result; 2882 } 2883 2884 TryEmitResult visitLValueToRValue(const Expr *e) { 2885 return tryEmitARCRetainLoadOfScalar(CGF, e); 2886 } 2887 2888 /// For consumptions, just emit the subexpression and thus elide 2889 /// the retain/release pair. 2890 TryEmitResult visitConsumeObject(const Expr *e) { 2891 llvm::Value *result = CGF.EmitScalarExpr(e); 2892 return TryEmitResult(result, true); 2893 } 2894 2895 /// Block extends are net +0. Naively, we could just recurse on 2896 /// the subexpression, but actually we need to ensure that the 2897 /// value is copied as a block, so there's a little filter here. 2898 TryEmitResult visitExtendBlockObject(const Expr *e) { 2899 llvm::Value *result; // will be a +0 value 2900 2901 // If we can't safely assume the sub-expression will produce a 2902 // block-copied value, emit the sub-expression at +0. 2903 if (shouldEmitSeparateBlockRetain(e)) { 2904 result = CGF.EmitScalarExpr(e); 2905 2906 // Otherwise, try to emit the sub-expression at +1 recursively. 2907 } else { 2908 TryEmitResult subresult = asImpl().visit(e); 2909 2910 // If that produced a retained value, just use that. 2911 if (subresult.getInt()) { 2912 return subresult; 2913 } 2914 2915 // Otherwise it's +0. 2916 result = subresult.getPointer(); 2917 } 2918 2919 // Retain the object as a block. 2920 result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true); 2921 return TryEmitResult(result, true); 2922 } 2923 2924 /// For reclaims, emit the subexpression as a retained call and 2925 /// skip the consumption. 2926 TryEmitResult visitReclaimReturnedObject(const Expr *e) { 2927 llvm::Value *result = emitARCRetainCallResult(CGF, e); 2928 return TryEmitResult(result, true); 2929 } 2930 2931 /// When we have an undecorated call, retroactively do a claim. 2932 TryEmitResult visitCall(const Expr *e) { 2933 llvm::Value *result = emitARCRetainCallResult(CGF, e); 2934 return TryEmitResult(result, true); 2935 } 2936 2937 // TODO: maybe special-case visitBinAssignWeak? 2938 2939 TryEmitResult visitExpr(const Expr *e) { 2940 // We didn't find an obvious production, so emit what we've got and 2941 // tell the caller that we didn't manage to retain. 
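    // (Callers check getInt() and emit the retain themselves when it is
    // false; see emitARCRetainLoadOfScalar below.)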
    llvm::Value *result = CGF.EmitScalarExpr(e);
    return TryEmitResult(result, false);
  }
};
}

static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
  return ARCRetainExprEmitter(CGF).visit(e);
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue,
                                              QualType type) {
  TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = CGF.EmitARCRetain(type, value);
  return value;
}

/// EmitARCRetainScalarExpr - Semantically equivalent to
/// EmitARCRetain(e->getType(), EmitScalarExpr(e)), but making a
/// best-effort attempt to peephole expressions that naturally produce
/// retained objects.
llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return EmitARCRetainScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (!result.getInt())
    value = EmitARCRetain(e->getType(), value);
  return value;
}

llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
  // The retain needs to happen within the full-expression.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr());
  }

  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
  llvm::Value *value = result.getPointer();
  if (result.getInt())
    value = EmitARCAutorelease(value);
  else
    value = EmitARCRetainAutorelease(e->getType(), value);
  return value;
}

llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
  llvm::Value *result;
  bool doRetain;

  if (shouldEmitSeparateBlockRetain(e)) {
    result = EmitScalarExpr(e);
    doRetain = true;
  } else {
    TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
    result = subresult.getPointer();
    doRetain = !subresult.getInt();
  }

  if (doRetain)
    result = EmitARCRetainBlock(result, /*mandatory*/ true);
  return EmitObjCConsumeObject(e->getType(), result);
}

llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
  // In ARC, retain and autorelease the expression.
  if (getLangOpts().ObjCAutoRefCount) {
    // Do so before running any cleanups for the full-expression.
    // EmitARCRetainAutoreleaseScalarExpr does this for us.
    return EmitARCRetainAutoreleaseScalarExpr(expr);
  }

  // Otherwise, use the normal scalar-expression emission.  The
  // exception machinery doesn't do anything special with the
  // exception (such as retaining it), so nothing is gained by running
  // cleanups only after the throw has started, and doing so tends to
  // produce substantially worse code when it matters.
  return EmitScalarExpr(expr);
}

namespace {

/// An emitter for assigning into an __unsafe_unretained context.
struct ARCUnsafeUnretainedExprEmitter :
  public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {

  ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}

  llvm::Value *getValueOfResult(llvm::Value *value) {
    return value;
  }

  llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
    return CGF.Builder.CreateBitCast(value, resultType);
  }

  llvm::Value *visitLValueToRValue(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// For consumptions, just emit the subexpression and perform the
  /// consumption like normal.
  llvm::Value *visitConsumeObject(const Expr *e) {
    llvm::Value *value = CGF.EmitScalarExpr(e);
    return CGF.EmitObjCConsumeObject(e->getType(), value);
  }

  /// No special logic for block extensions.  (This probably can't
  /// actually happen in this emitter, though.)
  llvm::Value *visitExtendBlockObject(const Expr *e) {
    return CGF.EmitARCExtendBlockObject(e);
  }

  /// For reclaims, perform an unsafeClaim if that's enabled.
  llvm::Value *visitReclaimReturnedObject(const Expr *e) {
    return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
  }

  /// When we have an undecorated call, just emit it without adding
  /// the unsafeClaim.
  llvm::Value *visitCall(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }

  /// Just do normal scalar emission in the default case.
  llvm::Value *visitExpr(const Expr *e) {
    return CGF.EmitScalarExpr(e);
  }
};
}

static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
                                                      const Expr *e) {
  return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
}

/// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
/// immediately releasing the result of EmitARCRetainScalarExpr, but
/// avoiding any spurious retains, including by performing reclaims
/// with objc_unsafeClaimAutoreleasedReturnValue.
llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
  // Look through full-expressions.
  if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
    enterFullExpression(cleanups);
    RunCleanupsScope scope(*this);
    return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
  }

  return emitARCUnsafeUnretainedScalarExpr(*this, e);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
                                              bool ignored) {
  // Evaluate the RHS first.  If we're ignoring the result, assume
  // that we can emit at an unsafe +0.
  llvm::Value *value;
  if (ignored) {
    value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
  } else {
    value = EmitScalarExpr(e->getRHS());
  }

  // Emit the LHS and perform the store.
  LValue lvalue = EmitLValue(e->getLHS());
  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
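  // (If it can be emitted at +1, the separate retain that the strong store
  // would otherwise need can be elided below.)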
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                     const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}

/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use an inline assembly fragment.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::Value *extender
    = llvm::InlineAsm::get(extenderType,
                           /* assembly */ "",
                           /* constraints */ "r",
                           /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  EmitNounwindRuntimeCall(extender, object);
}

/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type
/// with a non-trivial copy assignment operator, produce the following
/// helper function.
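/// The helper is emitted under the symbol __assign_helper_atomic_property_
/// and is invoked through the runtime's atomic-copy entry point (e.g.
/// objc_copyCppObjectAtomic, on runtimes where hasAtomicCopyHelper() holds)
/// so that the assignment runs under the property's lock.  Conceptually: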
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
  FunctionDecl *FD = FunctionDecl::Create(C,
                                          C.getTranslationUnitDecl(),
                                          SourceLocation(),
                                          SourceLocation(), II, C.VoidTy,
                                          nullptr, SC_Static,
                                          false,
                                          false);

  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  FunctionArgList args;
  ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,
                            DestTy);
  args.push_back(&dstDecl);
  ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr,
                            SrcTy);
  args.push_back(&srcDecl);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
    llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                           "__assign_helper_atomic_property_",
                           &CGM.getModule());

  CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);

  StartFunction(FD, C.VoidTy, Fn, FI, args);

  DeclRefExpr DstExpr(&dstDecl, false, DestTy,
                      VK_RValue, SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation());

  DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation());

  Expr *Args[2] = { &DST, &SRC };
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
                              Args, DestTy->getPointeeType(),
                              VK_LValue, SourceLocation(), FPOptions());

  EmitStmt(&TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

llvm::Constant *
CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
                                            const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  QualType Ty = PD->getType();
  if (!Ty->isRecordType())
    return nullptr;
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;

  if (hasTrivialGetExpr(PID))
    return nullptr;
  assert(PID->getGetterCXXConstructor() &&
"getGetterCXXConstructor - null"); 3307 if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty))) 3308 return HelperFn; 3309 3310 3311 ASTContext &C = getContext(); 3312 IdentifierInfo *II 3313 = &CGM.getContext().Idents.get("__copy_helper_atomic_property_"); 3314 FunctionDecl *FD = FunctionDecl::Create(C, 3315 C.getTranslationUnitDecl(), 3316 SourceLocation(), 3317 SourceLocation(), II, C.VoidTy, 3318 nullptr, SC_Static, 3319 false, 3320 false); 3321 3322 QualType DestTy = C.getPointerType(Ty); 3323 QualType SrcTy = Ty; 3324 SrcTy.addConst(); 3325 SrcTy = C.getPointerType(SrcTy); 3326 3327 FunctionArgList args; 3328 ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,DestTy); 3329 args.push_back(&dstDecl); 3330 ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy); 3331 args.push_back(&srcDecl); 3332 3333 const CGFunctionInfo &FI = 3334 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args); 3335 3336 llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); 3337 3338 llvm::Function *Fn = 3339 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 3340 "__copy_helper_atomic_property_", &CGM.getModule()); 3341 3342 CGM.SetInternalFunctionAttributes(nullptr, Fn, FI); 3343 3344 StartFunction(FD, C.VoidTy, Fn, FI, args); 3345 3346 DeclRefExpr SrcExpr(&srcDecl, false, SrcTy, 3347 VK_RValue, SourceLocation()); 3348 3349 UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(), 3350 VK_LValue, OK_Ordinary, SourceLocation()); 3351 3352 CXXConstructExpr *CXXConstExpr = 3353 cast<CXXConstructExpr>(PID->getGetterCXXConstructor()); 3354 3355 SmallVector<Expr*, 4> ConstructorArgs; 3356 ConstructorArgs.push_back(&SRC); 3357 ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()), 3358 CXXConstExpr->arg_end()); 3359 3360 CXXConstructExpr *TheCXXConstructExpr = 3361 CXXConstructExpr::Create(C, Ty, SourceLocation(), 3362 CXXConstExpr->getConstructor(), 3363 CXXConstExpr->isElidable(), 3364 ConstructorArgs, 3365 CXXConstExpr->hadMultipleCandidates(), 3366 CXXConstExpr->isListInitialization(), 3367 CXXConstExpr->isStdInitListInitialization(), 3368 CXXConstExpr->requiresZeroInitialization(), 3369 CXXConstExpr->getConstructionKind(), 3370 SourceRange()); 3371 3372 DeclRefExpr DstExpr(&dstDecl, false, DestTy, 3373 VK_RValue, SourceLocation()); 3374 3375 RValue DV = EmitAnyExpr(&DstExpr); 3376 CharUnits Alignment 3377 = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType()); 3378 EmitAggExpr(TheCXXConstructExpr, 3379 AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment), 3380 Qualifiers(), 3381 AggValueSlot::IsDestructed, 3382 AggValueSlot::DoesNotNeedGCBarriers, 3383 AggValueSlot::IsNotAliased)); 3384 3385 FinishFunction(); 3386 HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); 3387 CGM.setAtomicGetterHelperFnMap(Ty, HelperFn); 3388 return HelperFn; 3389 } 3390 3391 llvm::Value * 3392 CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) { 3393 // Get selectors for retain/autorelease. 3394 IdentifierInfo *CopyID = &getContext().Idents.get("copy"); 3395 Selector CopySelector = 3396 getContext().Selectors.getNullarySelector(CopyID); 3397 IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease"); 3398 Selector AutoreleaseSelector = 3399 getContext().Selectors.getNullarySelector(AutoreleaseID); 3400 3401 // Emit calls to retain/autorelease. 
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  return Val;
}

llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
  assert(Args.size() == 3 && "Expected 3 arguments here!");

  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}

void CodeGenModule::emitAtAvailableLinkGuard() {
  if (!IsOSVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // Add -framework CoreFoundation to the linker commands.  We still want to
  // emit the CoreFoundation reference below as well, because otherwise, if
  // CoreFoundation is not used in the code, the linker won't link in the
  // framework.
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::Constant *CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::Function *CFLinkCheckFunc = cast<llvm::Function>(CreateBuiltinFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework"));
  CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
  CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CodeGenFunction CGF(*this);
  CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
  CGF.EmitNounwindRuntimeCall(CFFunc, llvm::Constant::getNullValue(VoidPtrTy));
  CGF.Builder.CreateUnreachable();
  addCompilerUsedGlobal(CFLinkCheckFunc);
}

CGObjCRuntime::~CGObjCRuntime() {}