//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Objective-C code as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
using namespace clang;
using namespace CodeGen;

typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
static TryEmitResult
tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
static RValue AdjustObjCObjectType(CodeGenFunction &CGF,
                                   QualType ET,
                                   RValue Result);

/// Given the address of a variable of pointer type, find the correct
/// null to store into it.
static llvm::Constant *getNullForVariable(Address addr) {
  llvm::Type *type = addr.getElementType();
  return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
}

/// Emits an instance of NSConstantString representing the object.
llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
{
  llvm::Constant *C =
      CGM.getObjCRuntime().GenerateConstantString(E->getString()).getPointer();
  // FIXME: This bitcast should just be made an invariant on the Runtime.
  return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
}

/// EmitObjCBoxedExpr - This routine generates code to call
/// the appropriate expression boxing method. This will either be
/// one of +[NSNumber numberWith<Type>:], or +[NSString stringWithUTF8String:],
/// or [NSValue valueWithBytes:objCType:].
///
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
  // Generate the correct selector for this literal's concrete type.
  // Get the method.
  const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
  const Expr *SubExpr = E->getSubExpr();
  assert(BoxingMethod && "BoxingMethod is null");
  assert(BoxingMethod->isClassMethod() && "BoxingMethod must be a class method");
  Selector Sel = BoxingMethod->getSelector();

  // Generate a reference to the class pointer, which will be the receiver.
  // Assumes that the method was introduced in the class that should be
  // messaged (avoids pulling it out of the result type).
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
  llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);

  CallArgList Args;
  const ParmVarDecl *ArgDecl = *BoxingMethod->param_begin();
  QualType ArgQT = ArgDecl->getType().getUnqualifiedType();

  // ObjCBoxedExpr supports boxing of structs and unions
  // via [NSValue valueWithBytes:objCType:]
  const QualType ValueType(SubExpr->getType().getCanonicalType());
  if (ValueType->isObjCBoxableRecordType()) {
    // Emit CodeGen for first parameter
    // and cast value to correct type
    Address Temporary = CreateMemTemp(SubExpr->getType());
    EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
    Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
    Args.add(RValue::get(BitCast.getPointer()), ArgQT);

    // Create char array to store type encoding
    std::string Str;
    getContext().getObjCEncodingForType(ValueType, Str);
    llvm::Constant *GV = CGM.GetAddrOfConstantCString(Str).getPointer();

    // Cast type encoding to correct type
    const ParmVarDecl *EncodingDecl = BoxingMethod->parameters()[1];
    QualType EncodingQT = EncodingDecl->getType().getUnqualifiedType();
    llvm::Value *Cast = Builder.CreateBitCast(GV, ConvertType(EncodingQT));

    Args.add(RValue::get(Cast), EncodingQT);
  } else {
    Args.add(EmitAnyExpr(SubExpr), ArgQT);
  }

  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
      Args, ClassDecl, BoxingMethod);
  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
                                    const ObjCMethodDecl *MethodWithObjects) {
  ASTContext &Context = CGM.getContext();
  const ObjCDictionaryLiteral *DLE = nullptr;
  const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
  if (!ALE)
    DLE = cast<ObjCDictionaryLiteral>(E);

  // Optimize empty collections by referencing constants, when available.
  uint64_t NumElements =
      ALE ? ALE->getNumElements() : DLE->getNumElements();
  if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
    StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Constant *Constant =
        CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
    Address Addr(Constant, Context.getTypeAlignInChars(IdTy));
    LValue LV = MakeAddrLValue(Addr, IdTy);
    return Builder.CreateBitCast(EmitLoadOfScalar(LV, E->getLocStart()),
                                 ConvertType(E->getType()));
  }

  // Compute the type of the array we're initializing.
  llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
                            NumElements);
  QualType ElementType = Context.getObjCIdType().withConst();
  QualType ElementArrayType
    = Context.getConstantArrayType(ElementType, APNumElements,
                                   ArrayType::Normal, /*IndexTypeQuals=*/0);

  // Allocate the temporary array(s).
  Address Objects = CreateMemTemp(ElementArrayType, "objects");
  Address Keys = Address::invalid();
  if (DLE)
    Keys = CreateMemTemp(ElementArrayType, "keys");

  // In ARC, we may need to do extra work to keep all the keys and
  // values alive until after the call.
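  // (The emitted key/value objects are collected in NeededObjects and kept
  // alive via EmitARCIntrinsicUse after the message send below.)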
  SmallVector<llvm::Value *, 16> NeededObjects;
  bool TrackNeededObjects =
      (getLangOpts().ObjCAutoRefCount &&
       CGM.getCodeGenOpts().OptimizationLevel != 0);

  // Perform the actual initialization of the array(s).
  for (uint64_t i = 0; i < NumElements; i++) {
    if (ALE) {
      // Emit the element and store it to the appropriate array slot.
      const Expr *Rhs = ALE->getElement(i);
      LValue LV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);

      llvm::Value *value = EmitScalarExpr(Rhs);
      EmitStoreThroughLValue(RValue::get(value), LV, true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(value);
      }
    } else {
      // Emit the key and store it to the appropriate array slot.
      const Expr *Key = DLE->getKeyValueElement(i).Key;
      LValue KeyLV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Keys, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);
      llvm::Value *keyValue = EmitScalarExpr(Key);
      EmitStoreThroughLValue(RValue::get(keyValue), KeyLV, /*isInit=*/true);

      // Emit the value and store it to the appropriate array slot.
      const Expr *Value = DLE->getKeyValueElement(i).Value;
      LValue ValueLV = MakeAddrLValue(
          Builder.CreateConstArrayGEP(Objects, i, getPointerSize()),
          ElementType, AlignmentSource::Decl);
      llvm::Value *valueValue = EmitScalarExpr(Value);
      EmitStoreThroughLValue(RValue::get(valueValue), ValueLV, /*isInit=*/true);
      if (TrackNeededObjects) {
        NeededObjects.push_back(keyValue);
        NeededObjects.push_back(valueValue);
      }
    }
  }

  // Generate the argument list.
  CallArgList Args;
  ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
  const ParmVarDecl *argDecl = *PI++;
  QualType ArgQT = argDecl->getType().getUnqualifiedType();
  Args.add(RValue::get(Objects.getPointer()), ArgQT);
  if (DLE) {
    argDecl = *PI++;
    ArgQT = argDecl->getType().getUnqualifiedType();
    Args.add(RValue::get(Keys.getPointer()), ArgQT);
  }
  argDecl = *PI;
  ArgQT = argDecl->getType().getUnqualifiedType();
  llvm::Value *Count =
      llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
  Args.add(RValue::get(Count), ArgQT);

  // Generate a reference to the class pointer, which will be the receiver.
  Selector Sel = MethodWithObjects->getSelector();
  QualType ResultType = E->getType();
  const ObjCObjectPointerType *InterfacePointerType
    = ResultType->getAsObjCInterfacePointerType();
  ObjCInterfaceDecl *Class
    = InterfacePointerType->getObjectType()->getInterface();
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Receiver = Runtime.GetClass(*this, Class);

  // Generate the message send.
  RValue result = Runtime.GenerateMessageSend(
      *this, ReturnValueSlot(), MethodWithObjects->getReturnType(), Sel,
      Receiver, Args, Class, MethodWithObjects);

  // The above message send needs these objects, but in ARC they are
  // passed in a buffer that is essentially __unsafe_unretained.
  // Therefore we must prevent the optimizer from releasing them until
  // after the call.
  if (TrackNeededObjects) {
    EmitARCIntrinsicUse(NeededObjects);
  }

  return Builder.CreateBitCast(result.getScalarVal(),
                               ConvertType(E->getType()));
}

llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
}

llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
    const ObjCDictionaryLiteral *E) {
  return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
}

/// Emit a selector.
llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
  // Untyped selector.
  // Note that this implementation allows for non-constant strings to be passed
  // as arguments to @selector(). Currently, the only thing preventing this
  // behaviour is the type checking in the front end.
  return CGM.getObjCRuntime().GetSelector(*this, E->getSelector());
}

llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
  // FIXME: This should pass the Decl not the name.
  return CGM.getObjCRuntime().GenerateProtocolRef(*this, E->getProtocol());
}

/// \brief Adjust the type of an Objective-C object that doesn't match up due
/// to type erasure at various points, e.g., related result types or the use
/// of parameterized classes.
static RValue AdjustObjCObjectType(CodeGenFunction &CGF, QualType ExpT,
                                   RValue Result) {
  if (!ExpT->isObjCRetainableType())
    return Result;

  // If the converted types are the same, we're done.
  llvm::Type *ExpLLVMTy = CGF.ConvertType(ExpT);
  if (ExpLLVMTy == Result.getScalarVal()->getType())
    return Result;

  // We have applied a substitution. Cast the rvalue appropriately.
  return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
                                               ExpLLVMTy));
}

/// Decide whether to extend the lifetime of the receiver of a
/// returns-inner-pointer message.
static bool
shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
  switch (message->getReceiverKind()) {

  // For a normal instance message, we should extend unless the
  // receiver is loaded from a variable with precise lifetime.
  case ObjCMessageExpr::Instance: {
    const Expr *receiver = message->getInstanceReceiver();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
    if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
    receiver = ice->getSubExpr()->IgnoreParens();

    // Look through OVEs.
    if (auto opaque = dyn_cast<OpaqueValueExpr>(receiver)) {
      if (opaque->getSourceExpr())
        receiver = opaque->getSourceExpr()->IgnoreParens();
    }

    // Only __strong variables.
    if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
      return true;

    // All ivars and fields have precise lifetime.
    if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
      return false;

    // Otherwise, check for variables.
    const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
    if (!declRef) return true;
    const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
    if (!var) return true;

    // All variables have precise lifetime except local variables with
    // automatic storage duration that aren't specially marked.
    return (var->hasLocalStorage() &&
            !var->hasAttr<ObjCPreciseLifetimeAttr>());
  }

  case ObjCMessageExpr::Class:
  case ObjCMessageExpr::SuperClass:
    // It's never necessary for class objects.
    return false;

  case ObjCMessageExpr::SuperInstance:
    // We generally assume that 'self' lives throughout a method call.
    return false;
  }

  llvm_unreachable("invalid receiver kind");
}

/// Given an expression of ObjC pointer type, check whether it was
/// immediately loaded from an ARC __weak l-value.
static const Expr *findWeakLValue(const Expr *E) {
  assert(E->getType()->isObjCRetainableType());
  E = E->IgnoreParens();
  if (auto CE = dyn_cast<CastExpr>(E)) {
    if (CE->getCastKind() == CK_LValueToRValue) {
      if (CE->getSubExpr()->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
        return CE->getSubExpr();
    }
  }

  return nullptr;
}

RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
                                            ReturnValueSlot Return) {
  // Only the lookup mechanism and first two arguments of the method
  // implementation vary between runtimes. We can get the receiver and
  // arguments in generic code.

  bool isDelegateInit = E->isDelegateInitCall();

  const ObjCMethodDecl *method = E->getMethodDecl();

  // If the method is -retain, and the receiver's being loaded from
  // a __weak variable, peephole the entire operation to objc_loadWeakRetained.
  if (method && E->getReceiverKind() == ObjCMessageExpr::Instance &&
      method->getMethodFamily() == OMF_retain) {
    if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
      LValue lvalue = EmitLValue(lvalueExpr);
      llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
      return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
    }
  }

  // We don't retain the receiver in delegate init calls, and this is
  // safe because the receiver value is always loaded from 'self',
  // which we zero out. We don't want to Block_copy block receivers,
  // though.
  bool retainSelf =
    (!isDelegateInit &&
     CGM.getLangOpts().ObjCAutoRefCount &&
     method &&
     method->hasAttr<NSConsumesSelfAttr>());

  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  bool isSuperMessage = false;
  bool isClassMessage = false;
  ObjCInterfaceDecl *OID = nullptr;
  // Find the receiver
  QualType ReceiverType;
  llvm::Value *Receiver = nullptr;
  switch (E->getReceiverKind()) {
  case ObjCMessageExpr::Instance:
    ReceiverType = E->getInstanceReceiver()->getType();
    if (retainSelf) {
      TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
                                                     E->getInstanceReceiver());
      Receiver = ter.getPointer();
      if (ter.getInt()) retainSelf = false;
    } else
      Receiver = EmitScalarExpr(E->getInstanceReceiver());
    break;

  case ObjCMessageExpr::Class: {
    ReceiverType = E->getClassReceiver();
    const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
    assert(ObjTy && "Invalid Objective-C class message send");
    OID = ObjTy->getInterface();
    assert(OID && "Invalid Objective-C class message send");
    Receiver = Runtime.GetClass(*this, OID);
    isClassMessage = true;
    break;
  }

  case ObjCMessageExpr::SuperInstance:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    break;

  case ObjCMessageExpr::SuperClass:
    ReceiverType = E->getSuperType();
    Receiver = LoadObjCSelf();
    isSuperMessage = true;
    isClassMessage = true;
    break;
  }

  if (retainSelf)
    Receiver = EmitARCRetainNonBlock(Receiver);

  // In ARC, we sometimes want to "extend the lifetime"
  // (i.e. retain+autorelease) of receivers of returns-inner-pointer
  // messages.
  if (getLangOpts().ObjCAutoRefCount && method &&
      method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
      shouldExtendReceiverForInnerPointerMessage(E))
    Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);

  QualType ResultType = method ? method->getReturnType() : E->getType();

  CallArgList Args;
  EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));

  // For delegate init calls in ARC, do an unsafe store of null into
  // self. This represents the call taking direct ownership of that
  // value. We have to do this after emitting the other call
  // arguments because they might also reference self, but we don't
  // have to worry about any of them modifying self because that would
  // be an undefined read and write of an object in unordered
  // expressions.
  if (isDelegateInit) {
    assert(getLangOpts().ObjCAutoRefCount &&
           "delegate init calls should only be marked in ARC");

    // Do an unsafe store of null into self.
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
  }

  RValue result;
  if (isSuperMessage) {
    // super is only valid in an Objective-C method
    const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
    bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
    result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
                                              E->getSelector(),
                                              OMD->getClassInterface(),
                                              isCategoryImpl,
                                              Receiver,
                                              isClassMessage,
                                              Args,
                                              method);
  } else {
    result = Runtime.GenerateMessageSend(*this, Return, ResultType,
                                         E->getSelector(),
                                         Receiver, Args, OID,
                                         method);
  }

  // For delegate init calls in ARC, implicitly store the result of
  // the call back into self. This takes ownership of the value.
  if (isDelegateInit) {
    Address selfAddr =
      GetAddrOfLocalVar(cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl());
    llvm::Value *newSelf = result.getScalarVal();

    // The delegate return type isn't necessarily a matching type; in
    // fact, it's quite likely to be 'id'.
    llvm::Type *selfTy = selfAddr.getElementType();
    newSelf = Builder.CreateBitCast(newSelf, selfTy);

    Builder.CreateStore(newSelf, selfAddr);
  }

  return AdjustObjCObjectType(*this, E->getType(), result);
}

namespace {
struct FinishARCDealloc final : EHScopeStack::Cleanup {
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);

    const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
    const ObjCInterfaceDecl *iface = impl->getClassInterface();
    if (!iface->getSuperClass()) return;

    bool isCategory = isa<ObjCCategoryImplDecl>(impl);

    // Call [super dealloc] if we have a superclass.
    llvm::Value *self = CGF.LoadObjCSelf();

    CallArgList args;
    CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
                                                      CGF.getContext().VoidTy,
                                                      method->getSelector(),
                                                      iface,
                                                      isCategory,
                                                      self,
                                                      /*is class msg*/ false,
                                                      args,
                                                      method);
  }
};
}

/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
                                      const ObjCContainerDecl *CD) {
  SourceLocation StartLoc = OMD->getLocStart();
  FunctionArgList args;
  // Check if we should generate debug info for this method.
  if (OMD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);

  const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
  CGM.SetInternalFunctionAttributes(OMD, Fn, FI);

  args.push_back(OMD->getSelfDecl());
  args.push_back(OMD->getCmdDecl());

  args.append(OMD->param_begin(), OMD->param_end());

  CurGD = OMD;
  CurEHLocation = OMD->getLocEnd();

  StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
                OMD->getLocation(), StartLoc);

  // In ARC, certain methods get an extra cleanup.
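  // (Specifically, -dealloc implementations get the FinishARCDealloc cleanup
  // above, which emits an implicit [super dealloc] at the end of the method.)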
  if (CGM.getLangOpts().ObjCAutoRefCount &&
      OMD->isInstanceMethod() &&
      OMD->getSelector().isUnarySelector()) {
    const IdentifierInfo *ident =
      OMD->getSelector().getIdentifierInfoForSlot(0);
    if (ident->isStr("dealloc"))
      EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
  }
}

static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
                                              LValue lvalue, QualType type);

/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class structure.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
  StartObjCMethod(OMD, OMD->getClassInterface());
  PGO.assignRegionCounters(GlobalDecl(OMD), CurFn);
  assert(isa<CompoundStmt>(OMD->getBody()));
  incrementProfileCounter(OMD->getBody());
  EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
  FinishFunction(OMD->getBodyRBrace());
}

/// emitStructGetterCall - Call the runtime function to load a property
/// into the return value slot.
static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
                                 bool isAtomic, bool hasStrong) {
  ASTContext &Context = CGF.getContext();

  Address src =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
       .getAddress();

  // objc_copyStruct (ReturnValue, &structIvar,
  //                  sizeof (Type of Ivar), isAtomic, false);
  CallArgList args;

  Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
  args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);

  src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
  args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);

  CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
  args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
  args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
  args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);

  llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
               callee, ReturnValueSlot(), args);
}

/// Determine whether the given architecture supports unaligned atomic
/// accesses. They don't have to be fast, just faster than a function
/// call and a mutex.
static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
  // FIXME: Allow unaligned atomic load/store on x86. (It is not
  // currently supported by the backend.)
  return false;
}

/// Return the maximum size that permits atomic accesses for the given
/// architecture.
static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
                                        llvm::Triple::ArchType arch) {
  // ARM has 8-byte atomic accesses, but it's not clear whether we
  // want to rely on them here.

  // In the default case, just assume that any size up to a pointer is
  // fine given adequate alignment.
  return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
}

namespace {
  class PropertyImplStrategy {
  public:
    enum StrategyKind {
      /// The 'native' strategy is to use the architecture's provided
      /// reads and writes.
      Native,

      /// Use objc_setProperty and objc_getProperty.
      GetSetProperty,

      /// Use objc_setProperty for the setter, but use expression
      /// evaluation for the getter.
      SetPropertyAndExpressionGet,

      /// Use objc_copyStruct.
      CopyStruct,

      /// The 'expression' strategy is to emit normal assignment or
      /// lvalue-to-rvalue expressions.
      Expression
    };

    StrategyKind getKind() const { return StrategyKind(Kind); }

    bool hasStrongMember() const { return HasStrong; }
    bool isAtomic() const { return IsAtomic; }
    bool isCopy() const { return IsCopy; }

    CharUnits getIvarSize() const { return IvarSize; }
    CharUnits getIvarAlignment() const { return IvarAlignment; }

    PropertyImplStrategy(CodeGenModule &CGM,
                         const ObjCPropertyImplDecl *propImpl);

  private:
    unsigned Kind : 8;
    unsigned IsAtomic : 1;
    unsigned IsCopy : 1;
    unsigned HasStrong : 1;

    CharUnits IvarSize;
    CharUnits IvarAlignment;
  };
}

/// Pick an implementation strategy for the given property synthesis.
PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
                                     const ObjCPropertyImplDecl *propImpl) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();

  IsCopy = (setterKind == ObjCPropertyDecl::Copy);
  IsAtomic = prop->isAtomic();
  HasStrong = false; // doesn't matter here.

  // Evaluate the ivar's size and alignment.
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  QualType ivarType = ivar->getType();
  std::tie(IvarSize, IvarAlignment) =
      CGM.getContext().getTypeInfoInChars(ivarType);

  // If we have a copy property, we always have to use getProperty/setProperty.
  // TODO: we could actually use setProperty and an expression for non-atomics.
  if (IsCopy) {
    Kind = GetSetProperty;
    return;
  }

  // Handle retain.
  if (setterKind == ObjCPropertyDecl::Retain) {
    // In GC-only, there's nothing special that needs to be done.
    if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
      // fallthrough

    // In ARC, if the property is non-atomic, use expression emission,
    // which translates to objc_storeStrong. This isn't required, but
    // it's slightly nicer.
    } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
      // Using standard expression emission for the setter is only
      // acceptable if the ivar is __strong, which won't be true if
      // the property is annotated with __attribute__((NSObject)).
      // TODO: falling all the way back to objc_setProperty here is
      // just laziness, though; we could still use objc_storeStrong
      // if we hacked it right.
      if (ivarType.getObjCLifetime() == Qualifiers::OCL_Strong)
        Kind = Expression;
      else
        Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we need to at least use setProperty. However, if
    // the property isn't atomic, we can use normal expression
    // emission for the getter.
    } else if (!IsAtomic) {
      Kind = SetPropertyAndExpressionGet;
      return;

    // Otherwise, we have to use both setProperty and getProperty.
    } else {
      Kind = GetSetProperty;
      return;
    }
  }

  // If we're not atomic, just use expression accesses.
  if (!IsAtomic) {
    Kind = Expression;
    return;
  }

  // Properties on bitfield ivars need to be emitted using expression
  // accesses even if they're nominally atomic.
  if (ivar->isBitField()) {
    Kind = Expression;
    return;
  }

  // GC-qualified or ARC-qualified ivars need to be emitted as
  // expressions. This actually works out to being atomic anyway,
  // except for ARC __strong, but that should trigger the above code.
  if (ivarType.hasNonTrivialObjCLifetime() ||
      (CGM.getLangOpts().getGC() &&
       CGM.getContext().getObjCGCAttrKind(ivarType))) {
    Kind = Expression;
    return;
  }

  // Compute whether the ivar has strong members.
  if (CGM.getLangOpts().getGC())
    if (const RecordType *recordType = ivarType->getAs<RecordType>())
      HasStrong = recordType->getDecl()->hasObjectMember();

  // We can never access structs with object members with a native
  // access, because we need to use write barriers. This is what
  // objc_copyStruct is for.
  if (HasStrong) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, this is target-dependent and based on the size and
  // alignment of the ivar.

  // If the size of the ivar is not a power of two, give up. We don't
  // want to get into the business of doing compare-and-swaps.
  if (!IvarSize.isPowerOfTwo()) {
    Kind = CopyStruct;
    return;
  }

  llvm::Triple::ArchType arch =
    CGM.getTarget().getTriple().getArch();

  // Most architectures require memory to fit within a single cache
  // line, so the alignment has to be at least the size of the access.
  // Otherwise we have to grab a lock.
  if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
    Kind = CopyStruct;
    return;
  }

  // If the ivar's size exceeds the architecture's maximum atomic
  // access size, we have to use CopyStruct.
  if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
    Kind = CopyStruct;
    return;
  }

  // Otherwise, we can use native loads and stores.
  Kind = Native;
}

/// \brief Generate an Objective-C property getter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
  assert(OMD && "Invalid call to generate getter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);

  FinishFunction();
}

static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
  const Expr *getter = propImpl->getGetterCXXConstructor();
  if (!getter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // If the property has a reference type, we might just be binding a
  // reference, in which case the result will be a gl-value. We should
  // treat this as a non-trivial operation.
  if (getter->isGLValue())
    return false;

  // If we selected a trivial copy-constructor, we're okay.
  if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
    return (construct->getConstructor()->isTrivial());

  // The constructor might require cleanups (in which case it's never
  // trivial).
  assert(isa<ExprWithCleanups>(getter));
  return false;
}

/// emitCPPObjectAtomicGetterCall - Call the runtime function to
/// copy the ivar into the return slot.
static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
                                          llvm::Value *returnAddr,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
  //                           AtomicHelperFn);
  CallArgList args;

  // The 1st argument is the return Slot.
  args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);

  // The 2nd argument is the address of the ivar.
  llvm::Value *ivarAddr =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                          CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Constant *copyCppAtomicObjectFn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
  CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

void
CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        const ObjCMethodDecl *GetterMethodDecl,
                                        llvm::Constant *AtomicHelperFn) {
  // If there's a non-trivial 'get' expression, we just have to emit that.
  if (!hasTrivialGetExpr(propImpl)) {
    if (!AtomicHelperFn) {
      ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
                     /*nrvo*/ nullptr);
      EmitReturnStmt(ret);
    }
    else {
      ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
      emitCPPObjectAtomicGetterCall(*this, ReturnValue.getPointer(),
                                    ivar, AtomicHelperFn);
    }
    return;
  }

  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  QualType propType = prop->getType();
  ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();

  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();

  // Pick an implementation strategy.
  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
    llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
    bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay

    // Perform an atomic load. This does not impose ordering constraints.
    Address ivarAddr = LV.getAddress();
    ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
    llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
    load->setAtomic(llvm::AtomicOrdering::Unordered);

    // Store that value into the return address. Doing this with a
    // bitcast is likely to produce some pretty ugly IR, but it's not
    // the *most* terrible thing in the world.
    llvm::Type *retTy = ConvertType(getterMethod->getReturnType());
    uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
    llvm::Value *ivarVal = load;
    if (ivarSize > retTySize) {
      llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
      ivarVal = Builder.CreateTrunc(load, newTy);
      bitcastType = newTy->getPointerTo();
    }
    Builder.CreateStore(ivarVal,
                        Builder.CreateBitCast(ReturnValue, bitcastType));

    // Make sure we don't do an autorelease.
    AutoreleaseResult = false;
    return;
  }

  case PropertyImplStrategy::GetSetProperty: {
    llvm::Constant *getPropertyFn =
        CGM.getObjCRuntime().GetPropertyGetFunction();
    if (!getPropertyFn) {
      CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
      return;
    }
    CGCallee callee = CGCallee::forDirect(getPropertyFn);

    // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
    // FIXME: Can't this be simpler? This might even be worse than the
    // corresponding gcc code.
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(getterMethod->getCmdDecl()), "cmd");
    llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
    args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
             getContext().BoolTy);

    // FIXME: We shouldn't need to get the function info here, the
    // runtime already should have computed it to build the function.
    llvm::Instruction *CallInstruction;
    RValue RV = EmitCall(
        getTypes().arrangeBuiltinFunctionCall(propType, args),
        callee, ReturnValueSlot(), args, &CallInstruction);
    if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
      call->setTailCall();

    // We need to fix the type here. Ivars with copy & retain are
    // always objects so we don't need to worry about complex or
    // aggregates.
    RV = RValue::get(Builder.CreateBitCast(
        RV.getScalarVal(),
        getTypes().ConvertType(getterMethod->getReturnType())));

    EmitReturnOfRValue(RV, propType);

    // objc_getProperty does an autorelease, so we should suppress ours.
    AutoreleaseResult = false;

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructGetterCall(*this, ivar, strategy.isAtomic(),
                         strategy.hasStrongMember());
    return;

  case PropertyImplStrategy::Expression:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {
    LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);

    QualType ivarType = ivar->getType();
    switch (getEvaluationKind(ivarType)) {
    case TEK_Complex: {
      ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
      EmitStoreOfComplex(pair, MakeAddrLValue(ReturnValue, ivarType),
                         /*init*/ true);
      return;
    }
    case TEK_Aggregate:
      // The return value slot is guaranteed to not be aliased, but
      // that's not necessarily the same as "on the stack", so
      // we still potentially need objc_memmove_collectable.
      EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
      return;
    case TEK_Scalar: {
      llvm::Value *value;
      if (propType->isReferenceType()) {
        value = LV.getAddress().getPointer();
      } else {
        // We want to load and autoreleaseReturnValue ARC __weak ivars.
        if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
          if (getLangOpts().ObjCAutoRefCount) {
            value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
          } else {
            value = EmitARCLoadWeak(LV.getAddress());
          }

        // Otherwise we want to do a simple load, suppressing the
        // final autorelease.
        } else {
          value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
          AutoreleaseResult = false;
        }

        value = Builder.CreateBitCast(
            value, ConvertType(GetterMethodDecl->getReturnType()));
      }

      EmitReturnOfRValue(RValue::get(value), propType);
      return;
    }
    }
    llvm_unreachable("bad evaluation kind");
  }

  }
  llvm_unreachable("bad @property implementation strategy!");
}

/// emitStructSetterCall - Call the runtime function to store the value
/// from the first formal parameter into the given ivar.
static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
                                 ObjCIvarDecl *ivar) {
  // objc_copyStruct (&structIvar, &Arg,
  //                  sizeof (struct something), true, false);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                                                CGF.LoadObjCSelf(), ivar, 0)
    .getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // The third argument is the sizeof the type.
  llvm::Value *size =
    CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
  args.add(RValue::get(size), CGF.getContext().getSizeType());

  // The fourth argument is the 'isAtomic' flag.
  args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);

  // The fifth argument is the 'hasStrong' flag.
  // FIXME: should this really always be false?
  args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);

  llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}

/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
/// the value from the first formal parameter into the given ivar, using
/// the Cpp API for atomic Cpp objects with non-trivial copy assignment.
static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
                                          ObjCMethodDecl *OMD,
                                          ObjCIvarDecl *ivar,
                                          llvm::Constant *AtomicHelperFn) {
  // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
  //                           AtomicHelperFn);
  CallArgList args;

  // The first argument is the address of the ivar.
  llvm::Value *ivarAddr =
    CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
                          CGF.LoadObjCSelf(), ivar, 0).getPointer();
  ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
  args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);

  // The second argument is the address of the parameter variable.
  ParmVarDecl *argVar = *OMD->param_begin();
  DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
                     VK_LValue, SourceLocation());
  llvm::Value *argAddr = CGF.EmitLValue(&argRef).getPointer();
  argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
  args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);

  // Third argument is the helper function.
  args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);

  llvm::Constant *fn =
      CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
  CGCallee callee = CGCallee::forDirect(fn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}


static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
  Expr *setter = PID->getSetterCXXAssignment();
  if (!setter) return true;

  // Sema only makes one of these when the ivar has a C++ class type,
  // so the form is pretty constrained.

  // An operator call is trivial if the function it calls is trivial.
  // This also implies that there's nothing non-trivial going on with
  // the arguments, because operator= can only be trivial if it's a
  // synthesized assignment operator and therefore both parameters are
  // references.
  if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
    if (const FunctionDecl *callee
          = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
      if (callee->isTrivial())
        return true;
    return false;
  }

  assert(isa<ExprWithCleanups>(setter));
  return false;
}

static bool UseOptimizedSetter(CodeGenModule &CGM) {
  if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
    return false;
  return CGM.getLangOpts().ObjCRuntime.hasOptimizedSetter();
}

void
CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
                                        const ObjCPropertyImplDecl *propImpl,
                                        llvm::Constant *AtomicHelperFn) {
  const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
  ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
  ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();

  // Just use the setter expression if Sema gave us one and it's
  // non-trivial.
  if (!hasTrivialSetExpr(propImpl)) {
    if (!AtomicHelperFn)
      // If non-atomic, assignment is called directly.
      EmitStmt(propImpl->getSetterCXXAssignment());
    else
      // If atomic, assignment is called via a locking api.
      emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
                                    AtomicHelperFn);
    return;
  }

  PropertyImplStrategy strategy(CGM, propImpl);
  switch (strategy.getKind()) {
  case PropertyImplStrategy::Native: {
    // We don't need to do anything for a zero-size struct.
    if (strategy.getIvarSize().isZero())
      return;

    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());

    LValue ivarLValue =
      EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
    Address ivarAddr = ivarLValue.getAddress();

    // Currently, all atomic accesses have to be through integer
    // types, so there's no point in trying to pick a prettier type.
    llvm::Type *bitcastType =
      llvm::Type::getIntNTy(getLLVMContext(),
                            getContext().toBits(strategy.getIvarSize()));

    // Cast both arguments to the chosen operation type.
    argAddr = Builder.CreateElementBitCast(argAddr, bitcastType);
    ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);

    // This bitcast load is likely to cause some nasty IR.
    llvm::Value *load = Builder.CreateLoad(argAddr);

    // Perform an atomic store. There are no memory ordering requirements.
    llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
    store->setAtomic(llvm::AtomicOrdering::Unordered);
    return;
  }

  case PropertyImplStrategy::GetSetProperty:
  case PropertyImplStrategy::SetPropertyAndExpressionGet: {

    llvm::Constant *setOptimizedPropertyFn = nullptr;
    llvm::Constant *setPropertyFn = nullptr;
    if (UseOptimizedSetter(CGM)) {
      // 10.8 and iOS 6.0 code and GC is off
      setOptimizedPropertyFn =
          CGM.getObjCRuntime()
              .GetOptimizedPropertySetFunction(strategy.isAtomic(),
                                               strategy.isCopy());
      if (!setOptimizedPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
        return;
      }
    }
    else {
      setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
      if (!setPropertyFn) {
        CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
        return;
      }
    }

    // Emit objc_setProperty((id) self, _cmd, offset, arg,
    //                       <is-atomic>, <is-copy>).
    llvm::Value *cmd =
        Builder.CreateLoad(GetAddrOfLocalVar(setterMethod->getCmdDecl()));
    llvm::Value *self =
        Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
    llvm::Value *ivarOffset =
        EmitIvarOffset(classImpl->getClassInterface(), ivar);
    Address argAddr = GetAddrOfLocalVar(*setterMethod->param_begin());
    llvm::Value *arg = Builder.CreateLoad(argAddr, "arg");
    arg = Builder.CreateBitCast(arg, VoidPtrTy);

    CallArgList args;
    args.add(RValue::get(self), getContext().getObjCIdType());
    args.add(RValue::get(cmd), getContext().getObjCSelType());
    if (setOptimizedPropertyFn) {
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    } else {
      args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
      args.add(RValue::get(arg), getContext().getObjCIdType());
      args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
               getContext().BoolTy);
      args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
               getContext().BoolTy);
      // FIXME: We shouldn't need to get the function info here, the runtime
      // already should have computed it to build the function.
      CGCallee callee = CGCallee::forDirect(setPropertyFn);
      EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
               callee, ReturnValueSlot(), args);
    }

    return;
  }

  case PropertyImplStrategy::CopyStruct:
    emitStructSetterCall(*this, setterMethod, ivar);
    return;

  case PropertyImplStrategy::Expression:
    break;
  }

  // Otherwise, fake up some ASTs and emit a normal assignment.
  ValueDecl *selfDecl = setterMethod->getSelfDecl();
  DeclRefExpr self(selfDecl, false, selfDecl->getType(),
                   VK_LValue, SourceLocation());
  ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
                            selfDecl->getType(), CK_LValueToRValue, &self,
                            VK_RValue);
  ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
                          SourceLocation(), SourceLocation(),
                          &selfLoad, true, true);

  ParmVarDecl *argDecl = *setterMethod->param_begin();
  QualType argType = argDecl->getType().getNonReferenceType();
  DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
  ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
                           argType.getUnqualifiedType(), CK_LValueToRValue,
                           &arg, VK_RValue);

  // The property type can differ from the ivar type in some situations with
  // Objective-C pointer types; we can always bit cast the RHS in these cases.
  // The following absurdity is just to ensure well-formed IR.
  CastKind argCK = CK_NoOp;
  if (ivarRef.getType()->isObjCObjectPointerType()) {
    if (argLoad.getType()->isObjCObjectPointerType())
      argCK = CK_BitCast;
    else if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BlockPointerToObjCPointerCast;
    else
      argCK = CK_CPointerToObjCPointerCast;
  } else if (ivarRef.getType()->isBlockPointerType()) {
    if (argLoad.getType()->isBlockPointerType())
      argCK = CK_BitCast;
    else
      argCK = CK_AnyPointerToBlockPointerCast;
  } else if (ivarRef.getType()->isPointerType()) {
    argCK = CK_BitCast;
  }
  ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
                           ivarRef.getType(), argCK, &argLoad,
                           VK_RValue);
  Expr *finalArg = &argLoad;
  if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
                                           argLoad.getType()))
    finalArg = &argCast;


  BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
                        ivarRef.getType(), VK_RValue, OK_Ordinary,
                        SourceLocation(), FPOptions());
  EmitStmt(&assign);
}

/// \brief Generate an Objective-C property setter function.
///
/// The given Decl must be an ObjCImplementationDecl. \@synthesize
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
                                         const ObjCPropertyImplDecl *PID) {
  llvm::Constant *AtomicHelperFn =
      CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
  assert(OMD && "Invalid call to generate setter (empty method)");
  StartObjCMethod(OMD, IMP->getClassInterface());

  generateObjCSetterBody(IMP, PID, AtomicHelperFn);

  FinishFunction();
}

namespace {
  struct DestroyIvar final : EHScopeStack::Cleanup {
  private:
    llvm::Value *addr;
    const ObjCIvarDecl *ivar;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;
  public:
    DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
                CodeGenFunction::Destroyer *destroyer,
                bool useEHCleanupForArray)
      : addr(addr), ivar(ivar), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      LValue lvalue
        = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
      CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
                      flags.isForNormalCleanup() && useEHCleanupForArray);
    }
  };
}

/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
static void destroyARCStrongWithStore(CodeGenFunction &CGF,
                                      Address addr,
                                      QualType type) {
  llvm::Value *null = getNullForVariable(addr);
  CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
}

static void emitCXXDestructMethod(CodeGenFunction &CGF,
                                  ObjCImplementationDecl *impl) {
  CodeGenFunction::RunCleanupsScope scope(CGF);

  llvm::Value *self = CGF.LoadObjCSelf();

  const ObjCInterfaceDecl *iface = impl->getClassInterface();
  for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
       ivar; ivar = ivar->getNextIvar()) {
    QualType type = ivar->getType();

    // Check whether the ivar is a destructible type.
    QualType::DestructionKind dtorKind = type.isDestructedType();
    if (!dtorKind) continue;

    CodeGenFunction::Destroyer *destroyer = nullptr;

    // Use a call to objc_storeStrong to destroy strong ivars, for the
    // general benefit of the tools.
    if (dtorKind == QualType::DK_objc_strong_lifetime) {
      destroyer = destroyARCStrongWithStore;

    // Otherwise use the default for the destruction kind.
    } else {
      destroyer = CGF.getDestroyer(dtorKind);
    }

    CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);

    CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
                                         cleanupKind & EHCleanup);
  }

  assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
}

void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
                                                 ObjCMethodDecl *MD,
                                                 bool ctor) {
  MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
  StartObjCMethod(MD, IMP->getClassInterface());

  // Emit .cxx_construct.
  if (ctor) {
    // Suppress the final autorelease in ARC.
    AutoreleaseResult = false;

    for (const auto *IvarInit : IMP->inits()) {
      FieldDecl *Field = IvarInit->getAnyMember();
      ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
      LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
                                    LoadObjCSelf(), Ivar, 0);
      EmitAggExpr(IvarInit->getInit(),
                  AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
                                          AggValueSlot::DoesNotNeedGCBarriers,
                                          AggValueSlot::IsNotAliased));
    }
    // constructor returns 'self'.
    CodeGenTypes &Types = CGM.getTypes();
    QualType IdTy(CGM.getContext().getObjCIdType());
    llvm::Value *SelfAsId =
      Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
    EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);

  // Emit .cxx_destruct.
  } else {
    emitCXXDestructMethod(*this, IMP);
  }
  FinishFunction();
}

llvm::Value *CodeGenFunction::LoadObjCSelf() {
  VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
  DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
                  Self->getType(), VK_LValue, SourceLocation());
  return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}

QualType CodeGenFunction::TypeOfSelfObject() {
  const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
  ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
  const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
    getContext().getCanonicalType(selfDecl->getType()));
  return PTy->getPointeeType();
}

void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
  llvm::Constant *EnumerationMutationFnPtr =
      CGM.getObjCRuntime().EnumerationMutationFunction();
  if (!EnumerationMutationFnPtr) {
    CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
    return;
  }
  CGCallee EnumerationMutationFn =
      CGCallee::forDirect(EnumerationMutationFnPtr);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  RunCleanupsScope ForScope(*this);

  // The local variable comes into scope immediately.
  AutoVarEmission variable = AutoVarEmission::invalid();
  if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
    variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));

  JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");

  // Fast enumeration state.
  QualType StateTy = CGM.getObjCFastEnumerationStateType();
  Address StatePtr = CreateMemTemp(StateTy, "state.ptr");
  EmitNullInitialization(StatePtr, StateTy);

  // Number of elements in the items array.
  static const unsigned NumItems = 16;

  // Fetch the countByEnumeratingWithState:objects:count: selector.
  IdentifierInfo *II[] = {
    &CGM.getContext().Idents.get("countByEnumeratingWithState"),
    &CGM.getContext().Idents.get("objects"),
    &CGM.getContext().Idents.get("count")
  };
  Selector FastEnumSel =
      CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);

  QualType ItemsTy =
      getContext().getConstantArrayType(getContext().getObjCIdType(),
                                        llvm::APInt(32, NumItems),
                                        ArrayType::Normal, 0);
  Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");

  // Emit the collection pointer. In ARC, we do a retain.
1517 llvm::Value *Collection; 1518 if (getLangOpts().ObjCAutoRefCount) { 1519 Collection = EmitARCRetainScalarExpr(S.getCollection()); 1520 1521 // Enter a cleanup to do the release. 1522 EmitObjCConsumeObject(S.getCollection()->getType(), Collection); 1523 } else { 1524 Collection = EmitScalarExpr(S.getCollection()); 1525 } 1526 1527 // The 'continue' label needs to appear within the cleanup for the 1528 // collection object. 1529 JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next"); 1530 1531 // Send it our message: 1532 CallArgList Args; 1533 1534 // The first argument is a temporary of the enumeration-state type. 1535 Args.add(RValue::get(StatePtr.getPointer()), 1536 getContext().getPointerType(StateTy)); 1537 1538 // The second argument is a temporary array with space for NumItems 1539 // pointers. We'll actually be loading elements from the array 1540 // pointer written into the control state; this buffer is so that 1541 // collections that *aren't* backed by arrays can still queue up 1542 // batches of elements. 1543 Args.add(RValue::get(ItemsPtr.getPointer()), 1544 getContext().getPointerType(ItemsTy)); 1545 1546 // The third argument is the capacity of that temporary array. 1547 llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy); 1548 llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems); 1549 Args.add(RValue::get(Count), getContext().UnsignedLongTy); 1550 1551 // Start the enumeration. 1552 RValue CountRV = 1553 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1554 getContext().UnsignedLongTy, 1555 FastEnumSel, 1556 Collection, Args); 1557 1558 // The initial number of objects that were returned in the buffer. 1559 llvm::Value *initialBufferLimit = CountRV.getScalarVal(); 1560 1561 llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty"); 1562 llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit"); 1563 1564 llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy); 1565 1566 // If the limit pointer was zero to begin with, the collection is 1567 // empty; skip all this. Set the branch weight assuming this has the same 1568 // probability of exiting the loop as any other loop exit. 1569 uint64_t EntryCount = getCurrentProfileCount(); 1570 Builder.CreateCondBr( 1571 Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"), EmptyBB, 1572 LoopInitBB, 1573 createProfileWeights(EntryCount, getProfileCount(S.getBody()))); 1574 1575 // Otherwise, initialize the loop. 1576 EmitBlock(LoopInitBB); 1577 1578 // Save the initial mutations value. This is the value at an 1579 // address that was written into the state object by 1580 // countByEnumeratingWithState:objects:count:. 1581 Address StateMutationsPtrPtr = Builder.CreateStructGEP( 1582 StatePtr, 2, 2 * getPointerSize(), "mutationsptr.ptr"); 1583 llvm::Value *StateMutationsPtr 1584 = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1585 1586 llvm::Value *initialMutations = 1587 Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(), 1588 "forcoll.initial-mutations"); 1589 1590 // Start looping. This is the point we return to whenever we have a 1591 // fresh, non-empty batch of objects. 1592 llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody"); 1593 EmitBlock(LoopBodyBB); 1594 1595 // The current index into the buffer. 1596 llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index"); 1597 index->addIncoming(zero, LoopInitBB); 1598 1599 // The current buffer size. 
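  // Like the index, this is a PHI: it starts at the initial batch size and is
  // refreshed each time another batch of elements is fetched.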
1600 llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count"); 1601 count->addIncoming(initialBufferLimit, LoopInitBB); 1602 1603 incrementProfileCounter(&S); 1604 1605 // Check whether the mutations value has changed from where it was 1606 // at start. StateMutationsPtr should actually be invariant between 1607 // refreshes. 1608 StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr"); 1609 llvm::Value *currentMutations 1610 = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(), 1611 "statemutations"); 1612 1613 llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated"); 1614 llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated"); 1615 1616 Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations), 1617 WasNotMutatedBB, WasMutatedBB); 1618 1619 // If so, call the enumeration-mutation function. 1620 EmitBlock(WasMutatedBB); 1621 llvm::Value *V = 1622 Builder.CreateBitCast(Collection, 1623 ConvertType(getContext().getObjCIdType())); 1624 CallArgList Args2; 1625 Args2.add(RValue::get(V), getContext().getObjCIdType()); 1626 // FIXME: We shouldn't need to get the function info here, the runtime already 1627 // should have computed it to build the function. 1628 EmitCall( 1629 CGM.getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, Args2), 1630 EnumerationMutationFn, ReturnValueSlot(), Args2); 1631 1632 // Otherwise, or if the mutation function returns, just continue. 1633 EmitBlock(WasNotMutatedBB); 1634 1635 // Initialize the element variable. 1636 RunCleanupsScope elementVariableScope(*this); 1637 bool elementIsVariable; 1638 LValue elementLValue; 1639 QualType elementType; 1640 if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) { 1641 // Initialize the variable, in case it's a __block variable or something. 1642 EmitAutoVarInit(variable); 1643 1644 const VarDecl* D = cast<VarDecl>(SD->getSingleDecl()); 1645 DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(), 1646 VK_LValue, SourceLocation()); 1647 elementLValue = EmitLValue(&tempDRE); 1648 elementType = D->getType(); 1649 elementIsVariable = true; 1650 1651 if (D->isARCPseudoStrong()) 1652 elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone); 1653 } else { 1654 elementLValue = LValue(); // suppress warning 1655 elementType = cast<Expr>(S.getElement())->getType(); 1656 elementIsVariable = false; 1657 } 1658 llvm::Type *convertedElementType = ConvertType(elementType); 1659 1660 // Fetch the buffer out of the enumeration state. 1661 // TODO: this pointer should actually be invariant between 1662 // refreshes, which would help us do certain loop optimizations. 1663 Address StateItemsPtr = Builder.CreateStructGEP( 1664 StatePtr, 1, getPointerSize(), "stateitems.ptr"); 1665 llvm::Value *EnumStateItems = 1666 Builder.CreateLoad(StateItemsPtr, "stateitems"); 1667 1668 // Fetch the value at the current index from the buffer. 1669 llvm::Value *CurrentItemPtr = 1670 Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr"); 1671 llvm::Value *CurrentItem = 1672 Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign()); 1673 1674 // Cast that value to the right type. 1675 CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType, 1676 "currentitem"); 1677 1678 // Make sure we have an l-value. Yes, this gets evaluated every 1679 // time through the loop. 
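  // When the element is an arbitrary l-value expression rather than a fresh
  // variable, its side effects are deliberately replayed on each iteration.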
1680 if (!elementIsVariable) { 1681 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 1682 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue); 1683 } else { 1684 EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue, 1685 /*isInit*/ true); 1686 } 1687 1688 // If we do have an element variable, this assignment is the end of 1689 // its initialization. 1690 if (elementIsVariable) 1691 EmitAutoVarCleanups(variable); 1692 1693 // Perform the loop body, setting up break and continue labels. 1694 BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody)); 1695 { 1696 RunCleanupsScope Scope(*this); 1697 EmitStmt(S.getBody()); 1698 } 1699 BreakContinueStack.pop_back(); 1700 1701 // Destroy the element variable now. 1702 elementVariableScope.ForceCleanup(); 1703 1704 // Check whether there are more elements. 1705 EmitBlock(AfterBody.getBlock()); 1706 1707 llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch"); 1708 1709 // First we check in the local buffer. 1710 llvm::Value *indexPlusOne 1711 = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1)); 1712 1713 // If we haven't overrun the buffer yet, we can continue. 1714 // Set the branch weights based on the simplifying assumption that this is 1715 // like a while-loop, i.e., ignoring that the false branch fetches more 1716 // elements and then returns to the loop. 1717 Builder.CreateCondBr( 1718 Builder.CreateICmpULT(indexPlusOne, count), LoopBodyBB, FetchMoreBB, 1719 createProfileWeights(getProfileCount(S.getBody()), EntryCount)); 1720 1721 index->addIncoming(indexPlusOne, AfterBody.getBlock()); 1722 count->addIncoming(count, AfterBody.getBlock()); 1723 1724 // Otherwise, we have to fetch more elements. 1725 EmitBlock(FetchMoreBB); 1726 1727 CountRV = 1728 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 1729 getContext().UnsignedLongTy, 1730 FastEnumSel, 1731 Collection, Args); 1732 1733 // If we got a zero count, we're done. 1734 llvm::Value *refetchCount = CountRV.getScalarVal(); 1735 1736 // (note that the message send might split FetchMoreBB) 1737 index->addIncoming(zero, Builder.GetInsertBlock()); 1738 count->addIncoming(refetchCount, Builder.GetInsertBlock()); 1739 1740 Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero), 1741 EmptyBB, LoopBodyBB); 1742 1743 // No more elements. 1744 EmitBlock(EmptyBB); 1745 1746 if (!elementIsVariable) { 1747 // If the element was not a declaration, set it to be null. 
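    // A loop that runs to completion therefore leaves the element expression
    // as nil rather than pointing at the last object visited.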
1748 1749 llvm::Value *null = llvm::Constant::getNullValue(convertedElementType); 1750 elementLValue = EmitLValue(cast<Expr>(S.getElement())); 1751 EmitStoreThroughLValue(RValue::get(null), elementLValue); 1752 } 1753 1754 if (DI) 1755 DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd()); 1756 1757 ForScope.ForceCleanup(); 1758 EmitBlock(LoopEnd.getBlock()); 1759 } 1760 1761 void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) { 1762 CGM.getObjCRuntime().EmitTryStmt(*this, S); 1763 } 1764 1765 void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) { 1766 CGM.getObjCRuntime().EmitThrowStmt(*this, S); 1767 } 1768 1769 void CodeGenFunction::EmitObjCAtSynchronizedStmt( 1770 const ObjCAtSynchronizedStmt &S) { 1771 CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S); 1772 } 1773 1774 namespace { 1775 struct CallObjCRelease final : EHScopeStack::Cleanup { 1776 CallObjCRelease(llvm::Value *object) : object(object) {} 1777 llvm::Value *object; 1778 1779 void Emit(CodeGenFunction &CGF, Flags flags) override { 1780 // Releases at the end of the full-expression are imprecise. 1781 CGF.EmitARCRelease(object, ARCImpreciseLifetime); 1782 } 1783 }; 1784 } 1785 1786 /// Produce the code for a CK_ARCConsumeObject. Does a primitive 1787 /// release at the end of the full-expression. 1788 llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type, 1789 llvm::Value *object) { 1790 // If we're in a conditional branch, we need to make the cleanup 1791 // conditional. 1792 pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object); 1793 return object; 1794 } 1795 1796 llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type, 1797 llvm::Value *value) { 1798 return EmitARCRetainAutorelease(type, value); 1799 } 1800 1801 /// Given a number of pointers, inform the optimizer that they're 1802 /// being intrinsically used up until this point in the program. 1803 void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) { 1804 llvm::Constant *&fn = CGM.getObjCEntrypoints().clang_arc_use; 1805 if (!fn) { 1806 llvm::FunctionType *fnType = 1807 llvm::FunctionType::get(CGM.VoidTy, None, true); 1808 fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use"); 1809 } 1810 1811 // This isn't really a "runtime" function, but as an intrinsic it 1812 // doesn't really matter as long as we align things up. 
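  // Later ARC passes treat this call as a use that pins the lifetime of each
  // value and then drop it, so it is not expected to survive into the final
  // object code.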
1813 EmitNounwindRuntimeCall(fn, values); 1814 } 1815 1816 1817 static bool IsForwarding(StringRef Name) { 1818 return llvm::StringSwitch<bool>(Name) 1819 .Cases("objc_autoreleaseReturnValue", // ARCInstKind::AutoreleaseRV 1820 "objc_autorelease", // ARCInstKind::Autorelease 1821 "objc_retainAutoreleaseReturnValue", // ARCInstKind::FusedRetainAutoreleaseRV 1822 "objc_retainAutoreleasedReturnValue", // ARCInstKind::RetainRV 1823 "objc_retainAutorelease", // ARCInstKind::FusedRetainAutorelease 1824 "objc_retainedObject", // ARCInstKind::NoopCast 1825 "objc_retain", // ARCInstKind::Retain 1826 "objc_unretainedObject", // ARCInstKind::NoopCast 1827 "objc_unretainedPointer", // ARCInstKind::NoopCast 1828 "objc_unsafeClaimAutoreleasedReturnValue", // ARCInstKind::ClaimRV 1829 true) 1830 .Default(false); 1831 } 1832 1833 static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM, 1834 llvm::FunctionType *FTy, 1835 StringRef Name) { 1836 llvm::Constant *RTF = CGM.CreateRuntimeFunction(FTy, Name); 1837 1838 if (auto *F = dyn_cast<llvm::Function>(RTF)) { 1839 // If the target runtime doesn't naturally support ARC, emit weak 1840 // references to the runtime support library. We don't really 1841 // permit this to fail, but we need a particular relocation style. 1842 if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() && 1843 !CGM.getTriple().isOSBinFormatCOFF()) { 1844 F->setLinkage(llvm::Function::ExternalWeakLinkage); 1845 } else if (Name == "objc_retain" || Name == "objc_release") { 1846 // If we have Native ARC, set nonlazybind attribute for these APIs for 1847 // performance. 1848 F->addFnAttr(llvm::Attribute::NonLazyBind); 1849 } 1850 1851 if (IsForwarding(Name)) { 1852 llvm::AttrBuilder B; 1853 B.addAttribute(llvm::Attribute::Returned); 1854 1855 F->arg_begin()->addAttr(llvm::AttributeList::get(F->getContext(), 1, B)); 1856 } 1857 } 1858 1859 return RTF; 1860 } 1861 1862 /// Perform an operation having the signature 1863 /// i8* (i8*) 1864 /// where a null input causes a no-op and returns null. 1865 static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF, 1866 llvm::Value *value, 1867 llvm::Constant *&fn, 1868 StringRef fnName, 1869 bool isTailCall = false) { 1870 if (isa<llvm::ConstantPointerNull>(value)) 1871 return value; 1872 1873 if (!fn) { 1874 llvm::FunctionType *fnType = 1875 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, false); 1876 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1877 } 1878 1879 // Cast the argument to 'id'. 1880 llvm::Type *origType = value->getType(); 1881 value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy); 1882 1883 // Call the function. 1884 llvm::CallInst *call = CGF.EmitNounwindRuntimeCall(fn, value); 1885 if (isTailCall) 1886 call->setTailCall(); 1887 1888 // Cast the result back to the original type. 1889 return CGF.Builder.CreateBitCast(call, origType); 1890 } 1891 1892 /// Perform an operation having the following signature: 1893 /// i8* (i8**) 1894 static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, 1895 Address addr, 1896 llvm::Constant *&fn, 1897 StringRef fnName) { 1898 if (!fn) { 1899 llvm::FunctionType *fnType = 1900 llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrPtrTy, false); 1901 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1902 } 1903 1904 // Cast the argument to 'id*'. 1905 llvm::Type *origType = addr.getElementType(); 1906 addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy); 1907 1908 // Call the function. 
1909 llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer()); 1910 1911 // Cast the result back to a dereference of the original type. 1912 if (origType != CGF.Int8PtrTy) 1913 result = CGF.Builder.CreateBitCast(result, origType); 1914 1915 return result; 1916 } 1917 1918 /// Perform an operation having the following signature: 1919 /// i8* (i8**, i8*) 1920 static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF, 1921 Address addr, 1922 llvm::Value *value, 1923 llvm::Constant *&fn, 1924 StringRef fnName, 1925 bool ignored) { 1926 assert(addr.getElementType() == value->getType()); 1927 1928 if (!fn) { 1929 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy }; 1930 1931 llvm::FunctionType *fnType 1932 = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false); 1933 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1934 } 1935 1936 llvm::Type *origType = value->getType(); 1937 1938 llvm::Value *args[] = { 1939 CGF.Builder.CreateBitCast(addr.getPointer(), CGF.Int8PtrPtrTy), 1940 CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy) 1941 }; 1942 llvm::CallInst *result = CGF.EmitNounwindRuntimeCall(fn, args); 1943 1944 if (ignored) return nullptr; 1945 1946 return CGF.Builder.CreateBitCast(result, origType); 1947 } 1948 1949 /// Perform an operation having the following signature: 1950 /// void (i8**, i8**) 1951 static void emitARCCopyOperation(CodeGenFunction &CGF, 1952 Address dst, 1953 Address src, 1954 llvm::Constant *&fn, 1955 StringRef fnName) { 1956 assert(dst.getType() == src.getType()); 1957 1958 if (!fn) { 1959 llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrPtrTy }; 1960 1961 llvm::FunctionType *fnType 1962 = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false); 1963 fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName); 1964 } 1965 1966 llvm::Value *args[] = { 1967 CGF.Builder.CreateBitCast(dst.getPointer(), CGF.Int8PtrPtrTy), 1968 CGF.Builder.CreateBitCast(src.getPointer(), CGF.Int8PtrPtrTy) 1969 }; 1970 CGF.EmitNounwindRuntimeCall(fn, args); 1971 } 1972 1973 /// Produce the code to do a retain. Based on the type, calls one of: 1974 /// call i8* \@objc_retain(i8* %value) 1975 /// call i8* \@objc_retainBlock(i8* %value) 1976 llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) { 1977 if (type->isBlockPointerType()) 1978 return EmitARCRetainBlock(value, /*mandatory*/ false); 1979 else 1980 return EmitARCRetainNonBlock(value); 1981 } 1982 1983 /// Retain the given object, with normal retain semantics. 1984 /// call i8* \@objc_retain(i8* %value) 1985 llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) { 1986 return emitARCValueOperation(*this, value, 1987 CGM.getObjCEntrypoints().objc_retain, 1988 "objc_retain"); 1989 } 1990 1991 /// Retain the given block, with _Block_copy semantics. 1992 /// call i8* \@objc_retainBlock(i8* %value) 1993 /// 1994 /// \param mandatory - If false, emit the call with metadata 1995 /// indicating that it's okay for the optimizer to eliminate this call 1996 /// if it can prove that the block never escapes except down the stack. 
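/// For example, a block literal that is passed straight to a call and never
/// otherwise escapes does not actually have to be copied to the heap.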
1997 llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
1998                                                  bool mandatory) {
1999   llvm::Value *result
2000     = emitARCValueOperation(*this, value,
2001                             CGM.getObjCEntrypoints().objc_retainBlock,
2002                             "objc_retainBlock");
2003
2004   // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
2005   // tell the optimizer that it doesn't need to do this copy if the
2006   // block doesn't escape, where being passed as an argument doesn't
2007   // count as escaping.
2008   if (!mandatory && isa<llvm::Instruction>(result)) {
2009     llvm::CallInst *call
2010       = cast<llvm::CallInst>(result->stripPointerCasts());
2011     assert(call->getCalledValue() == CGM.getObjCEntrypoints().objc_retainBlock);
2012
2013     call->setMetadata("clang.arc.copy_on_escape",
2014                       llvm::MDNode::get(Builder.getContext(), None));
2015   }
2016
2017   return result;
2018 }
2019
2020 static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
2021   // Fetch the void(void) inline asm which marks that we're going to
2022   // do something with the autoreleased return value.
2023   llvm::InlineAsm *&marker
2024     = CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker;
2025   if (!marker) {
2026     StringRef assembly
2027       = CGF.CGM.getTargetCodeGenInfo()
2028            .getARCRetainAutoreleasedReturnValueMarker();
2029
2030     // If we have an empty assembly string, there's nothing to do.
2031     if (assembly.empty()) {
2032
2033     // Otherwise, at -O0, build an inline asm that we're going to call
2034     // in a moment.
2035     } else if (CGF.CGM.getCodeGenOpts().OptimizationLevel == 0) {
2036       llvm::FunctionType *type =
2037         llvm::FunctionType::get(CGF.VoidTy, /*variadic*/false);
2038
2039       marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
2040
2041     // If we're at -O1 and above, we don't want to litter the code
2042     // with this marker yet, so leave a breadcrumb for the ARC
2043     // optimizer to pick up.
2044     } else {
2045       llvm::NamedMDNode *metadata =
2046         CGF.CGM.getModule().getOrInsertNamedMetadata(
2047             "clang.arc.retainAutoreleasedReturnValueMarker");
2048       assert(metadata->getNumOperands() <= 1);
2049       if (metadata->getNumOperands() == 0) {
2050         auto &ctx = CGF.getLLVMContext();
2051         metadata->addOperand(llvm::MDNode::get(ctx,
2052                                      llvm::MDString::get(ctx, assembly)));
2053       }
2054     }
2055   }
2056
2057   // Call the marker asm if we made one, which we do only at -O0.
2058   if (marker)
2059     CGF.Builder.CreateCall(marker);
2060 }
2061
2062 /// Retain the given object which is the result of a function call.
2063 ///   call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
2064 ///
2065 /// Yes, this function name is one character away from a different
2066 /// call with completely different semantics.
2067 llvm::Value *
2068 CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
2069   emitAutoreleasedReturnValueMarker(*this);
2070   return emitARCValueOperation(*this, value,
2071               CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
2072               "objc_retainAutoreleasedReturnValue");
2073 }
2074
2075 /// Claim a possibly-autoreleased return value at +0. This is only
2076 /// valid to do in contexts which do not rely on the retain to keep
2077 /// the object valid for all of its uses; for example, when
2078 /// the value is ignored, or when it is being assigned to an
2079 /// __unsafe_unretained variable.
2080 /// 2081 /// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value) 2082 llvm::Value * 2083 CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) { 2084 emitAutoreleasedReturnValueMarker(*this); 2085 return emitARCValueOperation(*this, value, 2086 CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue, 2087 "objc_unsafeClaimAutoreleasedReturnValue"); 2088 } 2089 2090 /// Release the given object. 2091 /// call void \@objc_release(i8* %value) 2092 void CodeGenFunction::EmitARCRelease(llvm::Value *value, 2093 ARCPreciseLifetime_t precise) { 2094 if (isa<llvm::ConstantPointerNull>(value)) return; 2095 2096 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_release; 2097 if (!fn) { 2098 llvm::FunctionType *fnType = 2099 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); 2100 fn = createARCRuntimeFunction(CGM, fnType, "objc_release"); 2101 } 2102 2103 // Cast the argument to 'id'. 2104 value = Builder.CreateBitCast(value, Int8PtrTy); 2105 2106 // Call objc_release. 2107 llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value); 2108 2109 if (precise == ARCImpreciseLifetime) { 2110 call->setMetadata("clang.imprecise_release", 2111 llvm::MDNode::get(Builder.getContext(), None)); 2112 } 2113 } 2114 2115 /// Destroy a __strong variable. 2116 /// 2117 /// At -O0, emit a call to store 'null' into the address; 2118 /// instrumenting tools prefer this because the address is exposed, 2119 /// but it's relatively cumbersome to optimize. 2120 /// 2121 /// At -O1 and above, just load and call objc_release. 2122 /// 2123 /// call void \@objc_storeStrong(i8** %addr, i8* null) 2124 void CodeGenFunction::EmitARCDestroyStrong(Address addr, 2125 ARCPreciseLifetime_t precise) { 2126 if (CGM.getCodeGenOpts().OptimizationLevel == 0) { 2127 llvm::Value *null = getNullForVariable(addr); 2128 EmitARCStoreStrongCall(addr, null, /*ignored*/ true); 2129 return; 2130 } 2131 2132 llvm::Value *value = Builder.CreateLoad(addr); 2133 EmitARCRelease(value, precise); 2134 } 2135 2136 /// Store into a strong object. Always calls this: 2137 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2138 llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(Address addr, 2139 llvm::Value *value, 2140 bool ignored) { 2141 assert(addr.getElementType() == value->getType()); 2142 2143 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_storeStrong; 2144 if (!fn) { 2145 llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy }; 2146 llvm::FunctionType *fnType 2147 = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false); 2148 fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong"); 2149 } 2150 2151 llvm::Value *args[] = { 2152 Builder.CreateBitCast(addr.getPointer(), Int8PtrPtrTy), 2153 Builder.CreateBitCast(value, Int8PtrTy) 2154 }; 2155 EmitNounwindRuntimeCall(fn, args); 2156 2157 if (ignored) return nullptr; 2158 return value; 2159 } 2160 2161 /// Store into a strong object. Sometimes calls this: 2162 /// call void \@objc_storeStrong(i8** %addr, i8* %value) 2163 /// Other times, breaks it down into components. 2164 llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst, 2165 llvm::Value *newValue, 2166 bool ignored) { 2167 QualType type = dst.getType(); 2168 bool isBlock = type->isBlockPointerType(); 2169 2170 // Use a store barrier at -O0 unless this is a block type or the 2171 // lvalue is inadequately aligned. 
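  // (The barrier in question is the fused objc_storeStrong call, which keeps
  // the retain/store/release sequence visible to instrumenting tools as a
  // single runtime call.)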
2172 if (shouldUseFusedARCCalls() && 2173 !isBlock && 2174 (dst.getAlignment().isZero() || 2175 dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) { 2176 return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored); 2177 } 2178 2179 // Otherwise, split it out. 2180 2181 // Retain the new value. 2182 newValue = EmitARCRetain(type, newValue); 2183 2184 // Read the old value. 2185 llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation()); 2186 2187 // Store. We do this before the release so that any deallocs won't 2188 // see the old value. 2189 EmitStoreOfScalar(newValue, dst); 2190 2191 // Finally, release the old value. 2192 EmitARCRelease(oldValue, dst.isARCPreciseLifetime()); 2193 2194 return newValue; 2195 } 2196 2197 /// Autorelease the given object. 2198 /// call i8* \@objc_autorelease(i8* %value) 2199 llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) { 2200 return emitARCValueOperation(*this, value, 2201 CGM.getObjCEntrypoints().objc_autorelease, 2202 "objc_autorelease"); 2203 } 2204 2205 /// Autorelease the given object. 2206 /// call i8* \@objc_autoreleaseReturnValue(i8* %value) 2207 llvm::Value * 2208 CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) { 2209 return emitARCValueOperation(*this, value, 2210 CGM.getObjCEntrypoints().objc_autoreleaseReturnValue, 2211 "objc_autoreleaseReturnValue", 2212 /*isTailCall*/ true); 2213 } 2214 2215 /// Do a fused retain/autorelease of the given object. 2216 /// call i8* \@objc_retainAutoreleaseReturnValue(i8* %value) 2217 llvm::Value * 2218 CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) { 2219 return emitARCValueOperation(*this, value, 2220 CGM.getObjCEntrypoints().objc_retainAutoreleaseReturnValue, 2221 "objc_retainAutoreleaseReturnValue", 2222 /*isTailCall*/ true); 2223 } 2224 2225 /// Do a fused retain/autorelease of the given object. 2226 /// call i8* \@objc_retainAutorelease(i8* %value) 2227 /// or 2228 /// %retain = call i8* \@objc_retainBlock(i8* %value) 2229 /// call i8* \@objc_autorelease(i8* %retain) 2230 llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type, 2231 llvm::Value *value) { 2232 if (!type->isBlockPointerType()) 2233 return EmitARCRetainAutoreleaseNonBlock(value); 2234 2235 if (isa<llvm::ConstantPointerNull>(value)) return value; 2236 2237 llvm::Type *origType = value->getType(); 2238 value = Builder.CreateBitCast(value, Int8PtrTy); 2239 value = EmitARCRetainBlock(value, /*mandatory*/ true); 2240 value = EmitARCAutorelease(value); 2241 return Builder.CreateBitCast(value, origType); 2242 } 2243 2244 /// Do a fused retain/autorelease of the given object. 2245 /// call i8* \@objc_retainAutorelease(i8* %value) 2246 llvm::Value * 2247 CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) { 2248 return emitARCValueOperation(*this, value, 2249 CGM.getObjCEntrypoints().objc_retainAutorelease, 2250 "objc_retainAutorelease"); 2251 } 2252 2253 /// i8* \@objc_loadWeak(i8** %addr) 2254 /// Essentially objc_autorelease(objc_loadWeakRetained(addr)). 
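/// The result is therefore kept alive only by the autorelease pool, so
/// callers that need it to live longer must retain it themselves.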
2255 llvm::Value *CodeGenFunction::EmitARCLoadWeak(Address addr) { 2256 return emitARCLoadOperation(*this, addr, 2257 CGM.getObjCEntrypoints().objc_loadWeak, 2258 "objc_loadWeak"); 2259 } 2260 2261 /// i8* \@objc_loadWeakRetained(i8** %addr) 2262 llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(Address addr) { 2263 return emitARCLoadOperation(*this, addr, 2264 CGM.getObjCEntrypoints().objc_loadWeakRetained, 2265 "objc_loadWeakRetained"); 2266 } 2267 2268 /// i8* \@objc_storeWeak(i8** %addr, i8* %value) 2269 /// Returns %value. 2270 llvm::Value *CodeGenFunction::EmitARCStoreWeak(Address addr, 2271 llvm::Value *value, 2272 bool ignored) { 2273 return emitARCStoreOperation(*this, addr, value, 2274 CGM.getObjCEntrypoints().objc_storeWeak, 2275 "objc_storeWeak", ignored); 2276 } 2277 2278 /// i8* \@objc_initWeak(i8** %addr, i8* %value) 2279 /// Returns %value. %addr is known to not have a current weak entry. 2280 /// Essentially equivalent to: 2281 /// *addr = nil; objc_storeWeak(addr, value); 2282 void CodeGenFunction::EmitARCInitWeak(Address addr, llvm::Value *value) { 2283 // If we're initializing to null, just write null to memory; no need 2284 // to get the runtime involved. But don't do this if optimization 2285 // is enabled, because accounting for this would make the optimizer 2286 // much more complicated. 2287 if (isa<llvm::ConstantPointerNull>(value) && 2288 CGM.getCodeGenOpts().OptimizationLevel == 0) { 2289 Builder.CreateStore(value, addr); 2290 return; 2291 } 2292 2293 emitARCStoreOperation(*this, addr, value, 2294 CGM.getObjCEntrypoints().objc_initWeak, 2295 "objc_initWeak", /*ignored*/ true); 2296 } 2297 2298 /// void \@objc_destroyWeak(i8** %addr) 2299 /// Essentially objc_storeWeak(addr, nil). 2300 void CodeGenFunction::EmitARCDestroyWeak(Address addr) { 2301 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_destroyWeak; 2302 if (!fn) { 2303 llvm::FunctionType *fnType = 2304 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrPtrTy, false); 2305 fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak"); 2306 } 2307 2308 // Cast the argument to 'id*'. 2309 addr = Builder.CreateBitCast(addr, Int8PtrPtrTy); 2310 2311 EmitNounwindRuntimeCall(fn, addr.getPointer()); 2312 } 2313 2314 /// void \@objc_moveWeak(i8** %dest, i8** %src) 2315 /// Disregards the current value in %dest. Leaves %src pointing to nothing. 2316 /// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)). 2317 void CodeGenFunction::EmitARCMoveWeak(Address dst, Address src) { 2318 emitARCCopyOperation(*this, dst, src, 2319 CGM.getObjCEntrypoints().objc_moveWeak, 2320 "objc_moveWeak"); 2321 } 2322 2323 /// void \@objc_copyWeak(i8** %dest, i8** %src) 2324 /// Disregards the current value in %dest. Essentially 2325 /// objc_release(objc_initWeak(dest, objc_readWeakRetained(src))) 2326 void CodeGenFunction::EmitARCCopyWeak(Address dst, Address src) { 2327 emitARCCopyOperation(*this, dst, src, 2328 CGM.getObjCEntrypoints().objc_copyWeak, 2329 "objc_copyWeak"); 2330 } 2331 2332 /// Produce the code to do a objc_autoreleasepool_push. 
2333 /// call i8* \@objc_autoreleasePoolPush(void) 2334 llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() { 2335 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPush; 2336 if (!fn) { 2337 llvm::FunctionType *fnType = 2338 llvm::FunctionType::get(Int8PtrTy, false); 2339 fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush"); 2340 } 2341 2342 return EmitNounwindRuntimeCall(fn); 2343 } 2344 2345 /// Produce the code to do a primitive release. 2346 /// call void \@objc_autoreleasePoolPop(i8* %ptr) 2347 void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) { 2348 assert(value->getType() == Int8PtrTy); 2349 2350 llvm::Constant *&fn = CGM.getObjCEntrypoints().objc_autoreleasePoolPop; 2351 if (!fn) { 2352 llvm::FunctionType *fnType = 2353 llvm::FunctionType::get(Builder.getVoidTy(), Int8PtrTy, false); 2354 2355 // We don't want to use a weak import here; instead we should not 2356 // fall into this path. 2357 fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop"); 2358 } 2359 2360 // objc_autoreleasePoolPop can throw. 2361 EmitRuntimeCallOrInvoke(fn, value); 2362 } 2363 2364 /// Produce the code to do an MRR version objc_autoreleasepool_push. 2365 /// Which is: [[NSAutoreleasePool alloc] init]; 2366 /// Where alloc is declared as: + (id) alloc; in NSAutoreleasePool class. 2367 /// init is declared as: - (id) init; in its NSObject super class. 2368 /// 2369 llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() { 2370 CGObjCRuntime &Runtime = CGM.getObjCRuntime(); 2371 llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(*this); 2372 // [NSAutoreleasePool alloc] 2373 IdentifierInfo *II = &CGM.getContext().Idents.get("alloc"); 2374 Selector AllocSel = getContext().Selectors.getSelector(0, &II); 2375 CallArgList Args; 2376 RValue AllocRV = 2377 Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 2378 getContext().getObjCIdType(), 2379 AllocSel, Receiver, Args); 2380 2381 // [Receiver init] 2382 Receiver = AllocRV.getScalarVal(); 2383 II = &CGM.getContext().Idents.get("init"); 2384 Selector InitSel = getContext().Selectors.getSelector(0, &II); 2385 RValue InitRV = 2386 Runtime.GenerateMessageSend(*this, ReturnValueSlot(), 2387 getContext().getObjCIdType(), 2388 InitSel, Receiver, Args); 2389 return InitRV.getScalarVal(); 2390 } 2391 2392 /// Produce the code to do a primitive release. 
2393 /// [tmp drain]; 2394 void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) { 2395 IdentifierInfo *II = &CGM.getContext().Idents.get("drain"); 2396 Selector DrainSel = getContext().Selectors.getSelector(0, &II); 2397 CallArgList Args; 2398 CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(), 2399 getContext().VoidTy, DrainSel, Arg, Args); 2400 } 2401 2402 void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF, 2403 Address addr, 2404 QualType type) { 2405 CGF.EmitARCDestroyStrong(addr, ARCPreciseLifetime); 2406 } 2407 2408 void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF, 2409 Address addr, 2410 QualType type) { 2411 CGF.EmitARCDestroyStrong(addr, ARCImpreciseLifetime); 2412 } 2413 2414 void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF, 2415 Address addr, 2416 QualType type) { 2417 CGF.EmitARCDestroyWeak(addr); 2418 } 2419 2420 namespace { 2421 struct CallObjCAutoreleasePoolObject final : EHScopeStack::Cleanup { 2422 llvm::Value *Token; 2423 2424 CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2425 2426 void Emit(CodeGenFunction &CGF, Flags flags) override { 2427 CGF.EmitObjCAutoreleasePoolPop(Token); 2428 } 2429 }; 2430 struct CallObjCMRRAutoreleasePoolObject final : EHScopeStack::Cleanup { 2431 llvm::Value *Token; 2432 2433 CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {} 2434 2435 void Emit(CodeGenFunction &CGF, Flags flags) override { 2436 CGF.EmitObjCMRRAutoreleasePoolPop(Token); 2437 } 2438 }; 2439 } 2440 2441 void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) { 2442 if (CGM.getLangOpts().ObjCAutoRefCount) 2443 EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr); 2444 else 2445 EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr); 2446 } 2447 2448 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2449 LValue lvalue, 2450 QualType type) { 2451 switch (type.getObjCLifetime()) { 2452 case Qualifiers::OCL_None: 2453 case Qualifiers::OCL_ExplicitNone: 2454 case Qualifiers::OCL_Strong: 2455 case Qualifiers::OCL_Autoreleasing: 2456 return TryEmitResult(CGF.EmitLoadOfLValue(lvalue, 2457 SourceLocation()).getScalarVal(), 2458 false); 2459 2460 case Qualifiers::OCL_Weak: 2461 return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()), 2462 true); 2463 } 2464 2465 llvm_unreachable("impossible lifetime!"); 2466 } 2467 2468 static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2469 const Expr *e) { 2470 e = e->IgnoreParens(); 2471 QualType type = e->getType(); 2472 2473 // If we're loading retained from a __strong xvalue, we can avoid 2474 // an extra retain/release pair by zeroing out the source of this 2475 // "move" operation. 2476 if (e->isXValue() && 2477 !type.isConstQualified() && 2478 type.getObjCLifetime() == Qualifiers::OCL_Strong) { 2479 // Emit the lvalue. 2480 LValue lv = CGF.EmitLValue(e); 2481 2482 // Load the object pointer. 2483 llvm::Value *result = CGF.EmitLoadOfLValue(lv, 2484 SourceLocation()).getScalarVal(); 2485 2486 // Set the source pointer to NULL. 2487 CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv); 2488 2489 return TryEmitResult(result, true); 2490 } 2491 2492 // As a very special optimization, in ARC++, if the l-value is the 2493 // result of a non-volatile assignment, do a simple retain of the 2494 // result of the call to objc_storeWeak instead of reloading. 
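  // This relies on objc_storeWeak returning the value it stored, so the
  // assignment expression already yields the object without a second weak
  // load.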
2495 if (CGF.getLangOpts().CPlusPlus && 2496 !type.isVolatileQualified() && 2497 type.getObjCLifetime() == Qualifiers::OCL_Weak && 2498 isa<BinaryOperator>(e) && 2499 cast<BinaryOperator>(e)->getOpcode() == BO_Assign) 2500 return TryEmitResult(CGF.EmitScalarExpr(e), false); 2501 2502 return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type); 2503 } 2504 2505 typedef llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, 2506 llvm::Value *value)> 2507 ValueTransform; 2508 2509 /// Insert code immediately after a call. 2510 static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF, 2511 llvm::Value *value, 2512 ValueTransform doAfterCall, 2513 ValueTransform doFallback) { 2514 if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) { 2515 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); 2516 2517 // Place the retain immediately following the call. 2518 CGF.Builder.SetInsertPoint(call->getParent(), 2519 ++llvm::BasicBlock::iterator(call)); 2520 value = doAfterCall(CGF, value); 2521 2522 CGF.Builder.restoreIP(ip); 2523 return value; 2524 } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) { 2525 CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP(); 2526 2527 // Place the retain at the beginning of the normal destination block. 2528 llvm::BasicBlock *BB = invoke->getNormalDest(); 2529 CGF.Builder.SetInsertPoint(BB, BB->begin()); 2530 value = doAfterCall(CGF, value); 2531 2532 CGF.Builder.restoreIP(ip); 2533 return value; 2534 2535 // Bitcasts can arise because of related-result returns. Rewrite 2536 // the operand. 2537 } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) { 2538 llvm::Value *operand = bitcast->getOperand(0); 2539 operand = emitARCOperationAfterCall(CGF, operand, doAfterCall, doFallback); 2540 bitcast->setOperand(0, operand); 2541 return bitcast; 2542 2543 // Generic fall-back case. 2544 } else { 2545 // Retain using the non-block variant: we never need to do a copy 2546 // of a block that's been returned to us. 2547 return doFallback(CGF, value); 2548 } 2549 } 2550 2551 /// Given that the given expression is some sort of call (which does 2552 /// not return retained), emit a retain following it. 2553 static llvm::Value *emitARCRetainCallResult(CodeGenFunction &CGF, 2554 const Expr *e) { 2555 llvm::Value *value = CGF.EmitScalarExpr(e); 2556 return emitARCOperationAfterCall(CGF, value, 2557 [](CodeGenFunction &CGF, llvm::Value *value) { 2558 return CGF.EmitARCRetainAutoreleasedReturnValue(value); 2559 }, 2560 [](CodeGenFunction &CGF, llvm::Value *value) { 2561 return CGF.EmitARCRetainNonBlock(value); 2562 }); 2563 } 2564 2565 /// Given that the given expression is some sort of call (which does 2566 /// not return retained), perform an unsafeClaim following it. 
2567 static llvm::Value *emitARCUnsafeClaimCallResult(CodeGenFunction &CGF, 2568 const Expr *e) { 2569 llvm::Value *value = CGF.EmitScalarExpr(e); 2570 return emitARCOperationAfterCall(CGF, value, 2571 [](CodeGenFunction &CGF, llvm::Value *value) { 2572 return CGF.EmitARCUnsafeClaimAutoreleasedReturnValue(value); 2573 }, 2574 [](CodeGenFunction &CGF, llvm::Value *value) { 2575 return value; 2576 }); 2577 } 2578 2579 llvm::Value *CodeGenFunction::EmitARCReclaimReturnedObject(const Expr *E, 2580 bool allowUnsafeClaim) { 2581 if (allowUnsafeClaim && 2582 CGM.getLangOpts().ObjCRuntime.hasARCUnsafeClaimAutoreleasedReturnValue()) { 2583 return emitARCUnsafeClaimCallResult(*this, E); 2584 } else { 2585 llvm::Value *value = emitARCRetainCallResult(*this, E); 2586 return EmitObjCConsumeObject(E->getType(), value); 2587 } 2588 } 2589 2590 /// Determine whether it might be important to emit a separate 2591 /// objc_retain_block on the result of the given expression, or 2592 /// whether it's okay to just emit it in a +1 context. 2593 static bool shouldEmitSeparateBlockRetain(const Expr *e) { 2594 assert(e->getType()->isBlockPointerType()); 2595 e = e->IgnoreParens(); 2596 2597 // For future goodness, emit block expressions directly in +1 2598 // contexts if we can. 2599 if (isa<BlockExpr>(e)) 2600 return false; 2601 2602 if (const CastExpr *cast = dyn_cast<CastExpr>(e)) { 2603 switch (cast->getCastKind()) { 2604 // Emitting these operations in +1 contexts is goodness. 2605 case CK_LValueToRValue: 2606 case CK_ARCReclaimReturnedObject: 2607 case CK_ARCConsumeObject: 2608 case CK_ARCProduceObject: 2609 return false; 2610 2611 // These operations preserve a block type. 2612 case CK_NoOp: 2613 case CK_BitCast: 2614 return shouldEmitSeparateBlockRetain(cast->getSubExpr()); 2615 2616 // These operations are known to be bad (or haven't been considered). 2617 case CK_AnyPointerToBlockPointerCast: 2618 default: 2619 return true; 2620 } 2621 } 2622 2623 return true; 2624 } 2625 2626 namespace { 2627 /// A CRTP base class for emitting expressions of retainable object 2628 /// pointer type in ARC. 2629 template <typename Impl, typename Result> class ARCExprEmitter { 2630 protected: 2631 CodeGenFunction &CGF; 2632 Impl &asImpl() { return *static_cast<Impl*>(this); } 2633 2634 ARCExprEmitter(CodeGenFunction &CGF) : CGF(CGF) {} 2635 2636 public: 2637 Result visit(const Expr *e); 2638 Result visitCastExpr(const CastExpr *e); 2639 Result visitPseudoObjectExpr(const PseudoObjectExpr *e); 2640 Result visitBinaryOperator(const BinaryOperator *e); 2641 Result visitBinAssign(const BinaryOperator *e); 2642 Result visitBinAssignUnsafeUnretained(const BinaryOperator *e); 2643 Result visitBinAssignAutoreleasing(const BinaryOperator *e); 2644 Result visitBinAssignWeak(const BinaryOperator *e); 2645 Result visitBinAssignStrong(const BinaryOperator *e); 2646 2647 // Minimal implementation: 2648 // Result visitLValueToRValue(const Expr *e) 2649 // Result visitConsumeObject(const Expr *e) 2650 // Result visitExtendBlockObject(const Expr *e) 2651 // Result visitReclaimReturnedObject(const Expr *e) 2652 // Result visitCall(const Expr *e) 2653 // Result visitExpr(const Expr *e) 2654 // 2655 // Result emitBitCast(Result result, llvm::Type *resultType) 2656 // llvm::Value *getValueOfResult(Result result) 2657 }; 2658 } 2659 2660 /// Try to emit a PseudoObjectExpr under special ARC rules. 2661 /// 2662 /// This massively duplicates emitPseudoObjectRValue. 
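/// The difference is that the result expression is routed through the CRTP
/// visitor, so a retain-style emitter can still produce it at +1.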
2663 template <typename Impl, typename Result> 2664 Result 2665 ARCExprEmitter<Impl,Result>::visitPseudoObjectExpr(const PseudoObjectExpr *E) { 2666 SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques; 2667 2668 // Find the result expression. 2669 const Expr *resultExpr = E->getResultExpr(); 2670 assert(resultExpr); 2671 Result result; 2672 2673 for (PseudoObjectExpr::const_semantics_iterator 2674 i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) { 2675 const Expr *semantic = *i; 2676 2677 // If this semantic expression is an opaque value, bind it 2678 // to the result of its source expression. 2679 if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) { 2680 typedef CodeGenFunction::OpaqueValueMappingData OVMA; 2681 OVMA opaqueData; 2682 2683 // If this semantic is the result of the pseudo-object 2684 // expression, try to evaluate the source as +1. 2685 if (ov == resultExpr) { 2686 assert(!OVMA::shouldBindAsLValue(ov)); 2687 result = asImpl().visit(ov->getSourceExpr()); 2688 opaqueData = OVMA::bind(CGF, ov, 2689 RValue::get(asImpl().getValueOfResult(result))); 2690 2691 // Otherwise, just bind it. 2692 } else { 2693 opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr()); 2694 } 2695 opaques.push_back(opaqueData); 2696 2697 // Otherwise, if the expression is the result, evaluate it 2698 // and remember the result. 2699 } else if (semantic == resultExpr) { 2700 result = asImpl().visit(semantic); 2701 2702 // Otherwise, evaluate the expression in an ignored context. 2703 } else { 2704 CGF.EmitIgnoredExpr(semantic); 2705 } 2706 } 2707 2708 // Unbind all the opaques now. 2709 for (unsigned i = 0, e = opaques.size(); i != e; ++i) 2710 opaques[i].unbind(CGF); 2711 2712 return result; 2713 } 2714 2715 template <typename Impl, typename Result> 2716 Result ARCExprEmitter<Impl,Result>::visitCastExpr(const CastExpr *e) { 2717 switch (e->getCastKind()) { 2718 2719 // No-op casts don't change the type, so we just ignore them. 2720 case CK_NoOp: 2721 return asImpl().visit(e->getSubExpr()); 2722 2723 // These casts can change the type. 2724 case CK_CPointerToObjCPointerCast: 2725 case CK_BlockPointerToObjCPointerCast: 2726 case CK_AnyPointerToBlockPointerCast: 2727 case CK_BitCast: { 2728 llvm::Type *resultType = CGF.ConvertType(e->getType()); 2729 assert(e->getSubExpr()->getType()->hasPointerRepresentation()); 2730 Result result = asImpl().visit(e->getSubExpr()); 2731 return asImpl().emitBitCast(result, resultType); 2732 } 2733 2734 // Handle some casts specially. 2735 case CK_LValueToRValue: 2736 return asImpl().visitLValueToRValue(e->getSubExpr()); 2737 case CK_ARCConsumeObject: 2738 return asImpl().visitConsumeObject(e->getSubExpr()); 2739 case CK_ARCExtendBlockObject: 2740 return asImpl().visitExtendBlockObject(e->getSubExpr()); 2741 case CK_ARCReclaimReturnedObject: 2742 return asImpl().visitReclaimReturnedObject(e->getSubExpr()); 2743 2744 // Otherwise, use the default logic. 
2745 default: 2746 return asImpl().visitExpr(e); 2747 } 2748 } 2749 2750 template <typename Impl, typename Result> 2751 Result 2752 ARCExprEmitter<Impl,Result>::visitBinaryOperator(const BinaryOperator *e) { 2753 switch (e->getOpcode()) { 2754 case BO_Comma: 2755 CGF.EmitIgnoredExpr(e->getLHS()); 2756 CGF.EnsureInsertPoint(); 2757 return asImpl().visit(e->getRHS()); 2758 2759 case BO_Assign: 2760 return asImpl().visitBinAssign(e); 2761 2762 default: 2763 return asImpl().visitExpr(e); 2764 } 2765 } 2766 2767 template <typename Impl, typename Result> 2768 Result ARCExprEmitter<Impl,Result>::visitBinAssign(const BinaryOperator *e) { 2769 switch (e->getLHS()->getType().getObjCLifetime()) { 2770 case Qualifiers::OCL_ExplicitNone: 2771 return asImpl().visitBinAssignUnsafeUnretained(e); 2772 2773 case Qualifiers::OCL_Weak: 2774 return asImpl().visitBinAssignWeak(e); 2775 2776 case Qualifiers::OCL_Autoreleasing: 2777 return asImpl().visitBinAssignAutoreleasing(e); 2778 2779 case Qualifiers::OCL_Strong: 2780 return asImpl().visitBinAssignStrong(e); 2781 2782 case Qualifiers::OCL_None: 2783 return asImpl().visitExpr(e); 2784 } 2785 llvm_unreachable("bad ObjC ownership qualifier"); 2786 } 2787 2788 /// The default rule for __unsafe_unretained emits the RHS recursively, 2789 /// stores into the unsafe variable, and propagates the result outward. 2790 template <typename Impl, typename Result> 2791 Result ARCExprEmitter<Impl,Result>:: 2792 visitBinAssignUnsafeUnretained(const BinaryOperator *e) { 2793 // Recursively emit the RHS. 2794 // For __block safety, do this before emitting the LHS. 2795 Result result = asImpl().visit(e->getRHS()); 2796 2797 // Perform the store. 2798 LValue lvalue = 2799 CGF.EmitCheckedLValue(e->getLHS(), CodeGenFunction::TCK_Store); 2800 CGF.EmitStoreThroughLValue(RValue::get(asImpl().getValueOfResult(result)), 2801 lvalue); 2802 2803 return result; 2804 } 2805 2806 template <typename Impl, typename Result> 2807 Result 2808 ARCExprEmitter<Impl,Result>::visitBinAssignAutoreleasing(const BinaryOperator *e) { 2809 return asImpl().visitExpr(e); 2810 } 2811 2812 template <typename Impl, typename Result> 2813 Result 2814 ARCExprEmitter<Impl,Result>::visitBinAssignWeak(const BinaryOperator *e) { 2815 return asImpl().visitExpr(e); 2816 } 2817 2818 template <typename Impl, typename Result> 2819 Result 2820 ARCExprEmitter<Impl,Result>::visitBinAssignStrong(const BinaryOperator *e) { 2821 return asImpl().visitExpr(e); 2822 } 2823 2824 /// The general expression-emission logic. 2825 template <typename Impl, typename Result> 2826 Result ARCExprEmitter<Impl,Result>::visit(const Expr *e) { 2827 // We should *never* see a nested full-expression here, because if 2828 // we fail to emit at +1, our caller must not retain after we close 2829 // out the full-expression. This isn't as important in the unsafe 2830 // emitter. 2831 assert(!isa<ExprWithCleanups>(e)); 2832 2833 // Look through parens, __extension__, generic selection, etc. 2834 e = e->IgnoreParens(); 2835 2836 // Handle certain kinds of casts. 2837 if (const CastExpr *ce = dyn_cast<CastExpr>(e)) { 2838 return asImpl().visitCastExpr(ce); 2839 2840 // Handle the comma operator. 2841 } else if (auto op = dyn_cast<BinaryOperator>(e)) { 2842 return asImpl().visitBinaryOperator(op); 2843 2844 // TODO: handle conditional operators here 2845 2846 // For calls and message sends, use the retained-call logic. 
2847 // Delegate inits are a special case in that they're the only 2848 // returns-retained expression that *isn't* surrounded by 2849 // a consume. 2850 } else if (isa<CallExpr>(e) || 2851 (isa<ObjCMessageExpr>(e) && 2852 !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) { 2853 return asImpl().visitCall(e); 2854 2855 // Look through pseudo-object expressions. 2856 } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) { 2857 return asImpl().visitPseudoObjectExpr(pseudo); 2858 } 2859 2860 return asImpl().visitExpr(e); 2861 } 2862 2863 namespace { 2864 2865 /// An emitter for +1 results. 2866 struct ARCRetainExprEmitter : 2867 public ARCExprEmitter<ARCRetainExprEmitter, TryEmitResult> { 2868 2869 ARCRetainExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {} 2870 2871 llvm::Value *getValueOfResult(TryEmitResult result) { 2872 return result.getPointer(); 2873 } 2874 2875 TryEmitResult emitBitCast(TryEmitResult result, llvm::Type *resultType) { 2876 llvm::Value *value = result.getPointer(); 2877 value = CGF.Builder.CreateBitCast(value, resultType); 2878 result.setPointer(value); 2879 return result; 2880 } 2881 2882 TryEmitResult visitLValueToRValue(const Expr *e) { 2883 return tryEmitARCRetainLoadOfScalar(CGF, e); 2884 } 2885 2886 /// For consumptions, just emit the subexpression and thus elide 2887 /// the retain/release pair. 2888 TryEmitResult visitConsumeObject(const Expr *e) { 2889 llvm::Value *result = CGF.EmitScalarExpr(e); 2890 return TryEmitResult(result, true); 2891 } 2892 2893 /// Block extends are net +0. Naively, we could just recurse on 2894 /// the subexpression, but actually we need to ensure that the 2895 /// value is copied as a block, so there's a little filter here. 2896 TryEmitResult visitExtendBlockObject(const Expr *e) { 2897 llvm::Value *result; // will be a +0 value 2898 2899 // If we can't safely assume the sub-expression will produce a 2900 // block-copied value, emit the sub-expression at +0. 2901 if (shouldEmitSeparateBlockRetain(e)) { 2902 result = CGF.EmitScalarExpr(e); 2903 2904 // Otherwise, try to emit the sub-expression at +1 recursively. 2905 } else { 2906 TryEmitResult subresult = asImpl().visit(e); 2907 2908 // If that produced a retained value, just use that. 2909 if (subresult.getInt()) { 2910 return subresult; 2911 } 2912 2913 // Otherwise it's +0. 2914 result = subresult.getPointer(); 2915 } 2916 2917 // Retain the object as a block. 2918 result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true); 2919 return TryEmitResult(result, true); 2920 } 2921 2922 /// For reclaims, emit the subexpression as a retained call and 2923 /// skip the consumption. 2924 TryEmitResult visitReclaimReturnedObject(const Expr *e) { 2925 llvm::Value *result = emitARCRetainCallResult(CGF, e); 2926 return TryEmitResult(result, true); 2927 } 2928 2929 /// When we have an undecorated call, retroactively do a claim. 2930 TryEmitResult visitCall(const Expr *e) { 2931 llvm::Value *result = emitARCRetainCallResult(CGF, e); 2932 return TryEmitResult(result, true); 2933 } 2934 2935 // TODO: maybe special-case visitBinAssignWeak? 2936 2937 TryEmitResult visitExpr(const Expr *e) { 2938 // We didn't find an obvious production, so emit what we've got and 2939 // tell the caller that we didn't manage to retain. 
2940 llvm::Value *result = CGF.EmitScalarExpr(e); 2941 return TryEmitResult(result, false); 2942 } 2943 }; 2944 } 2945 2946 static TryEmitResult 2947 tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) { 2948 return ARCRetainExprEmitter(CGF).visit(e); 2949 } 2950 2951 static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF, 2952 LValue lvalue, 2953 QualType type) { 2954 TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type); 2955 llvm::Value *value = result.getPointer(); 2956 if (!result.getInt()) 2957 value = CGF.EmitARCRetain(type, value); 2958 return value; 2959 } 2960 2961 /// EmitARCRetainScalarExpr - Semantically equivalent to 2962 /// EmitARCRetainObject(e->getType(), EmitScalarExpr(e)), but making a 2963 /// best-effort attempt to peephole expressions that naturally produce 2964 /// retained objects. 2965 llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) { 2966 // The retain needs to happen within the full-expression. 2967 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { 2968 enterFullExpression(cleanups); 2969 RunCleanupsScope scope(*this); 2970 return EmitARCRetainScalarExpr(cleanups->getSubExpr()); 2971 } 2972 2973 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); 2974 llvm::Value *value = result.getPointer(); 2975 if (!result.getInt()) 2976 value = EmitARCRetain(e->getType(), value); 2977 return value; 2978 } 2979 2980 llvm::Value * 2981 CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) { 2982 // The retain needs to happen within the full-expression. 2983 if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) { 2984 enterFullExpression(cleanups); 2985 RunCleanupsScope scope(*this); 2986 return EmitARCRetainAutoreleaseScalarExpr(cleanups->getSubExpr()); 2987 } 2988 2989 TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e); 2990 llvm::Value *value = result.getPointer(); 2991 if (result.getInt()) 2992 value = EmitARCAutorelease(value); 2993 else 2994 value = EmitARCRetainAutorelease(e->getType(), value); 2995 return value; 2996 } 2997 2998 llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) { 2999 llvm::Value *result; 3000 bool doRetain; 3001 3002 if (shouldEmitSeparateBlockRetain(e)) { 3003 result = EmitScalarExpr(e); 3004 doRetain = true; 3005 } else { 3006 TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e); 3007 result = subresult.getPointer(); 3008 doRetain = !subresult.getInt(); 3009 } 3010 3011 if (doRetain) 3012 result = EmitARCRetainBlock(result, /*mandatory*/ true); 3013 return EmitObjCConsumeObject(e->getType(), result); 3014 } 3015 3016 llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) { 3017 // In ARC, retain and autorelease the expression. 3018 if (getLangOpts().ObjCAutoRefCount) { 3019 // Do so before running any cleanups for the full-expression. 3020 // EmitARCRetainAutoreleaseScalarExpr does this for us. 3021 return EmitARCRetainAutoreleaseScalarExpr(expr); 3022 } 3023 3024 // Otherwise, use the normal scalar-expression emission. The 3025 // exception machinery doesn't do anything special with the 3026 // exception like retaining it, so there's no safety associated with 3027 // only running cleanups after the throw has started, and when it 3028 // matters it tends to be substantially inferior code. 3029 return EmitScalarExpr(expr); 3030 } 3031 3032 namespace { 3033 3034 /// An emitter for assigning into an __unsafe_unretained context. 
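/// The goal is a +0 value with no compensating retain/release traffic:
/// reclaims turn into unsafe claims when the runtime allows it, and plain
/// calls are emitted with no claim at all.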
3035 struct ARCUnsafeUnretainedExprEmitter :
3036   public ARCExprEmitter<ARCUnsafeUnretainedExprEmitter, llvm::Value*> {
3037
3038   ARCUnsafeUnretainedExprEmitter(CodeGenFunction &CGF) : ARCExprEmitter(CGF) {}
3039
3040   llvm::Value *getValueOfResult(llvm::Value *value) {
3041     return value;
3042   }
3043
3044   llvm::Value *emitBitCast(llvm::Value *value, llvm::Type *resultType) {
3045     return CGF.Builder.CreateBitCast(value, resultType);
3046   }
3047
3048   llvm::Value *visitLValueToRValue(const Expr *e) {
3049     return CGF.EmitScalarExpr(e);
3050   }
3051
3052   /// For consumptions, just emit the subexpression and perform the
3053   /// consumption like normal.
3054   llvm::Value *visitConsumeObject(const Expr *e) {
3055     llvm::Value *value = CGF.EmitScalarExpr(e);
3056     return CGF.EmitObjCConsumeObject(e->getType(), value);
3057   }
3058
3059   /// No special logic for block extensions. (This probably can't
3060   /// actually happen in this emitter, though.)
3061   llvm::Value *visitExtendBlockObject(const Expr *e) {
3062     return CGF.EmitARCExtendBlockObject(e);
3063   }
3064
3065   /// For reclaims, perform an unsafeClaim if that's enabled.
3066   llvm::Value *visitReclaimReturnedObject(const Expr *e) {
3067     return CGF.EmitARCReclaimReturnedObject(e, /*unsafe*/ true);
3068   }
3069
3070   /// When we have an undecorated call, just emit it without adding
3071   /// the unsafeClaim.
3072   llvm::Value *visitCall(const Expr *e) {
3073     return CGF.EmitScalarExpr(e);
3074   }
3075
3076   /// Just do normal scalar emission in the default case.
3077   llvm::Value *visitExpr(const Expr *e) {
3078     return CGF.EmitScalarExpr(e);
3079   }
3080 };
3081 }
3082
3083 static llvm::Value *emitARCUnsafeUnretainedScalarExpr(CodeGenFunction &CGF,
3084                                                       const Expr *e) {
3085   return ARCUnsafeUnretainedExprEmitter(CGF).visit(e);
3086 }
3087
3088 /// EmitARCUnsafeUnretainedScalarExpr - Semantically equivalent to
3089 /// immediately releasing the result of EmitARCRetainScalarExpr, but
3090 /// avoiding any spurious retains, including by performing reclaims
3091 /// with objc_unsafeClaimAutoreleasedReturnValue.
3092 llvm::Value *CodeGenFunction::EmitARCUnsafeUnretainedScalarExpr(const Expr *e) {
3093   // Look through full-expressions.
3094   if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
3095     enterFullExpression(cleanups);
3096     RunCleanupsScope scope(*this);
3097     return emitARCUnsafeUnretainedScalarExpr(*this, cleanups->getSubExpr());
3098   }
3099
3100   return emitARCUnsafeUnretainedScalarExpr(*this, e);
3101 }
3102
3103 std::pair<LValue,llvm::Value*>
3104 CodeGenFunction::EmitARCStoreUnsafeUnretained(const BinaryOperator *e,
3105                                               bool ignored) {
3106   // Evaluate the RHS first. If we're ignoring the result, assume
3107   // that we can emit at an unsafe +0.
3108   llvm::Value *value;
3109   if (ignored) {
3110     value = EmitARCUnsafeUnretainedScalarExpr(e->getRHS());
3111   } else {
3112     value = EmitScalarExpr(e->getRHS());
3113   }
3114
3115   // Emit the LHS and perform the store.
3116   LValue lvalue = EmitLValue(e->getLHS());
3117   EmitStoreOfScalar(value, lvalue);
3118
3119   return std::pair<LValue,llvm::Value*>(std::move(lvalue), value);
3120 }
3121
3122 std::pair<LValue,llvm::Value*>
3123 CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
3124                                     bool ignored) {
3125   // Evaluate the RHS first.
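  // Trying for +1 here lets the retain of the new value pair directly with
  // the release of the old value below, with no extra runtime calls.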
std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
                                    bool ignored) {
  // Evaluate the RHS first.
  TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
  llvm::Value *value = result.getPointer();

  bool hasImmediateRetain = result.getInt();

  // If we didn't emit a retained object, and the l-value is of block
  // type, then we need to emit the block-retain immediately in case
  // it invalidates the l-value.
  if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
    value = EmitARCRetainBlock(value, /*mandatory*/ false);
    hasImmediateRetain = true;
  }

  LValue lvalue = EmitLValue(e->getLHS());

  // If the RHS was emitted retained, expand this.
  if (hasImmediateRetain) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
    EmitStoreOfScalar(value, lvalue);
    EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
  } else {
    value = EmitARCStoreStrong(lvalue, value, ignored);
  }

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

std::pair<LValue,llvm::Value*>
CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
  llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
  LValue lvalue = EmitLValue(e->getLHS());

  EmitStoreOfScalar(value, lvalue);

  return std::pair<LValue,llvm::Value*>(lvalue, value);
}

void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
                                          const ObjCAutoreleasePoolStmt &ARPS) {
  const Stmt *subStmt = ARPS.getSubStmt();
  const CompoundStmt &S = cast<CompoundStmt>(*subStmt);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());

  // Keep track of the current cleanup stack depth.
  RunCleanupsScope Scope(*this);
  if (CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
    llvm::Value *token = EmitObjCAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
  } else {
    llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
    EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
  }

  for (const auto *I : S.body())
    EmitStmt(I);

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
}

/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
/// make sure it survives garbage collection until this point.
void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
  // We just use inline assembly.
  llvm::FunctionType *extenderType
    = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
  llvm::Value *extender
    = llvm::InlineAsm::get(extenderType,
                           /* assembly */ "",
                           /* constraints */ "r",
                           /* side effects */ true);

  object = Builder.CreateBitCast(object, VoidPtrTy);
  EmitNounwindRuntimeCall(extender, object);
}

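// Illustrative context (hypothetical property, not taken from this file):
// the atomic copy helpers below come into play for something like
//   struct Wrapper { Wrapper &operator=(const Wrapper &); /* ... */ };
//   @property (atomic) Wrapper value;
// i.e. a C++ ivar whose copy assignment (setter) or copy construction
// (getter) is non-trivial, on runtimes that support atomic copy helpers.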
/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
/// a non-trivial copy assignment operator, produce the following helper
/// function:
///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
///
llvm::Constant *
CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
                                        const ObjCPropertyImplDecl *PID) {
  if (!getLangOpts().CPlusPlus ||
      !getLangOpts().ObjCRuntime.hasAtomicCopyHelper())
    return nullptr;
  QualType Ty = PID->getPropertyIvarDecl()->getType();
  if (!Ty->isRecordType())
    return nullptr;
  const ObjCPropertyDecl *PD = PID->getPropertyDecl();
  if (!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic))
    return nullptr;
  llvm::Constant *HelperFn = nullptr;
  if (hasTrivialSetExpr(PID))
    return nullptr;
  assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
  if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
    return HelperFn;

  ASTContext &C = getContext();
  IdentifierInfo *II
    = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
  FunctionDecl *FD = FunctionDecl::Create(C,
                                          C.getTranslationUnitDecl(),
                                          SourceLocation(),
                                          SourceLocation(), II, C.VoidTy,
                                          nullptr, SC_Static,
                                          false,
                                          false);

  QualType DestTy = C.getPointerType(Ty);
  QualType SrcTy = Ty;
  SrcTy.addConst();
  SrcTy = C.getPointerType(SrcTy);

  FunctionArgList args;
  ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,
                            DestTy);
  args.push_back(&dstDecl);
  ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr,
                            SrcTy);
  args.push_back(&srcDecl);

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args);

  llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);

  llvm::Function *Fn =
      llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
                             "__assign_helper_atomic_property_",
                             &CGM.getModule());

  CGM.SetInternalFunctionAttributes(nullptr, Fn, FI);

  StartFunction(FD, C.VoidTy, Fn, FI, args);

  DeclRefExpr DstExpr(&dstDecl, false, DestTy,
                      VK_RValue, SourceLocation());
  UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation());

  DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
                      VK_RValue, SourceLocation());
  UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
                    VK_LValue, OK_Ordinary, SourceLocation());

  Expr *Args[2] = { &DST, &SRC };
  CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
  CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
                              Args, DestTy->getPointeeType(),
                              VK_LValue, SourceLocation(), FPOptions());

  EmitStmt(&TheCall);

  FinishFunction();
  HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
  CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
  return HelperFn;
}

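/// GenerateObjCAtomicGetterCopyHelperFunction - Counterpart of the setter
/// helper above for atomic getters: given a C++ object type with a
/// non-trivial copy constructor, produce a helper that, roughly sketched,
/// behaves like
///   static void copyHelper(Ty *dest, const Ty *source) { new (dest) Ty(*source); }
/// (the name and exact shape here are only illustrative; the emitted function
/// is named __copy_helper_atomic_property_ and is built below).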
"getGetterCXXConstructor - null"); 3305 if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty))) 3306 return HelperFn; 3307 3308 3309 ASTContext &C = getContext(); 3310 IdentifierInfo *II 3311 = &CGM.getContext().Idents.get("__copy_helper_atomic_property_"); 3312 FunctionDecl *FD = FunctionDecl::Create(C, 3313 C.getTranslationUnitDecl(), 3314 SourceLocation(), 3315 SourceLocation(), II, C.VoidTy, 3316 nullptr, SC_Static, 3317 false, 3318 false); 3319 3320 QualType DestTy = C.getPointerType(Ty); 3321 QualType SrcTy = Ty; 3322 SrcTy.addConst(); 3323 SrcTy = C.getPointerType(SrcTy); 3324 3325 FunctionArgList args; 3326 ImplicitParamDecl dstDecl(getContext(), FD, SourceLocation(), nullptr,DestTy); 3327 args.push_back(&dstDecl); 3328 ImplicitParamDecl srcDecl(getContext(), FD, SourceLocation(), nullptr, SrcTy); 3329 args.push_back(&srcDecl); 3330 3331 const CGFunctionInfo &FI = 3332 CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, args); 3333 3334 llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI); 3335 3336 llvm::Function *Fn = 3337 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage, 3338 "__copy_helper_atomic_property_", &CGM.getModule()); 3339 3340 CGM.SetInternalFunctionAttributes(nullptr, Fn, FI); 3341 3342 StartFunction(FD, C.VoidTy, Fn, FI, args); 3343 3344 DeclRefExpr SrcExpr(&srcDecl, false, SrcTy, 3345 VK_RValue, SourceLocation()); 3346 3347 UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(), 3348 VK_LValue, OK_Ordinary, SourceLocation()); 3349 3350 CXXConstructExpr *CXXConstExpr = 3351 cast<CXXConstructExpr>(PID->getGetterCXXConstructor()); 3352 3353 SmallVector<Expr*, 4> ConstructorArgs; 3354 ConstructorArgs.push_back(&SRC); 3355 ConstructorArgs.append(std::next(CXXConstExpr->arg_begin()), 3356 CXXConstExpr->arg_end()); 3357 3358 CXXConstructExpr *TheCXXConstructExpr = 3359 CXXConstructExpr::Create(C, Ty, SourceLocation(), 3360 CXXConstExpr->getConstructor(), 3361 CXXConstExpr->isElidable(), 3362 ConstructorArgs, 3363 CXXConstExpr->hadMultipleCandidates(), 3364 CXXConstExpr->isListInitialization(), 3365 CXXConstExpr->isStdInitListInitialization(), 3366 CXXConstExpr->requiresZeroInitialization(), 3367 CXXConstExpr->getConstructionKind(), 3368 SourceRange()); 3369 3370 DeclRefExpr DstExpr(&dstDecl, false, DestTy, 3371 VK_RValue, SourceLocation()); 3372 3373 RValue DV = EmitAnyExpr(&DstExpr); 3374 CharUnits Alignment 3375 = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType()); 3376 EmitAggExpr(TheCXXConstructExpr, 3377 AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment), 3378 Qualifiers(), 3379 AggValueSlot::IsDestructed, 3380 AggValueSlot::DoesNotNeedGCBarriers, 3381 AggValueSlot::IsNotAliased)); 3382 3383 FinishFunction(); 3384 HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy); 3385 CGM.setAtomicGetterHelperFnMap(Ty, HelperFn); 3386 return HelperFn; 3387 } 3388 3389 llvm::Value * 3390 CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) { 3391 // Get selectors for retain/autorelease. 3392 IdentifierInfo *CopyID = &getContext().Idents.get("copy"); 3393 Selector CopySelector = 3394 getContext().Selectors.getNullarySelector(CopyID); 3395 IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease"); 3396 Selector AutoreleaseSelector = 3397 getContext().Selectors.getNullarySelector(AutoreleaseID); 3398 3399 // Emit calls to retain/autorelease. 
llvm::Value *
CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
  // Get selectors for copy/autorelease.
  IdentifierInfo *CopyID = &getContext().Idents.get("copy");
  Selector CopySelector =
      getContext().Selectors.getNullarySelector(CopyID);
  IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
  Selector AutoreleaseSelector =
      getContext().Selectors.getNullarySelector(AutoreleaseID);

  // Emit calls to copy/autorelease.
  CGObjCRuntime &Runtime = CGM.getObjCRuntime();
  llvm::Value *Val = Block;
  RValue Result;
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, CopySelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
                                       Ty, AutoreleaseSelector,
                                       Val, CallArgList(), nullptr, nullptr);
  Val = Result.getScalarVal();
  return Val;
}

llvm::Value *
CodeGenFunction::EmitBuiltinAvailable(ArrayRef<llvm::Value *> Args) {
  assert(Args.size() == 3 && "Expected 3 arguments here!");

  if (!CGM.IsOSVersionAtLeastFn) {
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(Int32Ty, {Int32Ty, Int32Ty, Int32Ty}, false);
    CGM.IsOSVersionAtLeastFn =
        CGM.CreateRuntimeFunction(FTy, "__isOSVersionAtLeast");
  }

  llvm::Value *CallRes =
      EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);

  return Builder.CreateICmpNE(CallRes, llvm::Constant::getNullValue(Int32Ty));
}

void CodeGenModule::emitAtAvailableLinkGuard() {
  if (!IsOSVersionAtLeastFn)
    return;
  // @available requires CoreFoundation only on Darwin.
  if (!Target.getTriple().isOSDarwin())
    return;
  // Add -framework CoreFoundation to the linker commands. We still want to
  // emit the CoreFoundation reference below because otherwise, if
  // CoreFoundation is not used in the code, the linker won't link the
  // framework.
  auto &Context = getLLVMContext();
  llvm::Metadata *Args[2] = {llvm::MDString::get(Context, "-framework"),
                             llvm::MDString::get(Context, "CoreFoundation")};
  LinkerOptionsMetadata.push_back(llvm::MDNode::get(Context, Args));
  // Emit a reference to a symbol from CoreFoundation to ensure that
  // CoreFoundation is linked into the final binary.
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(Int32Ty, {VoidPtrTy}, false);
  llvm::Constant *CFFunc =
      CreateRuntimeFunction(FTy, "CFBundleGetVersionNumber");

  llvm::FunctionType *CheckFTy = llvm::FunctionType::get(VoidTy, {}, false);
  llvm::Function *CFLinkCheckFunc = cast<llvm::Function>(CreateBuiltinFunction(
      CheckFTy, "__clang_at_available_requires_core_foundation_framework"));
  CFLinkCheckFunc->setLinkage(llvm::GlobalValue::LinkOnceAnyLinkage);
  CFLinkCheckFunc->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CodeGenFunction CGF(*this);
  CGF.Builder.SetInsertPoint(CGF.createBasicBlock("", CFLinkCheckFunc));
  CGF.EmitNounwindRuntimeCall(CFFunc, llvm::Constant::getNullValue(VoidPtrTy));
  CGF.Builder.CreateUnreachable();
  addCompilerUsedGlobal(CFLinkCheckFunc);
}

CGObjCRuntime::~CGObjCRuntime() {}