//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
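  // RequiredArgs(0) marks every argument as part of the variadic tail, so the
  // resulting function info has no fixed (required) parameters at all.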
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 false, None, FTNP->getExtInfo(),
                                 RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool IsInstanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    prefix.push_back(FTP->getParamType(i));
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, false, argTypes, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

static bool isAAPCSVFP(const CGFunctionInfo &FI, const TargetInfo &Target) {
  switch (FI.getEffectiveCallingConvention()) {
  case llvm::CallingConv::C:
    switch (Target.getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  case llvm::CallingConv::ARM_AAPCS_VFP:
    return true;
  default:
    return false;
  }
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

/// Arrange the argument and result information for a declaration
/// or definition to the given constructor variant.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
                                               CXXCtorType ctorKind) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, ctorKind);
  CanQualType resultType =
      TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    argTypes.push_back(FTP->getParamType(i));

  TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);

  RequiredArgs required =
      (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType =
      TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
}

/// Arrange the argument and result information for a declaration,
/// definition, or call to the given destructor variant. It so
/// happens that all three cases produce the same information.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
                                   CXXDtorType dtorKind) {
  SmallVector<CanQualType, 2> argTypes;
  argTypes.push_back(GetThisType(Context, D->getParent()));

  GlobalDecl GD(D, dtorKind);
  CanQualType resultType =
      TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
  assert(FTP->isVariadic() == 0 && "dtor with formal parameters");

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
                                 RequiredArgs::All);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
                                   noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
                                 argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
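  // Only FunctionDecls reach this point today (the cast below enforces it);
  // constructor and destructor variants are routed to their dedicated
  // arrangements.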
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXDestructor(DD, GD.getDtorType());

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
               .isNoProtoCallVariadic(args,
                                      cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
                                     fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
                                 argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
                                 info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
                                 FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool IsInstanceMethod,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
#ifndef NDEBUG
  for (ArrayRef<CanQualType>::const_iterator
         I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
                          argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
                              required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool IsInstanceMethod,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = IsInstanceMethod;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

void CodeGenTypes::GetExpandedTypes(QualType type,
                                    SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        GetExpandedTypes(LargestFD->getType(), expandedTypes);
    } else {
      for (const auto *I : RD->fields()) {
        assert(!I->isBitField() &&
               "Cannot expand structure with bit-field members.");
        GetExpandedTypes(I->getType(), expandedTypes);
      }
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    RecordDecl *RD = RT->getDecl();
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD) {
        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, LargestFD);
        AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
      }
    } else {
      for (const auto *FD : RD->fields()) {
        QualType FT = FD->getType();

        // FIXME: What are the right qualifiers here?
        LValue SubLV = EmitLValueForField(LV, FD);
        AI = ExpandTypeFromArgs(FT, SubLV, AI);
      }
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}


/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
      cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
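    // Note that only DstSize bytes are copied back out of the temporary, so
    // any excess source bits (expected to be padding, per the comment above)
    // are dropped here.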
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  bool SwapThisWithSRet = false;
  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = nullptr;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));

    SwapThisWithSRet = retAI.isSRetAfterThis();
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  // Add in all of the required arguments.
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
  if (FI.isVariadic()) {
    ie = it + FI.getRequiredArgs().getNumRequiredArgs();
  } else {
    ie = FI.arg_end();
  }
  for (; it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    // Insert a padding type to ensure proper alignment.
    if (llvm::Type *PaddingType = argAI.getPaddingType())
      argTypes.push_back(PaddingType);

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it. Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      // We cannot do this for functions using the AAPCS calling convention,
      // as structures are treated differently by that calling convention.
      llvm::Type *argType = argAI.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && !isAAPCSVFP(FI, getTarget())) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  // Add the inalloca struct as the last parameter type.
  if (llvm::StructType *ArgStruct = FI.getArgStruct())
    argTypes.push_back(ArgStruct->getPointerTo());

  if (SwapThisWithSRet)
    std::swap(argTypes[0], argTypes[1]);

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
  if (CodeGenOpts.OptimizeSize == 2)
    FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");
  }

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca: {
    // inalloca disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Indirect: {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), SwapThisWithSRet ? 2 : Index, SRETAttrs));

    if (!SwapThisWithSRet)
      ++Index;
    // sret disables readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                        .getQuantity());
    else if (getContext().getTargetAddressSpace(PTy) == 0)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
  }

  if (RetAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::ReturnIndex,
                                    RetAttrs));

  for (const auto &I : FI.arguments()) {
    QualType ParamType = I.type;
    const ABIArgInfo &AI = I.info;
    llvm::AttrBuilder Attrs;

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (Index == 2 && SwapThisWithSRet)
      ++Index;

    if (AI.getPaddingType()) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
                                              llvm::Attribute::InReg));
      // Increment Index if there is padding.
      ++Index;
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::ZExt);
      // FALL THROUGH
    case ABIArgInfo::Direct: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      // FIXME: handle sseregparm someday...

      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (!isAAPCSVFP(FI, getTarget()) && STy) {
        unsigned Extra = STy->getNumElements()-1;  // 1 will be added below.
        if (Attrs.hasAttributes())
          for (unsigned I = 0; I < Extra; ++I)
            PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
                                                  Attrs));
        Index += Extra;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                       .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    if (Attrs.hasAttributes())
      PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
    ++Index;
  }

  // Add the inalloca attribute to the trailing inalloca parameter if present.
  if (FI.usesInAlloca()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
  }

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::
                  AttributeSet::get(getLLVMContext(),
                                    llvm::AttributeSet::FunctionIndex,
                                    FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls. Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  llvm::Value *ArgStruct = nullptr;
  if (FI.usesInAlloca()) {
    llvm::Function::arg_iterator EI = Fn->arg_end();
    --EI;
    ArgStruct = EI;
    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter, which can come first or second.
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  bool SwapThisWithSRet = false;
  if (RetAI.isIndirect()) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    if (SwapThisWithSRet)
      ++AI;
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
    if (SwapThisWithSRet)
      --AI;  // Go back to the beginning for 'this'.
    else
      ++AI;  // Skip the sret parameter.
  }

  // Get the function-level nonnull attribute if it exists.
  const NonNullAttr *NNAtt =
      CurCodeDecl ? CurCodeDecl->getAttr<NonNullAttr>() : nullptr;

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
  // it into a local alloca for us.
  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
  SmallVector<ValueAndIsPtr, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    // Skip the dummy padding argument.
    if (ArgI.getPaddingType())
      ++AI;

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      llvm::Value *V = Builder.CreateStructGEP(
          ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      continue;  // Don't increment AI!
    }

    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
                             Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          if ((NNAtt && NNAtt->isNonNull(PVD->getFunctionScopeIndex())) ||
              PVD->hasAttr<NonNullAttr>())
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1,
                                                llvm::Attribute::NonNull));

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1, Attrs));
              } else if (getContext().getTargetAddressSpace(ETy) == 0) {
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1,
                                                    llvm::Attribute::NonNull));
              }
            }
          } else if (const auto *ArrTy =
                     getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()))
              AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                  AI->getArgNo() + 1,
                                                  llvm::Attribute::NonNull));
          }
        }

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
1516 if (V->getType() != ArgI.getCoerceToType()) 1517 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 1518 1519 if (isPromoted) 1520 V = emitArgumentDemotion(*this, Arg, V); 1521 1522 if (const CXXMethodDecl *MD = 1523 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) { 1524 if (MD->isVirtual() && Arg == CXXABIThisDecl) 1525 V = CGM.getCXXABI(). 1526 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V); 1527 } 1528 1529 // Because of merging of function types from multiple decls it is 1530 // possible for the type of an argument to not match the corresponding 1531 // type in the function type. Since we are codegening the callee 1532 // in here, add a cast to the argument type. 1533 llvm::Type *LTy = ConvertType(Arg->getType()); 1534 if (V->getType() != LTy) 1535 V = Builder.CreateBitCast(V, LTy); 1536 1537 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1538 break; 1539 } 1540 1541 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName()); 1542 1543 // The alignment we need to use is the max of the requested alignment for 1544 // the argument plus the alignment required by our access code below. 1545 unsigned AlignmentToUse = 1546 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType()); 1547 AlignmentToUse = std::max(AlignmentToUse, 1548 (unsigned)getContext().getDeclAlign(Arg).getQuantity()); 1549 1550 Alloca->setAlignment(AlignmentToUse); 1551 llvm::Value *V = Alloca; 1552 llvm::Value *Ptr = V; // Pointer to store into. 1553 1554 // If the value is offset in memory, apply the offset now. 1555 if (unsigned Offs = ArgI.getDirectOffset()) { 1556 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy()); 1557 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs); 1558 Ptr = Builder.CreateBitCast(Ptr, 1559 llvm::PointerType::getUnqual(ArgI.getCoerceToType())); 1560 } 1561 1562 // If the coerce-to type is a first class aggregate, we flatten it and 1563 // pass the elements. Either way is semantically identical, but fast-isel 1564 // and the optimizer generally likes scalar values better than FCAs. 1565 // We cannot do this for functions using the AAPCS calling convention, 1566 // as structures are treated differently by that calling convention. 
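      // Illustrative example (the coerce-to type is chosen by the target ABI):
      // if a struct parameter 'x' is coerced to { i64, i64 }, the function
      // receives two i64 arguments named "x.coerce0" and "x.coerce1", and the
      // code below stores each of them into the corresponding field of the
      // (suitably bitcast) local temporary so the body can use 'x' as an
      // ordinary in-memory aggregate.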
1567 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 1568 if (!isAAPCSVFP(FI, getTarget()) && STy && STy->getNumElements() > 1) { 1569 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 1570 llvm::Type *DstTy = 1571 cast<llvm::PointerType>(Ptr->getType())->getElementType(); 1572 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 1573 1574 if (SrcSize <= DstSize) { 1575 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 1576 1577 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1578 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1579 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1580 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i); 1581 Builder.CreateStore(AI++, EltPtr); 1582 } 1583 } else { 1584 llvm::AllocaInst *TempAlloca = 1585 CreateTempAlloca(ArgI.getCoerceToType(), "coerce"); 1586 TempAlloca->setAlignment(AlignmentToUse); 1587 llvm::Value *TempV = TempAlloca; 1588 1589 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1590 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1591 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 1592 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i); 1593 Builder.CreateStore(AI++, EltPtr); 1594 } 1595 1596 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse); 1597 } 1598 } else { 1599 // Simple case, just do a coerced store of the argument into the alloca. 1600 assert(AI != Fn->arg_end() && "Argument mismatch!"); 1601 AI->setName(Arg->getName() + ".coerce"); 1602 CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this); 1603 } 1604 1605 1606 // Match to what EmitParmDecl is expecting for this type. 1607 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 1608 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart()); 1609 if (isPromoted) 1610 V = emitArgumentDemotion(*this, Arg, V); 1611 ArgVals.push_back(ValueAndIsPtr(V, HaveValue)); 1612 } else { 1613 ArgVals.push_back(ValueAndIsPtr(V, HavePointer)); 1614 } 1615 continue; // Skip ++AI increment, already done. 1616 } 1617 1618 case ABIArgInfo::Expand: { 1619 // If this structure was expanded into multiple arguments then 1620 // we need to create a temporary and reconstruct it from the 1621 // arguments. 1622 llvm::AllocaInst *Alloca = CreateMemTemp(Ty); 1623 CharUnits Align = getContext().getDeclAlign(Arg); 1624 Alloca->setAlignment(Align.getQuantity()); 1625 LValue LV = MakeAddrLValue(Alloca, Ty, Align); 1626 llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI); 1627 ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer)); 1628 1629 // Name the arguments used in expansion and increment AI. 1630 unsigned Index = 0; 1631 for (; AI != End; ++AI, ++Index) 1632 AI->setName(Arg->getName() + "." + Twine(Index)); 1633 continue; 1634 } 1635 1636 case ABIArgInfo::Ignore: 1637 // Initialize the local variable appropriately. 1638 if (!hasScalarEvaluationKind(Ty)) { 1639 ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer)); 1640 } else { 1641 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); 1642 ArgVals.push_back(ValueAndIsPtr(U, HaveValue)); 1643 } 1644 1645 // Skip increment, no matching LLVM parameter. 1646 continue; 1647 } 1648 1649 ++AI; 1650 1651 if (ArgNo == 1 && SwapThisWithSRet) 1652 ++AI; // Skip the sret parameter. 
1653 } 1654 1655 if (FI.usesInAlloca()) 1656 ++AI; 1657 assert(AI == Fn->arg_end() && "Argument mismatch!"); 1658 1659 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 1660 for (int I = Args.size() - 1; I >= 0; --I) 1661 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(), 1662 I + 1); 1663 } else { 1664 for (unsigned I = 0, E = Args.size(); I != E; ++I) 1665 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(), 1666 I + 1); 1667 } 1668 } 1669 1670 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 1671 while (insn->use_empty()) { 1672 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 1673 if (!bitcast) return; 1674 1675 // This is "safe" because we would have used a ConstantExpr otherwise. 1676 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); 1677 bitcast->eraseFromParent(); 1678 } 1679 } 1680 1681 /// Try to emit a fused autorelease of a return result. 1682 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, 1683 llvm::Value *result) { 1684 // We must be immediately followed the cast. 1685 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 1686 if (BB->empty()) return nullptr; 1687 if (&BB->back() != result) return nullptr; 1688 1689 llvm::Type *resultType = result->getType(); 1690 1691 // result is in a BasicBlock and is therefore an Instruction. 1692 llvm::Instruction *generator = cast<llvm::Instruction>(result); 1693 1694 SmallVector<llvm::Instruction*,4> insnsToKill; 1695 1696 // Look for: 1697 // %generator = bitcast %type1* %generator2 to %type2* 1698 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 1699 // We would have emitted this as a constant if the operand weren't 1700 // an Instruction. 1701 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 1702 1703 // Require the generator to be immediately followed by the cast. 1704 if (generator->getNextNode() != bitcast) 1705 return nullptr; 1706 1707 insnsToKill.push_back(bitcast); 1708 } 1709 1710 // Look for: 1711 // %generator = call i8* @objc_retain(i8* %originalResult) 1712 // or 1713 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 1714 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 1715 if (!call) return nullptr; 1716 1717 bool doRetainAutorelease; 1718 1719 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) { 1720 doRetainAutorelease = true; 1721 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints() 1722 .objc_retainAutoreleasedReturnValue) { 1723 doRetainAutorelease = false; 1724 1725 // If we emitted an assembly marker for this call (and the 1726 // ARCEntrypoints field should have been set if so), go looking 1727 // for that call. If we can't find it, we can't do this 1728 // optimization. But it should always be the immediately previous 1729 // instruction, unless we needed bitcasts around the call. 
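    // The shape being matched is roughly (illustrative IR only):
    //   %call = call i8* @foo()
    //   call void asm sideeffect "<marker>", ""()   ; the emitted marker, if any
    //   %v = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)
    // When a marker was emitted it should sit immediately before the retain
    // (modulo a bitcast) and is deleted along with it.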
1730 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) { 1731 llvm::Instruction *prev = call->getPrevNode(); 1732 assert(prev); 1733 if (isa<llvm::BitCastInst>(prev)) { 1734 prev = prev->getPrevNode(); 1735 assert(prev); 1736 } 1737 assert(isa<llvm::CallInst>(prev)); 1738 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 1739 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker); 1740 insnsToKill.push_back(prev); 1741 } 1742 } else { 1743 return nullptr; 1744 } 1745 1746 result = call->getArgOperand(0); 1747 insnsToKill.push_back(call); 1748 1749 // Keep killing bitcasts, for sanity. Note that we no longer care 1750 // about precise ordering as long as there's exactly one use. 1751 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 1752 if (!bitcast->hasOneUse()) break; 1753 insnsToKill.push_back(bitcast); 1754 result = bitcast->getOperand(0); 1755 } 1756 1757 // Delete all the unnecessary instructions, from latest to earliest. 1758 for (SmallVectorImpl<llvm::Instruction*>::iterator 1759 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i) 1760 (*i)->eraseFromParent(); 1761 1762 // Do the fused retain/autorelease if we were asked to. 1763 if (doRetainAutorelease) 1764 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 1765 1766 // Cast back to the result type. 1767 return CGF.Builder.CreateBitCast(result, resultType); 1768 } 1769 1770 /// If this is a +1 of the value of an immutable 'self', remove it. 1771 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 1772 llvm::Value *result) { 1773 // This is only applicable to a method with an immutable 'self'. 1774 const ObjCMethodDecl *method = 1775 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 1776 if (!method) return nullptr; 1777 const VarDecl *self = method->getSelfDecl(); 1778 if (!self->getType().isConstQualified()) return nullptr; 1779 1780 // Look for a retain call. 1781 llvm::CallInst *retainCall = 1782 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 1783 if (!retainCall || 1784 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain) 1785 return nullptr; 1786 1787 // Look for an ordinary load of 'self'. 1788 llvm::Value *retainedValue = retainCall->getArgOperand(0); 1789 llvm::LoadInst *load = 1790 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 1791 if (!load || load->isAtomic() || load->isVolatile() || 1792 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self)) 1793 return nullptr; 1794 1795 // Okay! Burn it all down. This relies for correctness on the 1796 // assumption that the retain is emitted as part of the return and 1797 // that thereafter everything is used "linearly". 1798 llvm::Type *resultType = result->getType(); 1799 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 1800 assert(retainCall->use_empty()); 1801 retainCall->eraseFromParent(); 1802 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 1803 1804 return CGF.Builder.CreateBitCast(load, resultType); 1805 } 1806 1807 /// Emit an ARC autorelease of the result of a function. 1808 /// 1809 /// \return the value to actually return from the function 1810 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 1811 llvm::Value *result) { 1812 // If we're returning 'self', kill the initial retain. This is a 1813 // heuristic attempt to "encourage correctness" in the really unfortunate 1814 // case where we have a return of self during a dealloc and we desperately 1815 // need to avoid the possible autorelease. 
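  // For example (illustrative only), a method that simply does
  //   return self;
  // would otherwise retain the immutable 'self' and then autorelease it on
  // return; tryRemoveRetainOfSelf strips that retain so the plain loaded
  // value can be returned with no autorelease at all.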
1816   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1817     return self;
1818
1819   // At -O0, try to emit a fused retain/autorelease.
1820   if (CGF.shouldUseFusedARCCalls())
1821     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1822       return fused;
1823
1824   return CGF.EmitARCAutoreleaseReturnValue(result);
1825 }
1826
1827 /// Heuristically search for a dominating store to the return-value slot.
1828 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1829   // If there are multiple uses of the return-value slot, just check
1830   // for something immediately preceding the IP. Sometimes this can
1831   // happen with how we generate implicit-returns; it can also happen
1832   // with noreturn cleanups.
1833   if (!CGF.ReturnValue->hasOneUse()) {
1834     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1835     if (IP->empty()) return nullptr;
1836     llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1837     if (!store) return nullptr;
1838     if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
1839     assert(!store->isAtomic() && !store->isVolatile()); // see below
1840     return store;
1841   }
1842
1843   llvm::StoreInst *store =
1844     dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
1845   if (!store) return nullptr;
1846
1847   // These aren't actually possible for non-coerced returns, and we
1848   // only care about non-coerced returns on this code path.
1849   assert(!store->isAtomic() && !store->isVolatile());
1850
1851   // Now do a first-and-dirty dominance check: just walk up the
1852   // single-predecessors chain from the current insertion point.
1853   llvm::BasicBlock *StoreBB = store->getParent();
1854   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1855   while (IP != StoreBB) {
1856     if (!(IP = IP->getSinglePredecessor()))
1857       return nullptr;
1858   }
1859
1860   // Okay, the store's basic block dominates the insertion point; we
1861   // can do our thing.
1862   return store;
1863 }
1864
1865 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1866                                          bool EmitRetDbgLoc,
1867                                          SourceLocation EndLoc) {
1868   // Functions with no result always return void.
1869   if (!ReturnValue) {
1870     Builder.CreateRetVoid();
1871     return;
1872   }
1873
1874   llvm::DebugLoc RetDbgLoc;
1875   llvm::Value *RV = nullptr;
1876   QualType RetTy = FI.getReturnType();
1877   const ABIArgInfo &RetAI = FI.getReturnInfo();
1878
1879   switch (RetAI.getKind()) {
1880   case ABIArgInfo::InAlloca:
1881     // Aggregates get evaluated directly into the destination. Sometimes we
1882     // need to return the sret value in a register, though.
1883     assert(hasAggregateEvaluationKind(RetTy));
1884     if (RetAI.getInAllocaSRet()) {
1885       llvm::Function::arg_iterator EI = CurFn->arg_end();
1886       --EI;
1887       llvm::Value *ArgStruct = EI;
1888       llvm::Value *SRet =
1889         Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex());
1890       RV = Builder.CreateLoad(SRet, "sret");
1891     }
1892     break;
1893
1894   case ABIArgInfo::Indirect: {
1895     auto AI = CurFn->arg_begin();
1896     if (RetAI.isSRetAfterThis())
1897       ++AI;
1898     switch (getEvaluationKind(RetTy)) {
1899     case TEK_Complex: {
1900       ComplexPairTy RT =
1901         EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
1902                           EndLoc);
1903       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
1904                          /*isInit*/ true);
1905       break;
1906     }
1907     case TEK_Aggregate:
1908       // Do nothing; aggregates get evaluated directly into the destination.
1909 break; 1910 case TEK_Scalar: 1911 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), 1912 MakeNaturalAlignAddrLValue(AI, RetTy), 1913 /*isInit*/ true); 1914 break; 1915 } 1916 break; 1917 } 1918 1919 case ABIArgInfo::Extend: 1920 case ABIArgInfo::Direct: 1921 if (RetAI.getCoerceToType() == ConvertType(RetTy) && 1922 RetAI.getDirectOffset() == 0) { 1923 // The internal return value temp always will have pointer-to-return-type 1924 // type, just do a load. 1925 1926 // If there is a dominating store to ReturnValue, we can elide 1927 // the load, zap the store, and usually zap the alloca. 1928 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) { 1929 // Reuse the debug location from the store unless there is 1930 // cleanup code to be emitted between the store and return 1931 // instruction. 1932 if (EmitRetDbgLoc && !AutoreleaseResult) 1933 RetDbgLoc = SI->getDebugLoc(); 1934 // Get the stored value and nuke the now-dead store. 1935 RV = SI->getValueOperand(); 1936 SI->eraseFromParent(); 1937 1938 // If that was the only use of the return value, nuke it as well now. 1939 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) { 1940 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent(); 1941 ReturnValue = nullptr; 1942 } 1943 1944 // Otherwise, we have to do a simple load. 1945 } else { 1946 RV = Builder.CreateLoad(ReturnValue); 1947 } 1948 } else { 1949 llvm::Value *V = ReturnValue; 1950 // If the value is offset in memory, apply the offset now. 1951 if (unsigned Offs = RetAI.getDirectOffset()) { 1952 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy()); 1953 V = Builder.CreateConstGEP1_32(V, Offs); 1954 V = Builder.CreateBitCast(V, 1955 llvm::PointerType::getUnqual(RetAI.getCoerceToType())); 1956 } 1957 1958 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 1959 } 1960 1961 // In ARC, end functions that return a retainable type with a call 1962 // to objc_autoreleaseReturnValue. 1963 if (AutoreleaseResult) { 1964 assert(getLangOpts().ObjCAutoRefCount && 1965 !FI.isReturnsRetained() && 1966 RetTy->isObjCRetainableType()); 1967 RV = emitAutoreleaseOfResult(*this, RV); 1968 } 1969 1970 break; 1971 1972 case ABIArgInfo::Ignore: 1973 break; 1974 1975 case ABIArgInfo::Expand: 1976 llvm_unreachable("Invalid ABI kind for return argument"); 1977 } 1978 1979 llvm::Instruction *Ret; 1980 if (RV) { 1981 if (SanOpts->ReturnsNonnullAttribute && 1982 CurGD.getDecl()->hasAttr<ReturnsNonNullAttr>()) { 1983 SanitizerScope SanScope(this); 1984 llvm::Value *Cond = 1985 Builder.CreateICmpNE(RV, llvm::Constant::getNullValue(RV->getType())); 1986 llvm::Constant *StaticData[] = { 1987 EmitCheckSourceLocation(EndLoc) 1988 }; 1989 EmitCheck(Cond, "nonnull_return", StaticData, ArrayRef<llvm::Value *>(), 1990 CRK_Recoverable); 1991 } 1992 Ret = Builder.CreateRet(RV); 1993 } else { 1994 Ret = Builder.CreateRetVoid(); 1995 } 1996 1997 if (!RetDbgLoc.isUnknown()) 1998 Ret->setDebugLoc(RetDbgLoc); 1999 } 2000 2001 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2002 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2003 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2004 } 2005 2006 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) { 2007 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2008 // placeholders. 
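  // The placeholder built below is a load of an undef pointer-to-pointer; it
  // stands in for the address of the argument's inalloca slot, which does not
  // exist yet at this point. Once EmitCall has created the argument struct,
  // the placeholder instruction is rewritten to the real GEP (see
  // deferPlaceholderReplacement and the InAlloca handling in EmitCall).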
2009 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2010 llvm::Value *Placeholder = 2011 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo()); 2012 Placeholder = CGF.Builder.CreateLoad(Placeholder); 2013 return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(), 2014 Ty.getQualifiers(), 2015 AggValueSlot::IsNotDestructed, 2016 AggValueSlot::DoesNotNeedGCBarriers, 2017 AggValueSlot::IsNotAliased); 2018 } 2019 2020 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 2021 const VarDecl *param, 2022 SourceLocation loc) { 2023 // StartFunction converted the ABI-lowered parameter(s) into a 2024 // local alloca. We need to turn that into an r-value suitable 2025 // for EmitCall. 2026 llvm::Value *local = GetAddrOfLocalVar(param); 2027 2028 QualType type = param->getType(); 2029 2030 // For the most part, we just need to load the alloca, except: 2031 // 1) aggregate r-values are actually pointers to temporaries, and 2032 // 2) references to non-scalars are pointers directly to the aggregate. 2033 // I don't know why references to scalars are different here. 2034 if (const ReferenceType *ref = type->getAs<ReferenceType>()) { 2035 if (!hasScalarEvaluationKind(ref->getPointeeType())) 2036 return args.add(RValue::getAggregate(local), type); 2037 2038 // Locals which are references to scalars are represented 2039 // with allocas holding the pointer. 2040 return args.add(RValue::get(Builder.CreateLoad(local)), type); 2041 } 2042 2043 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 2044 "cannot emit delegate call arguments for inalloca arguments!"); 2045 2046 args.add(convertTempToRValue(local, type, loc), type); 2047 } 2048 2049 static bool isProvablyNull(llvm::Value *addr) { 2050 return isa<llvm::ConstantPointerNull>(addr); 2051 } 2052 2053 static bool isProvablyNonNull(llvm::Value *addr) { 2054 return isa<llvm::AllocaInst>(addr); 2055 } 2056 2057 /// Emit the actual writing-back of a writeback. 2058 static void emitWriteback(CodeGenFunction &CGF, 2059 const CallArgList::Writeback &writeback) { 2060 const LValue &srcLV = writeback.Source; 2061 llvm::Value *srcAddr = srcLV.getAddress(); 2062 assert(!isProvablyNull(srcAddr) && 2063 "shouldn't have writeback for provably null argument"); 2064 2065 llvm::BasicBlock *contBB = nullptr; 2066 2067 // If the argument wasn't provably non-null, we need to null check 2068 // before doing the store. 2069 bool provablyNonNull = isProvablyNonNull(srcAddr); 2070 if (!provablyNonNull) { 2071 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 2072 contBB = CGF.createBasicBlock("icr.done"); 2073 2074 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 2075 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 2076 CGF.EmitBlock(writebackBB); 2077 } 2078 2079 // Load the value to writeback. 2080 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 2081 2082 // Cast it back, in case we're writing an id to a Foo* or something. 2083 value = CGF.Builder.CreateBitCast(value, 2084 cast<llvm::PointerType>(srcAddr->getType())->getElementType(), 2085 "icr.writeback-cast"); 2086 2087 // Perform the writeback. 2088 2089 // If we have a "to use" value, it's something we need to emit a use 2090 // of. This has to be carefully threaded in: if it's done after the 2091 // release it's potentially undefined behavior (and the optimizer 2092 // will ignore it), and if it happens before the retain then the 2093 // optimizer could move the release there. 
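  // A typical source pattern that reaches here (illustrative only):
  //   NSError *err = nil;
  //   [obj doSomething:&err];   // parameter is NSError *__autoreleasing *
  // The callee actually received the address of a temporary; this function
  // stores the temporary's final value back into 'err' afterwards,
  // null-checking the destination first when it cannot be proven non-null.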
2094   if (writeback.ToUse) {
2095     assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2096
2097     // Retain the new value. No need to block-copy here: the block's
2098     // being passed up the stack.
2099     value = CGF.EmitARCRetainNonBlock(value);
2100
2101     // Emit the intrinsic use here.
2102     CGF.EmitARCIntrinsicUse(writeback.ToUse);
2103
2104     // Load the old value (primitively).
2105     llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2106
2107     // Put the new value in place (primitively).
2108     CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2109
2110     // Release the old value.
2111     CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2112
2113   // Otherwise, we can just do a normal lvalue store.
2114   } else {
2115     CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2116   }
2117
2118   // Jump to the continuation block.
2119   if (!provablyNonNull)
2120     CGF.EmitBlock(contBB);
2121 }
2122
2123 static void emitWritebacks(CodeGenFunction &CGF,
2124                            const CallArgList &args) {
2125   for (const auto &I : args.writebacks())
2126     emitWriteback(CGF, I);
2127 }
2128
2129 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2130                                             const CallArgList &CallArgs) {
2131   assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2132   ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2133     CallArgs.getCleanupsToDeactivate();
2134   // Iterate in reverse to increase the likelihood of popping the cleanup.
2135   for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
2136          I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
2137     CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
2138     I->IsActiveIP->eraseFromParent();
2139   }
2140 }
2141
2142 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2143   if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2144     if (uop->getOpcode() == UO_AddrOf)
2145       return uop->getSubExpr();
2146   return nullptr;
2147 }
2148
2149 /// Emit an argument that's being passed call-by-writeback. That is,
2150 /// we are passing the address of a temporary that will be written back to the original l-value after the call.
2151 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2152                              const ObjCIndirectCopyRestoreExpr *CRE) {
2153   LValue srcLV;
2154
2155   // Make an optimistic effort to emit the address as an l-value.
2156   // This can fail if the argument expression is more complicated.
2157   if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2158     srcLV = CGF.EmitLValue(lvExpr);
2159
2160   // Otherwise, just emit it as a scalar.
2161   } else {
2162     llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
2163
2164     QualType srcAddrType =
2165       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2166     srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
2167   }
2168   llvm::Value *srcAddr = srcLV.getAddress();
2169
2170   // The dest and src types don't necessarily match in LLVM terms
2171   // because of the crazy ObjC compatibility rules.
2172
2173   llvm::PointerType *destType =
2174     cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2175
2176   // If the address is a constant null, just pass the appropriate null.
2177   if (isProvablyNull(srcAddr)) {
2178     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2179              CRE->getType());
2180     return;
2181   }
2182
2183   // Create the temporary.
2184 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(), 2185 "icr.temp"); 2186 // Loading an l-value can introduce a cleanup if the l-value is __weak, 2187 // and that cleanup will be conditional if we can't prove that the l-value 2188 // isn't null, so we need to register a dominating point so that the cleanups 2189 // system will make valid IR. 2190 CodeGenFunction::ConditionalEvaluation condEval(CGF); 2191 2192 // Zero-initialize it if we're not doing a copy-initialization. 2193 bool shouldCopy = CRE->shouldCopy(); 2194 if (!shouldCopy) { 2195 llvm::Value *null = 2196 llvm::ConstantPointerNull::get( 2197 cast<llvm::PointerType>(destType->getElementType())); 2198 CGF.Builder.CreateStore(null, temp); 2199 } 2200 2201 llvm::BasicBlock *contBB = nullptr; 2202 llvm::BasicBlock *originBB = nullptr; 2203 2204 // If the address is *not* known to be non-null, we need to switch. 2205 llvm::Value *finalArgument; 2206 2207 bool provablyNonNull = isProvablyNonNull(srcAddr); 2208 if (provablyNonNull) { 2209 finalArgument = temp; 2210 } else { 2211 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull"); 2212 2213 finalArgument = CGF.Builder.CreateSelect(isNull, 2214 llvm::ConstantPointerNull::get(destType), 2215 temp, "icr.argument"); 2216 2217 // If we need to copy, then the load has to be conditional, which 2218 // means we need control flow. 2219 if (shouldCopy) { 2220 originBB = CGF.Builder.GetInsertBlock(); 2221 contBB = CGF.createBasicBlock("icr.cont"); 2222 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 2223 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 2224 CGF.EmitBlock(copyBB); 2225 condEval.begin(CGF); 2226 } 2227 } 2228 2229 llvm::Value *valueToUse = nullptr; 2230 2231 // Perform a copy if necessary. 2232 if (shouldCopy) { 2233 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 2234 assert(srcRV.isScalar()); 2235 2236 llvm::Value *src = srcRV.getScalarVal(); 2237 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 2238 "icr.cast"); 2239 2240 // Use an ordinary store, not a store-to-lvalue. 2241 CGF.Builder.CreateStore(src, temp); 2242 2243 // If optimization is enabled, and the value was held in a 2244 // __strong variable, we need to tell the optimizer that this 2245 // value has to stay alive until we're doing the store back. 2246 // This is because the temporary is effectively unretained, 2247 // and so otherwise we can violate the high-level semantics. 2248 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 2249 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 2250 valueToUse = src; 2251 } 2252 } 2253 2254 // Finish the control flow if we needed it. 2255 if (shouldCopy && !provablyNonNull) { 2256 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 2257 CGF.EmitBlock(contBB); 2258 2259 // Make a phi for the value to intrinsically use. 2260 if (valueToUse) { 2261 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 2262 "icr.to-use"); 2263 phiToUse->addIncoming(valueToUse, copyBB); 2264 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 2265 originBB); 2266 valueToUse = phiToUse; 2267 } 2268 2269 condEval.end(CGF); 2270 } 2271 2272 args.addWriteback(srcLV, temp, valueToUse); 2273 args.add(RValue::get(finalArgument), CRE->getType()); 2274 } 2275 2276 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 2277 assert(!StackBase && !StackCleanup.isValid()); 2278 2279 // Save the stack. 
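  // Roughly (illustrative IR only), the resulting call sequence looks like:
  //   %inalloca.save = call i8* @llvm.stacksave()
  //   %argmem = alloca inalloca <{ ...argument struct... }>
  //   ; ...arguments are stored into %argmem...
  //   call void @f(<{ ... }>* inalloca %argmem)
  //   call void @llvm.stackrestore(i8* %inalloca.save)
  // The stackrestore that reclaims the memory is emitted by
  // freeArgumentMemory (or by the EH cleanup pushed below).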
2280 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 2281 StackBase = CGF.Builder.CreateCall(F, "inalloca.save"); 2282 2283 // Control gets really tied up in landing pads, so we have to spill the 2284 // stacksave to an alloca to avoid violating SSA form. 2285 // TODO: This is dead if we never emit the cleanup. We should create the 2286 // alloca and store lazily on the first cleanup emission. 2287 StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem"); 2288 CGF.Builder.CreateStore(StackBase, StackBaseMem); 2289 CGF.pushStackRestore(EHCleanup, StackBaseMem); 2290 StackCleanup = CGF.EHStack.getInnermostEHScope(); 2291 assert(StackCleanup.isValid()); 2292 } 2293 2294 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 2295 if (StackBase) { 2296 CGF.DeactivateCleanupBlock(StackCleanup, StackBase); 2297 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 2298 // We could load StackBase from StackBaseMem, but in the non-exceptional 2299 // case we can skip it. 2300 CGF.Builder.CreateCall(F, StackBase); 2301 } 2302 } 2303 2304 void CodeGenFunction::EmitCallArgs(CallArgList &Args, 2305 ArrayRef<QualType> ArgTypes, 2306 CallExpr::const_arg_iterator ArgBeg, 2307 CallExpr::const_arg_iterator ArgEnd, 2308 bool ForceColumnInfo) { 2309 CGDebugInfo *DI = getDebugInfo(); 2310 SourceLocation CallLoc; 2311 if (DI) CallLoc = DI->getLocation(); 2312 2313 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 2314 // because arguments are destroyed left to right in the callee. 2315 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2316 // Insert a stack save if we're going to need any inalloca args. 2317 bool HasInAllocaArgs = false; 2318 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 2319 I != E && !HasInAllocaArgs; ++I) 2320 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 2321 if (HasInAllocaArgs) { 2322 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 2323 Args.allocateArgumentMemory(*this); 2324 } 2325 2326 // Evaluate each argument. 2327 size_t CallArgsStart = Args.size(); 2328 for (int I = ArgTypes.size() - 1; I >= 0; --I) { 2329 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2330 EmitCallArg(Args, *Arg, ArgTypes[I]); 2331 // Restore the debug location. 2332 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); 2333 } 2334 2335 // Un-reverse the arguments we just evaluated so they match up with the LLVM 2336 // IR function. 2337 std::reverse(Args.begin() + CallArgsStart, Args.end()); 2338 return; 2339 } 2340 2341 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 2342 CallExpr::const_arg_iterator Arg = ArgBeg + I; 2343 assert(Arg != ArgEnd); 2344 EmitCallArg(Args, *Arg, ArgTypes[I]); 2345 // Restore the debug location. 
2346 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo); 2347 } 2348 } 2349 2350 namespace { 2351 2352 struct DestroyUnpassedArg : EHScopeStack::Cleanup { 2353 DestroyUnpassedArg(llvm::Value *Addr, QualType Ty) 2354 : Addr(Addr), Ty(Ty) {} 2355 2356 llvm::Value *Addr; 2357 QualType Ty; 2358 2359 void Emit(CodeGenFunction &CGF, Flags flags) override { 2360 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 2361 assert(!Dtor->isTrivial()); 2362 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 2363 /*Delegating=*/false, Addr); 2364 } 2365 }; 2366 2367 } 2368 2369 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 2370 QualType type) { 2371 if (const ObjCIndirectCopyRestoreExpr *CRE 2372 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 2373 assert(getLangOpts().ObjCAutoRefCount); 2374 assert(getContext().hasSameType(E->getType(), type)); 2375 return emitWritebackArg(*this, args, CRE); 2376 } 2377 2378 assert(type->isReferenceType() == E->isGLValue() && 2379 "reference binding to unmaterialized r-value!"); 2380 2381 if (E->isGLValue()) { 2382 assert(E->getObjectKind() == OK_Ordinary); 2383 return args.add(EmitReferenceBindingToExpr(E), type); 2384 } 2385 2386 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 2387 2388 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 2389 // However, we still have to push an EH-only cleanup in case we unwind before 2390 // we make it to the call. 2391 if (HasAggregateEvalKind && 2392 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2393 // If we're using inalloca, use the argument memory. Otherwise, use a 2394 // temporary. 2395 AggValueSlot Slot; 2396 if (args.isUsingInAlloca()) 2397 Slot = createPlaceholderSlot(*this, type); 2398 else 2399 Slot = CreateAggTemp(type, "agg.tmp"); 2400 2401 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2402 bool DestroyedInCallee = 2403 RD && RD->hasNonTrivialDestructor() && 2404 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 2405 if (DestroyedInCallee) 2406 Slot.setExternallyDestructed(); 2407 2408 EmitAggExpr(E, Slot); 2409 RValue RV = Slot.asRValue(); 2410 args.add(RV, type); 2411 2412 if (DestroyedInCallee) { 2413 // Create a no-op GEP between the placeholder and the cleanup so we can 2414 // RAUW it successfully. It also serves as a marker of the first 2415 // instruction where the cleanup is active. 2416 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type); 2417 // This unreachable is a temporary marker which will be removed later. 2418 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 2419 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 2420 } 2421 return; 2422 } 2423 2424 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 2425 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 2426 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 2427 assert(L.isSimple()); 2428 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 2429 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 2430 } else { 2431 // We can't represent a misaligned lvalue in the CallArgList, so copy 2432 // to an aligned temporary now. 
2433 llvm::Value *tmp = CreateMemTemp(type); 2434 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(), 2435 L.getAlignment()); 2436 args.add(RValue::getAggregate(tmp), type); 2437 } 2438 return; 2439 } 2440 2441 args.add(EmitAnyExprToTemp(E), type); 2442 } 2443 2444 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2445 // optimizer it can aggressively ignore unwind edges. 2446 void 2447 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 2448 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 2449 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 2450 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 2451 CGM.getNoObjCARCExceptionsMetadata()); 2452 } 2453 2454 /// Emits a call to the given no-arguments nounwind runtime function. 2455 llvm::CallInst * 2456 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2457 const llvm::Twine &name) { 2458 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name); 2459 } 2460 2461 /// Emits a call to the given nounwind runtime function. 2462 llvm::CallInst * 2463 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 2464 ArrayRef<llvm::Value*> args, 2465 const llvm::Twine &name) { 2466 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 2467 call->setDoesNotThrow(); 2468 return call; 2469 } 2470 2471 /// Emits a simple call (never an invoke) to the given no-arguments 2472 /// runtime function. 2473 llvm::CallInst * 2474 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2475 const llvm::Twine &name) { 2476 return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name); 2477 } 2478 2479 /// Emits a simple call (never an invoke) to the given runtime 2480 /// function. 2481 llvm::CallInst * 2482 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 2483 ArrayRef<llvm::Value*> args, 2484 const llvm::Twine &name) { 2485 llvm::CallInst *call = Builder.CreateCall(callee, args, name); 2486 call->setCallingConv(getRuntimeCC()); 2487 return call; 2488 } 2489 2490 /// Emits a call or invoke to the given noreturn runtime function. 2491 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 2492 ArrayRef<llvm::Value*> args) { 2493 if (getInvokeDest()) { 2494 llvm::InvokeInst *invoke = 2495 Builder.CreateInvoke(callee, 2496 getUnreachableBlock(), 2497 getInvokeDest(), 2498 args); 2499 invoke->setDoesNotReturn(); 2500 invoke->setCallingConv(getRuntimeCC()); 2501 } else { 2502 llvm::CallInst *call = Builder.CreateCall(callee, args); 2503 call->setDoesNotReturn(); 2504 call->setCallingConv(getRuntimeCC()); 2505 Builder.CreateUnreachable(); 2506 } 2507 PGO.setCurrentRegionUnreachable(); 2508 } 2509 2510 /// Emits a call or invoke instruction to the given nullary runtime 2511 /// function. 2512 llvm::CallSite 2513 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2514 const Twine &name) { 2515 return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name); 2516 } 2517 2518 /// Emits a call or invoke instruction to the given runtime function. 
2519 llvm::CallSite 2520 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 2521 ArrayRef<llvm::Value*> args, 2522 const Twine &name) { 2523 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 2524 callSite.setCallingConv(getRuntimeCC()); 2525 return callSite; 2526 } 2527 2528 llvm::CallSite 2529 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2530 const Twine &Name) { 2531 return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name); 2532 } 2533 2534 /// Emits a call or invoke instruction to the given function, depending 2535 /// on the current state of the EH stack. 2536 llvm::CallSite 2537 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 2538 ArrayRef<llvm::Value *> Args, 2539 const Twine &Name) { 2540 llvm::BasicBlock *InvokeDest = getInvokeDest(); 2541 2542 llvm::Instruction *Inst; 2543 if (!InvokeDest) 2544 Inst = Builder.CreateCall(Callee, Args, Name); 2545 else { 2546 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 2547 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name); 2548 EmitBlock(ContBB); 2549 } 2550 2551 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 2552 // optimizer it can aggressively ignore unwind edges. 2553 if (CGM.getLangOpts().ObjCAutoRefCount) 2554 AddObjCARCExceptionMetadata(Inst); 2555 2556 return Inst; 2557 } 2558 2559 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo, 2560 llvm::FunctionType *FTy) { 2561 if (ArgNo < FTy->getNumParams()) 2562 assert(Elt->getType() == FTy->getParamType(ArgNo)); 2563 else 2564 assert(FTy->isVarArg()); 2565 ++ArgNo; 2566 } 2567 2568 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV, 2569 SmallVectorImpl<llvm::Value *> &Args, 2570 llvm::FunctionType *IRFuncTy) { 2571 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) { 2572 unsigned NumElts = AT->getSize().getZExtValue(); 2573 QualType EltTy = AT->getElementType(); 2574 llvm::Value *Addr = RV.getAggregateAddr(); 2575 for (unsigned Elt = 0; Elt < NumElts; ++Elt) { 2576 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt); 2577 RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation()); 2578 ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy); 2579 } 2580 } else if (const RecordType *RT = Ty->getAs<RecordType>()) { 2581 RecordDecl *RD = RT->getDecl(); 2582 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion"); 2583 LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty); 2584 2585 if (RD->isUnion()) { 2586 const FieldDecl *LargestFD = nullptr; 2587 CharUnits UnionSize = CharUnits::Zero(); 2588 2589 for (const auto *FD : RD->fields()) { 2590 assert(!FD->isBitField() && 2591 "Cannot expand structure with bit-field members."); 2592 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType()); 2593 if (UnionSize < FieldSize) { 2594 UnionSize = FieldSize; 2595 LargestFD = FD; 2596 } 2597 } 2598 if (LargestFD) { 2599 RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation()); 2600 ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy); 2601 } 2602 } else { 2603 for (const auto *FD : RD->fields()) { 2604 RValue FldRV = EmitRValueForField(LV, FD, SourceLocation()); 2605 ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy); 2606 } 2607 } 2608 } else if (Ty->isAnyComplexType()) { 2609 ComplexPairTy CV = RV.getComplexVal(); 2610 Args.push_back(CV.first); 2611 Args.push_back(CV.second); 2612 } else { 2613 assert(RV.isScalar() && 2614 "Unexpected non-scalar rvalue during struct expansion."); 2615 2616 // 
Insert a bitcast as needed. 2617 llvm::Value *V = RV.getScalarVal(); 2618 if (Args.size() < IRFuncTy->getNumParams() && 2619 V->getType() != IRFuncTy->getParamType(Args.size())) 2620 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size())); 2621 2622 Args.push_back(V); 2623 } 2624 } 2625 2626 /// \brief Store a non-aggregate value to an address to initialize it. For 2627 /// initialization, a non-atomic store will be used. 2628 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 2629 LValue Dst) { 2630 if (Src.isScalar()) 2631 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 2632 else 2633 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 2634 } 2635 2636 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 2637 llvm::Value *New) { 2638 DeferredReplacements.push_back(std::make_pair(Old, New)); 2639 } 2640 2641 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 2642 llvm::Value *Callee, 2643 ReturnValueSlot ReturnValue, 2644 const CallArgList &CallArgs, 2645 const Decl *TargetDecl, 2646 llvm::Instruction **callOrInvoke) { 2647 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 2648 SmallVector<llvm::Value*, 16> Args; 2649 2650 // Handle struct-return functions by passing a pointer to the 2651 // location that we would like to return into. 2652 QualType RetTy = CallInfo.getReturnType(); 2653 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 2654 2655 // IRArgNo - Keep track of the argument number in the callee we're looking at. 2656 unsigned IRArgNo = 0; 2657 llvm::FunctionType *IRFuncTy = 2658 cast<llvm::FunctionType>( 2659 cast<llvm::PointerType>(Callee->getType())->getElementType()); 2660 2661 // If we're using inalloca, insert the allocation after the stack save. 2662 // FIXME: Do this earlier rather than hacking it in here! 2663 llvm::Value *ArgMemory = nullptr; 2664 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 2665 llvm::Instruction *IP = CallArgs.getStackBase(); 2666 llvm::AllocaInst *AI; 2667 if (IP) { 2668 IP = IP->getNextNode(); 2669 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP); 2670 } else { 2671 AI = CreateTempAlloca(ArgStruct, "argmem"); 2672 } 2673 AI->setUsedWithInAlloca(true); 2674 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 2675 ArgMemory = AI; 2676 } 2677 2678 // If the call returns a temporary with struct return, create a temporary 2679 // alloca to hold the result, unless one is given to us. 
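  // Roughly (illustrative IR only), an indirect return of 'S s = f(...)'
  // becomes
  //   %agg.tmp = alloca %struct.S
  //   call void @f(%struct.S* %agg.tmp, ...)   ; pointer carries 'sret'
  // with the sret pointer instead passed second, after 'this', when
  // isSRetAfterThis() is set for the ABI in use.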
2680 llvm::Value *SRetPtr = nullptr; 2681 bool SwapThisWithSRet = false; 2682 if (RetAI.isIndirect() || RetAI.isInAlloca()) { 2683 SRetPtr = ReturnValue.getValue(); 2684 if (!SRetPtr) 2685 SRetPtr = CreateMemTemp(RetTy); 2686 if (RetAI.isIndirect()) { 2687 Args.push_back(SRetPtr); 2688 SwapThisWithSRet = RetAI.isSRetAfterThis(); 2689 if (SwapThisWithSRet) 2690 IRArgNo = 1; 2691 checkArgMatches(SRetPtr, IRArgNo, IRFuncTy); 2692 if (SwapThisWithSRet) 2693 IRArgNo = 0; 2694 } else { 2695 llvm::Value *Addr = 2696 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); 2697 Builder.CreateStore(SRetPtr, Addr); 2698 } 2699 } 2700 2701 assert(CallInfo.arg_size() == CallArgs.size() && 2702 "Mismatch between function signature & arguments."); 2703 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 2704 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 2705 I != E; ++I, ++info_it) { 2706 const ABIArgInfo &ArgInfo = info_it->info; 2707 RValue RV = I->RV; 2708 2709 // Skip 'sret' if it came second. 2710 if (IRArgNo == 1 && SwapThisWithSRet) 2711 ++IRArgNo; 2712 2713 CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty); 2714 2715 // Insert a padding argument to ensure proper alignment. 2716 if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) { 2717 Args.push_back(llvm::UndefValue::get(PaddingType)); 2718 ++IRArgNo; 2719 } 2720 2721 switch (ArgInfo.getKind()) { 2722 case ABIArgInfo::InAlloca: { 2723 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 2724 if (RV.isAggregate()) { 2725 // Replace the placeholder with the appropriate argument slot GEP. 2726 llvm::Instruction *Placeholder = 2727 cast<llvm::Instruction>(RV.getAggregateAddr()); 2728 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 2729 Builder.SetInsertPoint(Placeholder); 2730 llvm::Value *Addr = Builder.CreateStructGEP( 2731 ArgMemory, ArgInfo.getInAllocaFieldIndex()); 2732 Builder.restoreIP(IP); 2733 deferPlaceholderReplacement(Placeholder, Addr); 2734 } else { 2735 // Store the RValue into the argument struct. 2736 llvm::Value *Addr = 2737 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); 2738 unsigned AS = Addr->getType()->getPointerAddressSpace(); 2739 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); 2740 // There are some cases where a trivial bitcast is not avoidable. The 2741 // definition of a type later in a translation unit may change it's type 2742 // from {}* to (%struct.foo*)*. 2743 if (Addr->getType() != MemType) 2744 Addr = Builder.CreateBitCast(Addr, MemType); 2745 LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign); 2746 EmitInitStoreOfNonAggregate(*this, RV, argLV); 2747 } 2748 break; // Don't increment IRArgNo! 2749 } 2750 2751 case ABIArgInfo::Indirect: { 2752 if (RV.isScalar() || RV.isComplex()) { 2753 // Make a temporary alloca to pass the argument. 2754 llvm::AllocaInst *AI = CreateMemTemp(I->Ty); 2755 if (ArgInfo.getIndirectAlign() > AI->getAlignment()) 2756 AI->setAlignment(ArgInfo.getIndirectAlign()); 2757 Args.push_back(AI); 2758 2759 LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign); 2760 EmitInitStoreOfNonAggregate(*this, RV, argLV); 2761 2762 // Validate argument match. 2763 checkArgMatches(AI, IRArgNo, IRFuncTy); 2764 } else { 2765 // We want to avoid creating an unnecessary temporary+copy here; 2766 // however, we need one in three cases: 2767 // 1. If the argument is not byval, and we are required to copy the 2768 // source. (This case doesn't occur on any common architecture.) 
2769 // 2. If the argument is byval, RV is not sufficiently aligned, and 2770 // we cannot force it to be sufficiently aligned. 2771 // 3. If the argument is byval, but RV is located in an address space 2772 // different than that of the argument (0). 2773 llvm::Value *Addr = RV.getAggregateAddr(); 2774 unsigned Align = ArgInfo.getIndirectAlign(); 2775 const llvm::DataLayout *TD = &CGM.getDataLayout(); 2776 const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace(); 2777 const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ? 2778 IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0); 2779 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 2780 (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align && 2781 llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) || 2782 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 2783 // Create an aligned temporary, and copy to it. 2784 llvm::AllocaInst *AI = CreateMemTemp(I->Ty); 2785 if (Align > AI->getAlignment()) 2786 AI->setAlignment(Align); 2787 Args.push_back(AI); 2788 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 2789 2790 // Validate argument match. 2791 checkArgMatches(AI, IRArgNo, IRFuncTy); 2792 } else { 2793 // Skip the extra memcpy call. 2794 Args.push_back(Addr); 2795 2796 // Validate argument match. 2797 checkArgMatches(Addr, IRArgNo, IRFuncTy); 2798 } 2799 } 2800 break; 2801 } 2802 2803 case ABIArgInfo::Ignore: 2804 break; 2805 2806 case ABIArgInfo::Extend: 2807 case ABIArgInfo::Direct: { 2808 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 2809 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 2810 ArgInfo.getDirectOffset() == 0) { 2811 llvm::Value *V; 2812 if (RV.isScalar()) 2813 V = RV.getScalarVal(); 2814 else 2815 V = Builder.CreateLoad(RV.getAggregateAddr()); 2816 2817 // If the argument doesn't match, perform a bitcast to coerce it. This 2818 // can happen due to trivial type mismatches. 2819 if (IRArgNo < IRFuncTy->getNumParams() && 2820 V->getType() != IRFuncTy->getParamType(IRArgNo)) 2821 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo)); 2822 Args.push_back(V); 2823 2824 checkArgMatches(V, IRArgNo, IRFuncTy); 2825 break; 2826 } 2827 2828 // FIXME: Avoid the conversion through memory if possible. 2829 llvm::Value *SrcPtr; 2830 if (RV.isScalar() || RV.isComplex()) { 2831 SrcPtr = CreateMemTemp(I->Ty, "coerce"); 2832 LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign); 2833 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 2834 } else 2835 SrcPtr = RV.getAggregateAddr(); 2836 2837 // If the value is offset in memory, apply the offset now. 2838 if (unsigned Offs = ArgInfo.getDirectOffset()) { 2839 SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy()); 2840 SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs); 2841 SrcPtr = Builder.CreateBitCast(SrcPtr, 2842 llvm::PointerType::getUnqual(ArgInfo.getCoerceToType())); 2843 2844 } 2845 2846 // If the coerce-to type is a first class aggregate, we flatten it and 2847 // pass the elements. Either way is semantically identical, but fast-isel 2848 // and the optimizer generally likes scalar values better than FCAs. 2849 // We cannot do this for functions using the AAPCS calling convention, 2850 // as structures are treated differently by that calling convention. 
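    // This mirrors the expansion done in the function prologue above: at the
    // call site each element of the coerce-to struct is loaded back out
    // (with alignment 1, since the source's alignment is unknown) and passed
    // as a separate scalar argument. If the in-memory value is smaller than
    // the coerce-to struct, it is first copied into a temporary of the full
    // size so that every element can be loaded.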
2851 llvm::StructType *STy = 2852 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 2853 if (STy && !isAAPCSVFP(CallInfo, getTarget())) { 2854 llvm::Type *SrcTy = 2855 cast<llvm::PointerType>(SrcPtr->getType())->getElementType(); 2856 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 2857 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 2858 2859 // If the source type is smaller than the destination type of the 2860 // coerce-to logic, copy the source value into a temp alloca the size 2861 // of the destination type to allow loading all of it. The bits past 2862 // the source value are left undef. 2863 if (SrcSize < DstSize) { 2864 llvm::AllocaInst *TempAlloca 2865 = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce"); 2866 Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0); 2867 SrcPtr = TempAlloca; 2868 } else { 2869 SrcPtr = Builder.CreateBitCast(SrcPtr, 2870 llvm::PointerType::getUnqual(STy)); 2871 } 2872 2873 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2874 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i); 2875 llvm::LoadInst *LI = Builder.CreateLoad(EltPtr); 2876 // We don't know what we're loading from. 2877 LI->setAlignment(1); 2878 Args.push_back(LI); 2879 2880 // Validate argument match. 2881 checkArgMatches(LI, IRArgNo, IRFuncTy); 2882 } 2883 } else { 2884 // In the simple case, just pass the coerced loaded value. 2885 Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), 2886 *this)); 2887 2888 // Validate argument match. 2889 checkArgMatches(Args.back(), IRArgNo, IRFuncTy); 2890 } 2891 2892 break; 2893 } 2894 2895 case ABIArgInfo::Expand: 2896 ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy); 2897 IRArgNo = Args.size(); 2898 break; 2899 } 2900 } 2901 2902 if (SwapThisWithSRet) 2903 std::swap(Args[0], Args[1]); 2904 2905 if (ArgMemory) { 2906 llvm::Value *Arg = ArgMemory; 2907 if (CallInfo.isVariadic()) { 2908 // When passing non-POD arguments by value to variadic functions, we will 2909 // end up with a variadic prototype and an inalloca call site. In such 2910 // cases, we can't do any parameter mismatch checks. Give up and bitcast 2911 // the callee. 2912 unsigned CalleeAS = 2913 cast<llvm::PointerType>(Callee->getType())->getAddressSpace(); 2914 Callee = Builder.CreateBitCast( 2915 Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS)); 2916 } else { 2917 llvm::Type *LastParamTy = 2918 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 2919 if (Arg->getType() != LastParamTy) { 2920 #ifndef NDEBUG 2921 // Assert that these structs have equivalent element types. 2922 llvm::StructType *FullTy = CallInfo.getArgStruct(); 2923 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 2924 cast<llvm::PointerType>(LastParamTy)->getElementType()); 2925 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 2926 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 2927 DE = DeclaredTy->element_end(), 2928 FI = FullTy->element_begin(); 2929 DI != DE; ++DI, ++FI) 2930 assert(*DI == *FI); 2931 #endif 2932 Arg = Builder.CreateBitCast(Arg, LastParamTy); 2933 } 2934 } 2935 Args.push_back(Arg); 2936 } 2937 2938 if (!CallArgs.getCleanupsToDeactivate().empty()) 2939 deactivateArgCleanupsBeforeCall(*this, CallArgs); 2940 2941 // If the callee is a bitcast of a function to a varargs pointer to function 2942 // type, check to see if we can remove the bitcast. This handles some cases 2943 // with unprototyped functions. 
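  // Illustrative only: this fires for K&R-style code such as
  //   void f();          /* no prototype */
  //   ...
  //   f(1, 2);           /* call emitted through a bitcast of @f */
  // where the IR-level return and argument types happen to match @f exactly;
  // stripping the constant bitcast makes the call refer to @f directly, which
  // in turn lets always_inline take effect at -O0.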
2944 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee)) 2945 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) { 2946 llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType()); 2947 llvm::FunctionType *CurFT = 2948 cast<llvm::FunctionType>(CurPT->getElementType()); 2949 llvm::FunctionType *ActualFT = CalleeF->getFunctionType(); 2950 2951 if (CE->getOpcode() == llvm::Instruction::BitCast && 2952 ActualFT->getReturnType() == CurFT->getReturnType() && 2953 ActualFT->getNumParams() == CurFT->getNumParams() && 2954 ActualFT->getNumParams() == Args.size() && 2955 (CurFT->isVarArg() || !ActualFT->isVarArg())) { 2956 bool ArgsMatch = true; 2957 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i) 2958 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) { 2959 ArgsMatch = false; 2960 break; 2961 } 2962 2963 // Strip the cast if we can get away with it. This is a nice cleanup, 2964 // but also allows us to inline the function at -O0 if it is marked 2965 // always_inline. 2966 if (ArgsMatch) 2967 Callee = CalleeF; 2968 } 2969 } 2970 2971 unsigned CallingConv; 2972 CodeGen::AttributeListType AttributeList; 2973 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, 2974 CallingConv, true); 2975 llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(), 2976 AttributeList); 2977 2978 llvm::BasicBlock *InvokeDest = nullptr; 2979 if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex, 2980 llvm::Attribute::NoUnwind)) 2981 InvokeDest = getInvokeDest(); 2982 2983 llvm::CallSite CS; 2984 if (!InvokeDest) { 2985 CS = Builder.CreateCall(Callee, Args); 2986 } else { 2987 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 2988 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args); 2989 EmitBlock(Cont); 2990 } 2991 if (callOrInvoke) 2992 *callOrInvoke = CS.getInstruction(); 2993 2994 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && 2995 !CS.hasFnAttr(llvm::Attribute::NoInline)) 2996 Attrs = 2997 Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex, 2998 llvm::Attribute::AlwaysInline); 2999 3000 CS.setAttributes(Attrs); 3001 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 3002 3003 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3004 // optimizer it can aggressively ignore unwind edges. 3005 if (CGM.getLangOpts().ObjCAutoRefCount) 3006 AddObjCARCExceptionMetadata(CS.getInstruction()); 3007 3008 // If the call doesn't return, finish the basic block and clear the 3009 // insertion point; this allows the rest of IRgen to discard 3010 // unreachable code. 3011 if (CS.doesNotReturn()) { 3012 Builder.CreateUnreachable(); 3013 Builder.ClearInsertionPoint(); 3014 3015 // FIXME: For now, emit a dummy basic block because expr emitters in 3016 // generally are not ready to handle emitting expressions at unreachable 3017 // points. 3018 EnsureInsertPoint(); 3019 3020 // Return a reasonable RValue. 3021 return GetUndefRValue(RetTy); 3022 } 3023 3024 llvm::Instruction *CI = CS.getInstruction(); 3025 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy()) 3026 CI->setName("call"); 3027 3028 // Emit any writebacks immediately. Arguably this should happen 3029 // after any return-value munging. 3030 if (CallArgs.hasWritebacks()) 3031 emitWritebacks(*this, CallArgs); 3032 3033 // The stack cleanup for inalloca arguments has to run out of the normal 3034 // lexical order, so deactivate it and run it manually here. 
3035 CallArgs.freeArgumentMemory(*this); 3036 3037 switch (RetAI.getKind()) { 3038 case ABIArgInfo::InAlloca: 3039 case ABIArgInfo::Indirect: 3040 return convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 3041 3042 case ABIArgInfo::Ignore: 3043 // If we are ignoring an argument that had a result, make sure to 3044 // construct the appropriate return value for our caller. 3045 return GetUndefRValue(RetTy); 3046 3047 case ABIArgInfo::Extend: 3048 case ABIArgInfo::Direct: { 3049 llvm::Type *RetIRTy = ConvertType(RetTy); 3050 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 3051 switch (getEvaluationKind(RetTy)) { 3052 case TEK_Complex: { 3053 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 3054 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 3055 return RValue::getComplex(std::make_pair(Real, Imag)); 3056 } 3057 case TEK_Aggregate: { 3058 llvm::Value *DestPtr = ReturnValue.getValue(); 3059 bool DestIsVolatile = ReturnValue.isVolatile(); 3060 3061 if (!DestPtr) { 3062 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 3063 DestIsVolatile = false; 3064 } 3065 BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false); 3066 return RValue::getAggregate(DestPtr); 3067 } 3068 case TEK_Scalar: { 3069 // If the argument doesn't match, perform a bitcast to coerce it. This 3070 // can happen due to trivial type mismatches. 3071 llvm::Value *V = CI; 3072 if (V->getType() != RetIRTy) 3073 V = Builder.CreateBitCast(V, RetIRTy); 3074 return RValue::get(V); 3075 } 3076 } 3077 llvm_unreachable("bad evaluation kind"); 3078 } 3079 3080 llvm::Value *DestPtr = ReturnValue.getValue(); 3081 bool DestIsVolatile = ReturnValue.isVolatile(); 3082 3083 if (!DestPtr) { 3084 DestPtr = CreateMemTemp(RetTy, "coerce"); 3085 DestIsVolatile = false; 3086 } 3087 3088 // If the value is offset in memory, apply the offset now. 3089 llvm::Value *StorePtr = DestPtr; 3090 if (unsigned Offs = RetAI.getDirectOffset()) { 3091 StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy()); 3092 StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs); 3093 StorePtr = Builder.CreateBitCast(StorePtr, 3094 llvm::PointerType::getUnqual(RetAI.getCoerceToType())); 3095 } 3096 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 3097 3098 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 3099 } 3100 3101 case ABIArgInfo::Expand: 3102 llvm_unreachable("Invalid ABI kind for return argument"); 3103 } 3104 3105 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 3106 } 3107 3108 /* VarArg handling */ 3109 3110 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) { 3111 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this); 3112 } 3113