//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}
/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
        llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}
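
// For example (illustration only): given a prototype such as
//   void f(void *p __attribute__((pass_object_size(0))));
// appendParameterTypes() appends two entries for 'p' -- the pointer type
// followed by size_t for the implicit object-size argument -- so the
// arranged CGFunctionInfo sees one more parameter than the AST prototype.
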
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}
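
// Illustration: for a member function such as 'int S::get() const', the
// arranged signature is effectively (S*) -> int; the implicit 'this'
// pointer (or a generic void* when RD is null) is always argument 0.
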
/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function.  The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(MD, Type, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}
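
// Illustration (not exhaustive): the "extra" prefix/suffix arguments are the
// implicit ones a C++ ABI may add around the prototype parameters, e.g. the
// VTT pointer the Itanium ABI passes right after 'this' for constructors of
// classes with virtual bases.  They are counted into the required-argument
// total computed via RequiredArgs::forPrototypePlus above.
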
/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type.  The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}
const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs.  Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments.  The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

/// Arrange the argument and result information for an abstract value
/// of a given function type.  This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info.  We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (info.getCC() != CC_Swift) {
    getABIInfo().computeInfo(*FI);
  } else {
    swiftcall::computeABIInfo(CGM, *FI);
  }

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
    operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
                                  argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
    BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
      CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
    CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
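
// Illustration: with SrcSTy = { { i32, i32 }, i8 } and DstSize = 8, this
// helper GEPs into element 0 (the nested { i32, i32 }, store size 8) and
// stops there, since diving further would reach an i32 that is smaller
// than the 8 bytes being accessed.
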
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}
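
// Illustration: loading a 12-byte { i32, i32, i32 } source as the coercion
// type { i64, i32 } takes the "coercion through memory" path above: the
// source bytes are memcpy'd into a temporary alloca of the destination type
// and then loaded, since neither a plain bitcast-load nor an integer
// extension applies.
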
// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
      CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
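
// Illustration: storing a { i64, i32 } value this way emits two scalar
// stores (an i64 at offset 0 and an i32 at offset 8) via extractvalue,
// rather than a single first-class aggregate store.
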
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.  The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};
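
// Illustration: for a function 'Big f(int x)' whose return is classified as
// Indirect (sret), the IR signature becomes 'void @f(%struct.Big* sret, i32)';
// the mapping records SRetArgNo = 0 and places 'x' at FirstArgIndex = 1 with
// NumberOfArgs = 1.
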
void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca doesn't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second.  We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
processed?"); 1513 1514 llvm::Type *resultType = nullptr; 1515 const ABIArgInfo &retAI = FI.getReturnInfo(); 1516 switch (retAI.getKind()) { 1517 case ABIArgInfo::Expand: 1518 llvm_unreachable("Invalid ABI kind for return argument"); 1519 1520 case ABIArgInfo::Extend: 1521 case ABIArgInfo::Direct: 1522 resultType = retAI.getCoerceToType(); 1523 break; 1524 1525 case ABIArgInfo::InAlloca: 1526 if (retAI.getInAllocaSRet()) { 1527 // sret things on win32 aren't void, they return the sret pointer. 1528 QualType ret = FI.getReturnType(); 1529 llvm::Type *ty = ConvertType(ret); 1530 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1531 resultType = llvm::PointerType::get(ty, addressSpace); 1532 } else { 1533 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1534 } 1535 break; 1536 1537 case ABIArgInfo::Indirect: 1538 case ABIArgInfo::Ignore: 1539 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1540 break; 1541 1542 case ABIArgInfo::CoerceAndExpand: 1543 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1544 break; 1545 } 1546 1547 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1548 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1549 1550 // Add type for sret argument. 1551 if (IRFunctionArgs.hasSRetArg()) { 1552 QualType Ret = FI.getReturnType(); 1553 llvm::Type *Ty = ConvertType(Ret); 1554 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1555 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1556 llvm::PointerType::get(Ty, AddressSpace); 1557 } 1558 1559 // Add type for inalloca argument. 1560 if (IRFunctionArgs.hasInallocaArg()) { 1561 auto ArgStruct = FI.getArgStruct(); 1562 assert(ArgStruct); 1563 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1564 } 1565 1566 // Add in all of the required arguments. 1567 unsigned ArgNo = 0; 1568 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1569 ie = it + FI.getNumRequiredArgs(); 1570 for (; it != ie; ++it, ++ArgNo) { 1571 const ABIArgInfo &ArgInfo = it->info; 1572 1573 // Insert a padding type to ensure proper alignment. 1574 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1575 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1576 ArgInfo.getPaddingType(); 1577 1578 unsigned FirstIRArg, NumIRArgs; 1579 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1580 1581 switch (ArgInfo.getKind()) { 1582 case ABIArgInfo::Ignore: 1583 case ABIArgInfo::InAlloca: 1584 assert(NumIRArgs == 0); 1585 break; 1586 1587 case ABIArgInfo::Indirect: { 1588 assert(NumIRArgs == 1); 1589 // indirect arguments are always on the stack, which is alloca addr space. 1590 llvm::Type *LTy = ConvertTypeForMem(it->type); 1591 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1592 CGM.getDataLayout().getAllocaAddrSpace()); 1593 break; 1594 } 1595 1596 case ABIArgInfo::Extend: 1597 case ABIArgInfo::Direct: { 1598 // Fast-isel and the optimizer generally like scalar values better than 1599 // FCAs, so we flatten them if this is safe to do for this argument. 
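// Illustrative note (not from the source): an argument coerced to the struct
// type { i32, i32 } with getCanBeFlattened() set contributes two scalar IR
// parameters here (i32, i32) rather than one first-class-aggregate parameter.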
1600 llvm::Type *argType = ArgInfo.getCoerceToType(); 1601 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1602 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1603 assert(NumIRArgs == st->getNumElements()); 1604 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1605 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1606 } else { 1607 assert(NumIRArgs == 1); 1608 ArgTypes[FirstIRArg] = argType; 1609 } 1610 break; 1611 } 1612 1613 case ABIArgInfo::CoerceAndExpand: { 1614 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1615 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1616 *ArgTypesIter++ = EltTy; 1617 } 1618 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1619 break; 1620 } 1621 1622 case ABIArgInfo::Expand: 1623 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1624 getExpandedTypes(it->type, ArgTypesIter); 1625 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1626 break; 1627 } 1628 } 1629 1630 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1631 assert(Erased && "Not in set?"); 1632 1633 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1634 } 1635 1636 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1637 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1638 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1639 1640 if (!isFuncTypeConvertible(FPT)) 1641 return llvm::StructType::get(getLLVMContext()); 1642 1643 const CGFunctionInfo *Info; 1644 if (isa<CXXDestructorDecl>(MD)) 1645 Info = 1646 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType())); 1647 else 1648 Info = &arrangeCXXMethodDeclaration(MD); 1649 return GetFunctionType(*Info); 1650 } 1651 1652 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1653 llvm::AttrBuilder &FuncAttrs, 1654 const FunctionProtoType *FPT) { 1655 if (!FPT) 1656 return; 1657 1658 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1659 FPT->isNothrow(Ctx)) 1660 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1661 } 1662 1663 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone, 1664 bool AttrOnCallSite, 1665 llvm::AttrBuilder &FuncAttrs) { 1666 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1667 if (!HasOptnone) { 1668 if (CodeGenOpts.OptimizeSize) 1669 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1670 if (CodeGenOpts.OptimizeSize == 2) 1671 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1672 } 1673 1674 if (CodeGenOpts.DisableRedZone) 1675 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1676 if (CodeGenOpts.NoImplicitFloat) 1677 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1678 1679 if (AttrOnCallSite) { 1680 // Attributes that should go on the call site only. 1681 if (!CodeGenOpts.SimplifyLibCalls || 1682 CodeGenOpts.isNoBuiltinFunc(Name.data())) 1683 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1684 if (!CodeGenOpts.TrapFuncName.empty()) 1685 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1686 } else { 1687 // Attributes that should go on the function, but not the call site. 
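// The branches below encode the frame-pointer policy as string attributes:
// elimination allowed everywhere ("no-frame-pointer-elim"="false"), allowed
// only in leaf functions, or disabled entirely ("no-frame-pointer-elim"="true").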
1688 if (!CodeGenOpts.DisableFPElim) { 1689 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1690 } else if (CodeGenOpts.OmitLeafFramePointer) { 1691 FuncAttrs.addAttribute("no-frame-pointer-elim", "false"); 1692 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1693 } else { 1694 FuncAttrs.addAttribute("no-frame-pointer-elim", "true"); 1695 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf"); 1696 } 1697 1698 FuncAttrs.addAttribute("less-precise-fpmad", 1699 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1700 1701 if (!CodeGenOpts.FPDenormalMode.empty()) 1702 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode); 1703 1704 FuncAttrs.addAttribute("no-trapping-math", 1705 llvm::toStringRef(CodeGenOpts.NoTrappingMath)); 1706 1707 // TODO: Are these all needed? 1708 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. 1709 FuncAttrs.addAttribute("no-infs-fp-math", 1710 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1711 FuncAttrs.addAttribute("no-nans-fp-math", 1712 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1713 FuncAttrs.addAttribute("unsafe-fp-math", 1714 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1715 FuncAttrs.addAttribute("use-soft-float", 1716 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1717 FuncAttrs.addAttribute("stack-protector-buffer-size", 1718 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1719 FuncAttrs.addAttribute("no-signed-zeros-fp-math", 1720 llvm::toStringRef(CodeGenOpts.NoSignedZeros)); 1721 FuncAttrs.addAttribute( 1722 "correctly-rounded-divide-sqrt-fp-math", 1723 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt)); 1724 1725 // TODO: Reciprocal estimate codegen options should apply to instructions? 1726 std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals; 1727 if (!Recips.empty()) 1728 FuncAttrs.addAttribute("reciprocal-estimates", 1729 llvm::join(Recips.begin(), Recips.end(), ",")); 1730 1731 if (CodeGenOpts.StackRealignment) 1732 FuncAttrs.addAttribute("stackrealign"); 1733 if (CodeGenOpts.Backchain) 1734 FuncAttrs.addAttribute("backchain"); 1735 } 1736 1737 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1738 // Conservatively, mark all functions and calls in CUDA as convergent 1739 // (meaning, they may call an intrinsically convergent op, such as 1740 // __syncthreads(), and so can't have certain optimizations applied around 1741 // them). LLVM will remove this attribute where it safely can. 1742 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1743 1744 // Exceptions aren't supported in CUDA device code. 1745 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1746 1747 // Respect -fcuda-flush-denormals-to-zero. 
1748 if (getLangOpts().CUDADeviceFlushDenormalsToZero) 1749 FuncAttrs.addAttribute("nvptx-f32ftz", "true"); 1750 } 1751 } 1752 1753 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) { 1754 llvm::AttrBuilder FuncAttrs; 1755 ConstructDefaultFnAttrList(F.getName(), 1756 F.hasFnAttribute(llvm::Attribute::OptimizeNone), 1757 /* AttrOnCallSite = */ false, FuncAttrs); 1758 llvm::AttributeList AS = llvm::AttributeList::get( 1759 getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs); 1760 F.addAttributes(llvm::AttributeList::FunctionIndex, AS); 1761 } 1762 1763 void CodeGenModule::ConstructAttributeList( 1764 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo, 1765 AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) { 1766 llvm::AttrBuilder FuncAttrs; 1767 llvm::AttrBuilder RetAttrs; 1768 1769 CallingConv = FI.getEffectiveCallingConvention(); 1770 if (FI.isNoReturn()) 1771 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1772 1773 // If we have information about the function prototype, we can learn 1774 // attributes from there. 1775 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs, 1776 CalleeInfo.getCalleeFunctionProtoType()); 1777 1778 const Decl *TargetDecl = CalleeInfo.getCalleeDecl(); 1779 1780 bool HasOptnone = false; 1781 // FIXME: handle sseregparm someday... 1782 if (TargetDecl) { 1783 if (TargetDecl->hasAttr<ReturnsTwiceAttr>()) 1784 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice); 1785 if (TargetDecl->hasAttr<NoThrowAttr>()) 1786 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1787 if (TargetDecl->hasAttr<NoReturnAttr>()) 1788 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1789 if (TargetDecl->hasAttr<NoDuplicateAttr>()) 1790 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate); 1791 if (TargetDecl->hasAttr<ConvergentAttr>()) 1792 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1793 1794 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 1795 AddAttributesFromFunctionProtoType( 1796 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>()); 1797 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function. 1798 // These attributes are not inherited by overriders. 1799 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn); 1800 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual())) 1801 FuncAttrs.addAttribute(llvm::Attribute::NoReturn); 1802 } 1803 1804 // 'const', 'pure' and 'noalias' attributed functions are also nounwind. 1805 if (TargetDecl->hasAttr<ConstAttr>()) { 1806 FuncAttrs.addAttribute(llvm::Attribute::ReadNone); 1807 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1808 } else if (TargetDecl->hasAttr<PureAttr>()) { 1809 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly); 1810 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1811 } else if (TargetDecl->hasAttr<NoAliasAttr>()) { 1812 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly); 1813 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1814 } 1815 if (TargetDecl->hasAttr<RestrictAttr>()) 1816 RetAttrs.addAttribute(llvm::Attribute::NoAlias); 1817 if (TargetDecl->hasAttr<ReturnsNonNullAttr>()) 1818 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1819 1820 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>(); 1821 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) { 1822 Optional<unsigned> NumElemsParam; 1823 // alloc_size args are base-1, 0 means not present.
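// The conversion below maps the attribute's 1-based parameter indices onto
// LLVM's 0-based allocsize encoding, leaving the element-count parameter
// unset (None) when the attribute named only a size parameter.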
1824 if (unsigned N = AllocSize->getNumElemsParam()) 1825 NumElemsParam = N - 1; 1826 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1, 1827 NumElemsParam); 1828 } 1829 } 1830 1831 ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs); 1832 1833 if (CodeGenOpts.EnableSegmentedStacks && 1834 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>())) 1835 FuncAttrs.addAttribute("split-stack"); 1836 1837 if (!AttrOnCallSite) { 1838 bool DisableTailCalls = 1839 CodeGenOpts.DisableTailCalls || 1840 (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() || 1841 TargetDecl->hasAttr<AnyX86InterruptAttr>())); 1842 FuncAttrs.addAttribute("disable-tail-calls", 1843 llvm::toStringRef(DisableTailCalls)); 1844 1845 // Add target-cpu and target-features attributes to functions. If 1846 // we have a decl for the function and it has a target attribute then 1847 // parse that and add it to the feature set. 1848 StringRef TargetCPU = getTarget().getTargetOpts().CPU; 1849 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl); 1850 if (FD && FD->hasAttr<TargetAttr>()) { 1851 llvm::StringMap<bool> FeatureMap; 1852 getFunctionFeatureMap(FeatureMap, FD); 1853 1854 // Produce the canonical string for this set of features. 1855 std::vector<std::string> Features; 1856 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(), 1857 ie = FeatureMap.end(); 1858 it != ie; ++it) 1859 Features.push_back((it->second ? "+" : "-") + it->first().str()); 1860 1861 // Now add the target-cpu and target-features to the function. 1862 // While we populated the feature map above, we still need to 1863 // get and parse the target attribute so we can get the cpu for 1864 // the function. 1865 const auto *TD = FD->getAttr<TargetAttr>(); 1866 TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse(); 1867 if (ParsedAttr.second != "") 1868 TargetCPU = ParsedAttr.second; 1869 if (TargetCPU != "") 1870 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1871 if (!Features.empty()) { 1872 std::sort(Features.begin(), Features.end()); 1873 FuncAttrs.addAttribute( 1874 "target-features", 1875 llvm::join(Features.begin(), Features.end(), ",")); 1876 } 1877 } else { 1878 // Otherwise just add the existing target cpu and target features to the 1879 // function. 
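// Illustrative shape of the result (hypothetical values): "target-cpu"="x86-64"
// together with "target-features"="+sse4.2,+ssse3,-avx" -- a sorted,
// comma-joined list of '+'/'-' prefixed feature names.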
1880 std::vector<std::string> &Features = getTarget().getTargetOpts().Features; 1881 if (TargetCPU != "") 1882 FuncAttrs.addAttribute("target-cpu", TargetCPU); 1883 if (!Features.empty()) { 1884 std::sort(Features.begin(), Features.end()); 1885 FuncAttrs.addAttribute( 1886 "target-features", 1887 llvm::join(Features.begin(), Features.end(), ",")); 1888 } 1889 } 1890 } 1891 1892 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 1893 1894 QualType RetTy = FI.getReturnType(); 1895 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1896 switch (RetAI.getKind()) { 1897 case ABIArgInfo::Extend: 1898 if (RetTy->hasSignedIntegerRepresentation()) 1899 RetAttrs.addAttribute(llvm::Attribute::SExt); 1900 else if (RetTy->hasUnsignedIntegerRepresentation()) 1901 RetAttrs.addAttribute(llvm::Attribute::ZExt); 1902 // FALL THROUGH 1903 case ABIArgInfo::Direct: 1904 if (RetAI.getInReg()) 1905 RetAttrs.addAttribute(llvm::Attribute::InReg); 1906 break; 1907 case ABIArgInfo::Ignore: 1908 break; 1909 1910 case ABIArgInfo::InAlloca: 1911 case ABIArgInfo::Indirect: { 1912 // inalloca and sret disable readnone and readonly 1913 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 1914 .removeAttribute(llvm::Attribute::ReadNone); 1915 break; 1916 } 1917 1918 case ABIArgInfo::CoerceAndExpand: 1919 break; 1920 1921 case ABIArgInfo::Expand: 1922 llvm_unreachable("Invalid ABI kind for return argument"); 1923 } 1924 1925 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 1926 QualType PTy = RefTy->getPointeeType(); 1927 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 1928 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 1929 .getQuantity()); 1930 else if (getContext().getTargetAddressSpace(PTy) == 0) 1931 RetAttrs.addAttribute(llvm::Attribute::NonNull); 1932 } 1933 1934 // Attach return attributes. 1935 if (RetAttrs.hasAttributes()) { 1936 PAL.push_back(llvm::AttributeList::get( 1937 getLLVMContext(), llvm::AttributeList::ReturnIndex, RetAttrs)); 1938 } 1939 1940 bool hasUsedSRet = false; 1941 1942 // Attach attributes to sret. 1943 if (IRFunctionArgs.hasSRetArg()) { 1944 llvm::AttrBuilder SRETAttrs; 1945 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 1946 hasUsedSRet = true; 1947 if (RetAI.getInReg()) 1948 SRETAttrs.addAttribute(llvm::Attribute::InReg); 1949 PAL.push_back(llvm::AttributeList::get( 1950 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs)); 1951 } 1952 1953 // Attach attributes to inalloca argument. 1954 if (IRFunctionArgs.hasInallocaArg()) { 1955 llvm::AttrBuilder Attrs; 1956 Attrs.addAttribute(llvm::Attribute::InAlloca); 1957 PAL.push_back(llvm::AttributeList::get( 1958 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs)); 1959 } 1960 1961 unsigned ArgNo = 0; 1962 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 1963 E = FI.arg_end(); 1964 I != E; ++I, ++ArgNo) { 1965 QualType ParamType = I->type; 1966 const ABIArgInfo &AI = I->info; 1967 llvm::AttrBuilder Attrs; 1968 1969 // Add attribute for padding argument, if necessary. 1970 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 1971 if (AI.getPaddingInReg()) 1972 PAL.push_back(llvm::AttributeList::get( 1973 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1, 1974 llvm::Attribute::InReg)); 1975 } 1976 1977 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 1978 // have the corresponding parameter variable. It doesn't make 1979 // sense to do it here because parameters are so messed up. 
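// The code below derives the per-argument IR attributes (sext/zext, inreg,
// nest, byval and its alignment, dereferenceable, and the Swift ABI markers)
// from the ABIArgInfo kind computed for each parameter.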
1980 switch (AI.getKind()) { 1981 case ABIArgInfo::Extend: 1982 if (ParamType->isSignedIntegerOrEnumerationType()) 1983 Attrs.addAttribute(llvm::Attribute::SExt); 1984 else if (ParamType->isUnsignedIntegerOrEnumerationType()) { 1985 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType)) 1986 Attrs.addAttribute(llvm::Attribute::SExt); 1987 else 1988 Attrs.addAttribute(llvm::Attribute::ZExt); 1989 } 1990 // FALL THROUGH 1991 case ABIArgInfo::Direct: 1992 if (ArgNo == 0 && FI.isChainCall()) 1993 Attrs.addAttribute(llvm::Attribute::Nest); 1994 else if (AI.getInReg()) 1995 Attrs.addAttribute(llvm::Attribute::InReg); 1996 break; 1997 1998 case ABIArgInfo::Indirect: { 1999 if (AI.getInReg()) 2000 Attrs.addAttribute(llvm::Attribute::InReg); 2001 2002 if (AI.getIndirectByVal()) 2003 Attrs.addAttribute(llvm::Attribute::ByVal); 2004 2005 CharUnits Align = AI.getIndirectAlign(); 2006 2007 // In a byval argument, it is important that the required 2008 // alignment of the type is honored, as LLVM might be creating a 2009 // *new* stack object, and needs to know what alignment to give 2010 // it. (Sometimes it can deduce a sensible alignment on its own, 2011 // but not if clang decides it must emit a packed struct, or the 2012 // user specifies increased alignment requirements.) 2013 // 2014 // This is different from indirect *not* byval, where the object 2015 // exists already, and the align attribute is purely 2016 // informative. 2017 assert(!Align.isZero()); 2018 2019 // For now, only add this when we have a byval argument. 2020 // TODO: be less lazy about updating test cases. 2021 if (AI.getIndirectByVal()) 2022 Attrs.addAlignmentAttr(Align.getQuantity()); 2023 2024 // byval disables readnone and readonly. 2025 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2026 .removeAttribute(llvm::Attribute::ReadNone); 2027 break; 2028 } 2029 case ABIArgInfo::Ignore: 2030 case ABIArgInfo::Expand: 2031 case ABIArgInfo::CoerceAndExpand: 2032 break; 2033 2034 case ABIArgInfo::InAlloca: 2035 // inalloca disables readnone and readonly. 2036 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2037 .removeAttribute(llvm::Attribute::ReadNone); 2038 continue; 2039 } 2040 2041 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2042 QualType PTy = RefTy->getPointeeType(); 2043 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2044 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy) 2045 .getQuantity()); 2046 else if (getContext().getTargetAddressSpace(PTy) == 0) 2047 Attrs.addAttribute(llvm::Attribute::NonNull); 2048 } 2049 2050 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2051 case ParameterABI::Ordinary: 2052 break; 2053 2054 case ParameterABI::SwiftIndirectResult: { 2055 // Add 'sret' if we haven't already used it for something, but 2056 // only if the result is void. 2057 if (!hasUsedSRet && RetTy->isVoidType()) { 2058 Attrs.addAttribute(llvm::Attribute::StructRet); 2059 hasUsedSRet = true; 2060 } 2061 2062 // Add 'noalias' in either case. 2063 Attrs.addAttribute(llvm::Attribute::NoAlias); 2064 2065 // Add 'dereferenceable' and 'alignment'. 
2066 auto PTy = ParamType->getPointeeType(); 2067 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2068 auto info = getContext().getTypeInfoInChars(PTy); 2069 Attrs.addDereferenceableAttr(info.first.getQuantity()); 2070 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(), 2071 info.second.getQuantity())); 2072 } 2073 break; 2074 } 2075 2076 case ParameterABI::SwiftErrorResult: 2077 Attrs.addAttribute(llvm::Attribute::SwiftError); 2078 break; 2079 2080 case ParameterABI::SwiftContext: 2081 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2082 break; 2083 } 2084 2085 if (Attrs.hasAttributes()) { 2086 unsigned FirstIRArg, NumIRArgs; 2087 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2088 for (unsigned i = 0; i < NumIRArgs; i++) 2089 PAL.push_back(llvm::AttributeList::get(getLLVMContext(), 2090 FirstIRArg + i + 1, Attrs)); 2091 } 2092 } 2093 assert(ArgNo == FI.arg_size()); 2094 2095 if (FuncAttrs.hasAttributes()) 2096 PAL.push_back(llvm::AttributeList::get( 2097 getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs)); 2098 } 2099 2100 /// An argument came in as a promoted argument; demote it back to its 2101 /// declared type. 2102 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2103 const VarDecl *var, 2104 llvm::Value *value) { 2105 llvm::Type *varType = CGF.ConvertType(var->getType()); 2106 2107 // This can happen with promotions that actually don't change the 2108 // underlying type, like the enum promotions. 2109 if (value->getType() == varType) return value; 2110 2111 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2112 && "unexpected promotion type"); 2113 2114 if (isa<llvm::IntegerType>(varType)) 2115 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2116 2117 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2118 } 2119 2120 /// Returns the attribute (either parameter attribute, or function 2121 /// attribute), which declares argument ArgNo to be non-null. 2122 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2123 QualType ArgType, unsigned ArgNo) { 2124 // FIXME: __attribute__((nonnull)) can also be applied to: 2125 // - references to pointers, where the pointee is known to be 2126 // nonnull (apparently a Clang extension) 2127 // - transparent unions containing pointers 2128 // In the former case, LLVM IR cannot represent the constraint. In 2129 // the latter case, we have no guarantee that the transparent union 2130 // is in fact passed as a pointer. 2131 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2132 return nullptr; 2133 // First, check attribute on parameter itself. 2134 if (PVD) { 2135 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2136 return ParmNNAttr; 2137 } 2138 // Check function attributes. 
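// Illustrative example (not from the source): given
//   void f(void *p) __attribute__((nonnull(1)));
// the loop below finds the declaration-level NonNullAttr that covers the
// first parameter even though the ParmVarDecl itself carries no attribute.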
2139 if (!FD) 2140 return nullptr; 2141 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 2142 if (NNAttr->isNonNull(ArgNo)) 2143 return NNAttr; 2144 } 2145 return nullptr; 2146 } 2147 2148 namespace { 2149 struct CopyBackSwiftError final : EHScopeStack::Cleanup { 2150 Address Temp; 2151 Address Arg; 2152 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} 2153 void Emit(CodeGenFunction &CGF, Flags flags) override { 2154 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); 2155 CGF.Builder.CreateStore(errorValue, Arg); 2156 } 2157 }; 2158 } 2159 2160 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 2161 llvm::Function *Fn, 2162 const FunctionArgList &Args) { 2163 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 2164 // Naked functions don't have prologues. 2165 return; 2166 2167 // If this is an implicit-return-zero function, go ahead and 2168 // initialize the return value. TODO: it might be nice to have 2169 // a more general mechanism for this that didn't require synthesized 2170 // return statements. 2171 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 2172 if (FD->hasImplicitReturnZero()) { 2173 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 2174 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 2175 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 2176 Builder.CreateStore(Zero, ReturnValue); 2177 } 2178 } 2179 2180 // FIXME: We no longer need the types from FunctionArgList; lift up and 2181 // simplify. 2182 2183 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 2184 // Flattened function arguments. 2185 SmallVector<llvm::Value *, 16> FnArgs; 2186 FnArgs.reserve(IRFunctionArgs.totalIRArgs()); 2187 for (auto &Arg : Fn->args()) { 2188 FnArgs.push_back(&Arg); 2189 } 2190 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs()); 2191 2192 // If we're using inalloca, all the memory arguments are GEPs off of the last 2193 // parameter, which is a pointer to the complete memory area. 2194 Address ArgStruct = Address::invalid(); 2195 const llvm::StructLayout *ArgStructLayout = nullptr; 2196 if (IRFunctionArgs.hasInallocaArg()) { 2197 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct()); 2198 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()], 2199 FI.getArgStructAlignment()); 2200 2201 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); 2202 } 2203 2204 // Name the struct return parameter. 2205 if (IRFunctionArgs.hasSRetArg()) { 2206 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]); 2207 AI->setName("agg.result"); 2208 AI->addAttr(llvm::AttributeList::get(getLLVMContext(), AI->getArgNo() + 1, 2209 llvm::Attribute::NoAlias)); 2210 } 2211 2212 // Track if we received the parameter as a pointer (indirect, byval, or 2213 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it 2214 // into a local alloca for us. 2215 SmallVector<ParamValue, 16> ArgVals; 2216 ArgVals.reserve(Args.size()); 2217 2218 // Create a pointer value for every parameter declaration. This usually 2219 // entails copying one or more LLVM IR arguments into an alloca. Don't push 2220 // any cleanups or do anything that might unwind. We do that separately, so 2221 // we can push the cleanups in the correct order for the ABI.
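// Each Clang parameter below becomes a ParamValue -- either a direct SSA
// value or the address of a local -- chosen according to the ABIArgInfo kind;
// EmitParmDecl is then run over the results afterwards, in ABI-determined order.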
2222 assert(FI.arg_size() == Args.size() && 2223 "Mismatch between function signature & arguments."); 2224 unsigned ArgNo = 0; 2225 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2226 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2227 i != e; ++i, ++info_it, ++ArgNo) { 2228 const VarDecl *Arg = *i; 2229 QualType Ty = info_it->type; 2230 const ABIArgInfo &ArgI = info_it->info; 2231 2232 bool isPromoted = 2233 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2234 2235 unsigned FirstIRArg, NumIRArgs; 2236 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2237 2238 switch (ArgI.getKind()) { 2239 case ABIArgInfo::InAlloca: { 2240 assert(NumIRArgs == 0); 2241 auto FieldIndex = ArgI.getInAllocaFieldIndex(); 2242 CharUnits FieldOffset = 2243 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex)); 2244 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset, 2245 Arg->getName()); 2246 ArgVals.push_back(ParamValue::forIndirect(V)); 2247 break; 2248 } 2249 2250 case ABIArgInfo::Indirect: { 2251 assert(NumIRArgs == 1); 2252 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign()); 2253 2254 if (!hasScalarEvaluationKind(Ty)) { 2255 // Aggregates and complex variables are accessed by reference. All we 2256 // need to do is realign the value, if requested. 2257 Address V = ParamAddr; 2258 if (ArgI.getIndirectRealign()) { 2259 Address AlignedTemp = CreateMemTemp(Ty, "coerce"); 2260 2261 // Copy from the incoming argument pointer to the temporary with the 2262 // appropriate alignment. 2263 // 2264 // FIXME: We should have a common utility for generating an aggregate 2265 // copy. 2266 CharUnits Size = getContext().getTypeSizeInChars(Ty); 2267 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()); 2268 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy); 2269 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy); 2270 Builder.CreateMemCpy(Dst, Src, SizeVal, false); 2271 V = AlignedTemp; 2272 } 2273 ArgVals.push_back(ParamValue::forIndirect(V)); 2274 } else { 2275 // Load scalar value from indirect argument. 2276 llvm::Value *V = 2277 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart()); 2278 2279 if (isPromoted) 2280 V = emitArgumentDemotion(*this, Arg, V); 2281 ArgVals.push_back(ParamValue::forDirect(V)); 2282 } 2283 break; 2284 } 2285 2286 case ABIArgInfo::Extend: 2287 case ABIArgInfo::Direct: { 2288 2289 // If we have the trivial case, handle it with no muss and fuss. 2290 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2291 ArgI.getCoerceToType() == ConvertType(Ty) && 2292 ArgI.getDirectOffset() == 0) { 2293 assert(NumIRArgs == 1); 2294 llvm::Value *V = FnArgs[FirstIRArg]; 2295 auto AI = cast<llvm::Argument>(V); 2296 2297 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2298 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2299 PVD->getFunctionScopeIndex())) 2300 AI->addAttr(llvm::AttributeList::get(getLLVMContext(), 2301 AI->getArgNo() + 1, 2302 llvm::Attribute::NonNull)); 2303 2304 QualType OTy = PVD->getOriginalType(); 2305 if (const auto *ArrTy = 2306 getContext().getAsConstantArrayType(OTy)) { 2307 // A C99 array parameter declaration with the static keyword also 2308 // indicates dereferenceability, and if the size is constant we can 2309 // use the dereferenceable attribute (which requires the size in 2310 // bytes). 
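// Illustrative example (not from the source): for
//   void f(int a[static 4]);
// the parameter is marked dereferenceable(4 * sizeof(int)) below, since C99
// guarantees the caller passes a pointer to at least that many elements.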
2311 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2312 QualType ETy = ArrTy->getElementType(); 2313 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2314 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2315 ArrSize) { 2316 llvm::AttrBuilder Attrs; 2317 Attrs.addDereferenceableAttr( 2318 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2319 AI->addAttr(llvm::AttributeList::get( 2320 getLLVMContext(), AI->getArgNo() + 1, Attrs)); 2321 } else if (getContext().getTargetAddressSpace(ETy) == 0) { 2322 AI->addAttr(llvm::AttributeList::get(getLLVMContext(), 2323 AI->getArgNo() + 1, 2324 llvm::Attribute::NonNull)); 2325 } 2326 } 2327 } else if (const auto *ArrTy = 2328 getContext().getAsVariableArrayType(OTy)) { 2329 // For C99 VLAs with the static keyword, we don't know the size so 2330 // we can't use the dereferenceable attribute, but in addrspace(0) 2331 // we know that it must be nonnull. 2332 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2333 !getContext().getTargetAddressSpace(ArrTy->getElementType())) 2334 AI->addAttr(llvm::AttributeList::get(getLLVMContext(), 2335 AI->getArgNo() + 1, 2336 llvm::Attribute::NonNull)); 2337 } 2338 2339 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2340 if (!AVAttr) 2341 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2342 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2343 if (AVAttr) { 2344 llvm::Value *AlignmentValue = 2345 EmitScalarExpr(AVAttr->getAlignment()); 2346 llvm::ConstantInt *AlignmentCI = 2347 cast<llvm::ConstantInt>(AlignmentValue); 2348 unsigned Alignment = 2349 std::min((unsigned) AlignmentCI->getZExtValue(), 2350 +llvm::Value::MaximumAlignment); 2351 2352 llvm::AttrBuilder Attrs; 2353 Attrs.addAlignmentAttr(Alignment); 2354 AI->addAttr(llvm::AttributeList::get(getLLVMContext(), 2355 AI->getArgNo() + 1, Attrs)); 2356 } 2357 } 2358 2359 if (Arg->getType().isRestrictQualified()) 2360 AI->addAttr(llvm::AttributeList::get( 2361 getLLVMContext(), AI->getArgNo() + 1, llvm::Attribute::NoAlias)); 2362 2363 // LLVM expects swifterror parameters to be used in very restricted 2364 // ways. Copy the value into a less-restricted temporary. 2365 if (FI.getExtParameterInfo(ArgNo).getABI() 2366 == ParameterABI::SwiftErrorResult) { 2367 QualType pointeeTy = Ty->getPointeeType(); 2368 assert(pointeeTy->isPointerType()); 2369 Address temp = 2370 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2371 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2372 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2373 Builder.CreateStore(incomingErrorValue, temp); 2374 V = temp.getPointer(); 2375 2376 // Push a cleanup to copy the value back at the end of the function. 2377 // The convention does not guarantee that the value will be written 2378 // back if the function exits with an unwind exception. 2379 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2380 } 2381 2382 // Ensure the argument is the correct type. 2383 if (V->getType() != ArgI.getCoerceToType()) 2384 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2385 2386 if (isPromoted) 2387 V = emitArgumentDemotion(*this, Arg, V); 2388 2389 // Because of merging of function types from multiple decls it is 2390 // possible for the type of an argument to not match the corresponding 2391 // type in the function type. Since we are codegening the callee 2392 // in here, add a cast to the argument type. 
2393 llvm::Type *LTy = ConvertType(Arg->getType()); 2394 if (V->getType() != LTy) 2395 V = Builder.CreateBitCast(V, LTy); 2396 2397 ArgVals.push_back(ParamValue::forDirect(V)); 2398 break; 2399 } 2400 2401 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2402 Arg->getName()); 2403 2404 // Pointer to store into. 2405 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2406 2407 // Fast-isel and the optimizer generally like scalar values better than 2408 // FCAs, so we flatten them if this is safe to do for this argument. 2409 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2410 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2411 STy->getNumElements() > 1) { 2412 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 2413 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2414 llvm::Type *DstTy = Ptr.getElementType(); 2415 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2416 2417 Address AddrToStoreInto = Address::invalid(); 2418 if (SrcSize <= DstSize) { 2419 AddrToStoreInto = 2420 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy)); 2421 } else { 2422 AddrToStoreInto = 2423 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2424 } 2425 2426 assert(STy->getNumElements() == NumIRArgs); 2427 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2428 auto AI = FnArgs[FirstIRArg + i]; 2429 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2430 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 2431 Address EltPtr = 2432 Builder.CreateStructGEP(AddrToStoreInto, i, Offset); 2433 Builder.CreateStore(AI, EltPtr); 2434 } 2435 2436 if (SrcSize > DstSize) { 2437 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2438 } 2439 2440 } else { 2441 // Simple case, just do a coerced store of the argument into the alloca. 2442 assert(NumIRArgs == 1); 2443 auto AI = FnArgs[FirstIRArg]; 2444 AI->setName(Arg->getName() + ".coerce"); 2445 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this); 2446 } 2447 2448 // Match to what EmitParmDecl is expecting for this type. 2449 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2450 llvm::Value *V = 2451 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart()); 2452 if (isPromoted) 2453 V = emitArgumentDemotion(*this, Arg, V); 2454 ArgVals.push_back(ParamValue::forDirect(V)); 2455 } else { 2456 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2457 } 2458 break; 2459 } 2460 2461 case ABIArgInfo::CoerceAndExpand: { 2462 // Reconstruct into a temporary. 2463 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2464 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2465 2466 auto coercionType = ArgI.getCoerceAndExpandType(); 2467 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2468 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2469 2470 unsigned argIndex = FirstIRArg; 2471 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2472 llvm::Type *eltType = coercionType->getElementType(i); 2473 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2474 continue; 2475 2476 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout); 2477 auto elt = FnArgs[argIndex++]; 2478 Builder.CreateStore(elt, eltAddr); 2479 } 2480 assert(argIndex == FirstIRArg + NumIRArgs); 2481 break; 2482 } 2483 2484 case ABIArgInfo::Expand: { 2485 // If this structure was expanded into multiple arguments then 2486 // we need to create a temporary and reconstruct it from the 2487 // arguments. 
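// Illustrative example (not from the source): a parameter of type
//   struct Point { int x; float y; };
// expanded by the ABI arrives as separate IR arguments (i32, float);
// ExpandTypeFromArgs below writes them field-by-field into the temporary.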
2488 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2489 LValue LV = MakeAddrLValue(Alloca, Ty); 2490 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2491 2492 auto FnArgIter = FnArgs.begin() + FirstIRArg; 2493 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2494 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs); 2495 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2496 auto AI = FnArgs[FirstIRArg + i]; 2497 AI->setName(Arg->getName() + "." + Twine(i)); 2498 } 2499 break; 2500 } 2501 2502 case ABIArgInfo::Ignore: 2503 assert(NumIRArgs == 0); 2504 // Initialize the local variable appropriately. 2505 if (!hasScalarEvaluationKind(Ty)) { 2506 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty))); 2507 } else { 2508 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType())); 2509 ArgVals.push_back(ParamValue::forDirect(U)); 2510 } 2511 break; 2512 } 2513 } 2514 2515 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 2516 for (int I = Args.size() - 1; I >= 0; --I) 2517 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2518 } else { 2519 for (unsigned I = 0, E = Args.size(); I != E; ++I) 2520 EmitParmDecl(*Args[I], ArgVals[I], I + 1); 2521 } 2522 } 2523 2524 static void eraseUnusedBitCasts(llvm::Instruction *insn) { 2525 while (insn->use_empty()) { 2526 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn); 2527 if (!bitcast) return; 2528 2529 // This is "safe" because we would have used a ConstantExpr otherwise. 2530 insn = cast<llvm::Instruction>(bitcast->getOperand(0)); 2531 bitcast->eraseFromParent(); 2532 } 2533 } 2534 2535 /// Try to emit a fused autorelease of a return result. 2536 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF, 2537 llvm::Value *result) { 2538 // We must be immediately followed the cast. 2539 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock(); 2540 if (BB->empty()) return nullptr; 2541 if (&BB->back() != result) return nullptr; 2542 2543 llvm::Type *resultType = result->getType(); 2544 2545 // result is in a BasicBlock and is therefore an Instruction. 2546 llvm::Instruction *generator = cast<llvm::Instruction>(result); 2547 2548 SmallVector<llvm::Instruction *, 4> InstsToKill; 2549 2550 // Look for: 2551 // %generator = bitcast %type1* %generator2 to %type2* 2552 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) { 2553 // We would have emitted this as a constant if the operand weren't 2554 // an Instruction. 2555 generator = cast<llvm::Instruction>(bitcast->getOperand(0)); 2556 2557 // Require the generator to be immediately followed by the cast. 2558 if (generator->getNextNode() != bitcast) 2559 return nullptr; 2560 2561 InstsToKill.push_back(bitcast); 2562 } 2563 2564 // Look for: 2565 // %generator = call i8* @objc_retain(i8* %originalResult) 2566 // or 2567 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2568 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2569 if (!call) return nullptr; 2570 2571 bool doRetainAutorelease; 2572 2573 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2574 doRetainAutorelease = true; 2575 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints() 2576 .objc_retainAutoreleasedReturnValue) { 2577 doRetainAutorelease = false; 2578 2579 // If we emitted an assembly marker for this call (and the 2580 // ARCEntrypoints field should have been set if so), go looking 2581 // for that call. If we can't find it, we can't do this 2582 // optimization. 
But it should always be the immediately previous 2583 // instruction, unless we needed bitcasts around the call. 2584 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2585 llvm::Instruction *prev = call->getPrevNode(); 2586 assert(prev); 2587 if (isa<llvm::BitCastInst>(prev)) { 2588 prev = prev->getPrevNode(); 2589 assert(prev); 2590 } 2591 assert(isa<llvm::CallInst>(prev)); 2592 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2593 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2594 InstsToKill.push_back(prev); 2595 } 2596 } else { 2597 return nullptr; 2598 } 2599 2600 result = call->getArgOperand(0); 2601 InstsToKill.push_back(call); 2602 2603 // Keep killing bitcasts, for sanity. Note that we no longer care 2604 // about precise ordering as long as there's exactly one use. 2605 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2606 if (!bitcast->hasOneUse()) break; 2607 InstsToKill.push_back(bitcast); 2608 result = bitcast->getOperand(0); 2609 } 2610 2611 // Delete all the unnecessary instructions, from latest to earliest. 2612 for (auto *I : InstsToKill) 2613 I->eraseFromParent(); 2614 2615 // Do the fused retain/autorelease if we were asked to. 2616 if (doRetainAutorelease) 2617 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2618 2619 // Cast back to the result type. 2620 return CGF.Builder.CreateBitCast(result, resultType); 2621 } 2622 2623 /// If this is a +1 of the value of an immutable 'self', remove it. 2624 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2625 llvm::Value *result) { 2626 // This is only applicable to a method with an immutable 'self'. 2627 const ObjCMethodDecl *method = 2628 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2629 if (!method) return nullptr; 2630 const VarDecl *self = method->getSelfDecl(); 2631 if (!self->getType().isConstQualified()) return nullptr; 2632 2633 // Look for a retain call. 2634 llvm::CallInst *retainCall = 2635 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2636 if (!retainCall || 2637 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) 2638 return nullptr; 2639 2640 // Look for an ordinary load of 'self'. 2641 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2642 llvm::LoadInst *load = 2643 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2644 if (!load || load->isAtomic() || load->isVolatile() || 2645 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2646 return nullptr; 2647 2648 // Okay! Burn it all down. This relies for correctness on the 2649 // assumption that the retain is emitted as part of the return and 2650 // that thereafter everything is used "linearly". 2651 llvm::Type *resultType = result->getType(); 2652 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 2653 assert(retainCall->use_empty()); 2654 retainCall->eraseFromParent(); 2655 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 2656 2657 return CGF.Builder.CreateBitCast(load, resultType); 2658 } 2659 2660 /// Emit an ARC autorelease of the result of a function. 2661 /// 2662 /// \return the value to actually return from the function 2663 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 2664 llvm::Value *result) { 2665 // If we're returning 'self', kill the initial retain. 
This is a 2666 // heuristic attempt to "encourage correctness" in the really unfortunate 2667 // case where we have a return of self during a dealloc and we desperately 2668 // need to avoid the possible autorelease. 2669 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result)) 2670 return self; 2671 2672 // At -O0, try to emit a fused retain/autorelease. 2673 if (CGF.shouldUseFusedARCCalls()) 2674 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result)) 2675 return fused; 2676 2677 return CGF.EmitARCAutoreleaseReturnValue(result); 2678 } 2679 2680 /// Heuristically search for a dominating store to the return-value slot. 2681 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) { 2682 // Check if a User is a store which pointerOperand is the ReturnValue. 2683 // We are looking for stores to the ReturnValue, not for stores of the 2684 // ReturnValue to some other location. 2685 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * { 2686 auto *SI = dyn_cast<llvm::StoreInst>(U); 2687 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer()) 2688 return nullptr; 2689 // These aren't actually possible for non-coerced returns, and we 2690 // only care about non-coerced returns on this code path. 2691 assert(!SI->isAtomic() && !SI->isVolatile()); 2692 return SI; 2693 }; 2694 // If there are multiple uses of the return-value slot, just check 2695 // for something immediately preceding the IP. Sometimes this can 2696 // happen with how we generate implicit-returns; it can also happen 2697 // with noreturn cleanups. 2698 if (!CGF.ReturnValue.getPointer()->hasOneUse()) { 2699 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 2700 if (IP->empty()) return nullptr; 2701 llvm::Instruction *I = &IP->back(); 2702 2703 // Skip lifetime markers 2704 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(), 2705 IE = IP->rend(); 2706 II != IE; ++II) { 2707 if (llvm::IntrinsicInst *Intrinsic = 2708 dyn_cast<llvm::IntrinsicInst>(&*II)) { 2709 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) { 2710 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1); 2711 ++II; 2712 if (II == IE) 2713 break; 2714 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II)) 2715 continue; 2716 } 2717 } 2718 I = &*II; 2719 break; 2720 } 2721 2722 return GetStoreIfValid(I); 2723 } 2724 2725 llvm::StoreInst *store = 2726 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back()); 2727 if (!store) return nullptr; 2728 2729 // Now do a first-and-dirty dominance check: just walk up the 2730 // single-predecessors chain from the current insertion point. 2731 llvm::BasicBlock *StoreBB = store->getParent(); 2732 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock(); 2733 while (IP != StoreBB) { 2734 if (!(IP = IP->getSinglePredecessor())) 2735 return nullptr; 2736 } 2737 2738 // Okay, the store's basic block dominates the insertion point; we 2739 // can do our thing. 2740 return store; 2741 } 2742 2743 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI, 2744 bool EmitRetDbgLoc, 2745 SourceLocation EndLoc) { 2746 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) { 2747 // Naked functions don't have epilogues. 2748 Builder.CreateUnreachable(); 2749 return; 2750 } 2751 2752 // Functions with no result always return void. 
2753 if (!ReturnValue.isValid()) { 2754 Builder.CreateRetVoid(); 2755 return; 2756 } 2757 2758 llvm::DebugLoc RetDbgLoc; 2759 llvm::Value *RV = nullptr; 2760 QualType RetTy = FI.getReturnType(); 2761 const ABIArgInfo &RetAI = FI.getReturnInfo(); 2762 2763 switch (RetAI.getKind()) { 2764 case ABIArgInfo::InAlloca: 2765 // Aggregates get evaluated directly into the destination. Sometimes we 2766 // need to return the sret value in a register, though. 2767 assert(hasAggregateEvaluationKind(RetTy)); 2768 if (RetAI.getInAllocaSRet()) { 2769 llvm::Function::arg_iterator EI = CurFn->arg_end(); 2770 --EI; 2771 llvm::Value *ArgStruct = &*EI; 2772 llvm::Value *SRet = Builder.CreateStructGEP( 2773 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex()); 2774 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret"); 2775 } 2776 break; 2777 2778 case ABIArgInfo::Indirect: { 2779 auto AI = CurFn->arg_begin(); 2780 if (RetAI.isSRetAfterThis()) 2781 ++AI; 2782 switch (getEvaluationKind(RetTy)) { 2783 case TEK_Complex: { 2784 ComplexPairTy RT = 2785 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc); 2786 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy), 2787 /*isInit*/ true); 2788 break; 2789 } 2790 case TEK_Aggregate: 2791 // Do nothing; aggregates get evaluated directly into the destination. 2792 break; 2793 case TEK_Scalar: 2794 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), 2795 MakeNaturalAlignAddrLValue(&*AI, RetTy), 2796 /*isInit*/ true); 2797 break; 2798 } 2799 break; 2800 } 2801 2802 case ABIArgInfo::Extend: 2803 case ABIArgInfo::Direct: 2804 if (RetAI.getCoerceToType() == ConvertType(RetTy) && 2805 RetAI.getDirectOffset() == 0) { 2806 // The internal return value temp will always have pointer-to-return-type 2807 // type; just do a load. 2808 2809 // If there is a dominating store to ReturnValue, we can elide 2810 // the load, zap the store, and usually zap the alloca. 2811 if (llvm::StoreInst *SI = 2812 findDominatingStoreToReturnValue(*this)) { 2813 // Reuse the debug location from the store unless there is 2814 // cleanup code to be emitted between the store and return 2815 // instruction. 2816 if (EmitRetDbgLoc && !AutoreleaseResult) 2817 RetDbgLoc = SI->getDebugLoc(); 2818 // Get the stored value and nuke the now-dead store. 2819 RV = SI->getValueOperand(); 2820 SI->eraseFromParent(); 2821 2822 // If that was the only use of the return value, nuke it as well now. 2823 auto returnValueInst = ReturnValue.getPointer(); 2824 if (returnValueInst->use_empty()) { 2825 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) { 2826 alloca->eraseFromParent(); 2827 ReturnValue = Address::invalid(); 2828 } 2829 } 2830 2831 // Otherwise, we have to do a simple load. 2832 } else { 2833 RV = Builder.CreateLoad(ReturnValue); 2834 } 2835 } else { 2836 // If the value is offset in memory, apply the offset now. 2837 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI); 2838 2839 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this); 2840 } 2841 2842 // In ARC, end functions that return a retainable type with a call 2843 // to objc_autoreleaseReturnValue. 2844 if (AutoreleaseResult) { 2845 #ifndef NDEBUG 2846 // Type::isObjCRetainableType has to be called on a QualType that hasn't 2847 // been stripped of the typedefs, so we cannot use RetTy here. Get the 2848 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl 2849 // from CurCodeDecl or BlockInfo.
2850 QualType RT; 2851 2852 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl)) 2853 RT = FD->getReturnType(); 2854 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl)) 2855 RT = MD->getReturnType(); 2856 else if (isa<BlockDecl>(CurCodeDecl)) 2857 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType(); 2858 else 2859 llvm_unreachable("Unexpected function/method type"); 2860 2861 assert(getLangOpts().ObjCAutoRefCount && 2862 !FI.isReturnsRetained() && 2863 RT->isObjCRetainableType()); 2864 #endif 2865 RV = emitAutoreleaseOfResult(*this, RV); 2866 } 2867 2868 break; 2869 2870 case ABIArgInfo::Ignore: 2871 break; 2872 2873 case ABIArgInfo::CoerceAndExpand: { 2874 auto coercionType = RetAI.getCoerceAndExpandType(); 2875 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 2876 2877 // Load all of the coerced elements out into results. 2878 llvm::SmallVector<llvm::Value*, 4> results; 2879 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType); 2880 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2881 auto coercedEltType = coercionType->getElementType(i); 2882 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType)) 2883 continue; 2884 2885 auto eltAddr = Builder.CreateStructGEP(addr, i, layout); 2886 auto elt = Builder.CreateLoad(eltAddr); 2887 results.push_back(elt); 2888 } 2889 2890 // If we have one result, it's the single direct result type. 2891 if (results.size() == 1) { 2892 RV = results[0]; 2893 2894 // Otherwise, we need to make a first-class aggregate. 2895 } else { 2896 // Construct a return type that lacks padding elements. 2897 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType(); 2898 2899 RV = llvm::UndefValue::get(returnType); 2900 for (unsigned i = 0, e = results.size(); i != e; ++i) { 2901 RV = Builder.CreateInsertValue(RV, results[i], i); 2902 } 2903 } 2904 break; 2905 } 2906 2907 case ABIArgInfo::Expand: 2908 llvm_unreachable("Invalid ABI kind for return argument"); 2909 } 2910 2911 llvm::Instruction *Ret; 2912 if (RV) { 2913 EmitReturnValueCheck(RV, EndLoc); 2914 Ret = Builder.CreateRet(RV); 2915 } else { 2916 Ret = Builder.CreateRetVoid(); 2917 } 2918 2919 if (RetDbgLoc) 2920 Ret->setDebugLoc(std::move(RetDbgLoc)); 2921 } 2922 2923 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV, 2924 SourceLocation EndLoc) { 2925 // A current decl may not be available when emitting vtable thunks. 2926 if (!CurCodeDecl) 2927 return; 2928 2929 ReturnsNonNullAttr *RetNNAttr = nullptr; 2930 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 2931 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 2932 2933 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 2934 return; 2935 2936 // Prefer the returns_nonnull attribute if it's present. 
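// Two flavours of check share this path: SanitizerKind::ReturnsNonnullAttribute
// (driven by __attribute__((returns_nonnull))) and SanitizerKind::NullabilityReturn
// (driven by a _Nonnull return type); only the nullability form is guarded by
// the runtime precondition block emitted below.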
2937 SourceLocation AttrLoc; 2938 SanitizerMask CheckKind; 2939 SanitizerHandler Handler; 2940 if (RetNNAttr) { 2941 assert(!requiresReturnValueNullabilityCheck() && 2942 "Cannot check nullability and the nonnull attribute"); 2943 AttrLoc = RetNNAttr->getLocation(); 2944 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 2945 Handler = SanitizerHandler::NonnullReturn; 2946 } else { 2947 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 2948 if (auto *TSI = DD->getTypeSourceInfo()) 2949 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>()) 2950 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 2951 CheckKind = SanitizerKind::NullabilityReturn; 2952 Handler = SanitizerHandler::NullabilityReturn; 2953 } 2954 2955 SanitizerScope SanScope(this); 2956 2957 llvm::BasicBlock *Check = nullptr; 2958 llvm::BasicBlock *NoCheck = nullptr; 2959 if (requiresReturnValueNullabilityCheck()) { 2960 // Before doing the nullability check, make sure that the preconditions for 2961 // the check are met. 2962 Check = createBasicBlock("nullcheck"); 2963 NoCheck = createBasicBlock("no.nullcheck"); 2964 Builder.CreateCondBr(RetValNullabilityPrecondition, Check, NoCheck); 2965 EmitBlock(Check); 2966 } 2967 2968 // Now do the null check. If the returns_nonnull attribute is present, this 2969 // is done unconditionally. 2970 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 2971 llvm::Constant *StaticData[] = { 2972 EmitCheckSourceLocation(EndLoc), EmitCheckSourceLocation(AttrLoc), 2973 }; 2974 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 2975 2976 if (requiresReturnValueNullabilityCheck()) 2977 EmitBlock(NoCheck); 2978 } 2979 2980 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 2981 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 2982 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 2983 } 2984 2985 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 2986 QualType Ty) { 2987 // FIXME: Generate IR in one pass, rather than going back and fixing up these 2988 // placeholders. 2989 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 2990 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 2991 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 2992 2993 // FIXME: When we generate this IR in one pass, we shouldn't need 2994 // this win32-specific alignment hack. 2995 CharUnits Align = CharUnits::fromQuantity(4); 2996 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 2997 2998 return AggValueSlot::forAddr(Address(Placeholder, Align), 2999 Ty.getQualifiers(), 3000 AggValueSlot::IsNotDestructed, 3001 AggValueSlot::DoesNotNeedGCBarriers, 3002 AggValueSlot::IsNotAliased); 3003 } 3004 3005 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3006 const VarDecl *param, 3007 SourceLocation loc) { 3008 // StartFunction converted the ABI-lowered parameter(s) into a 3009 // local alloca. We need to turn that into an r-value suitable 3010 // for EmitCall. 3011 Address local = GetAddrOfLocalVar(param); 3012 3013 QualType type = param->getType(); 3014 3015 assert(!isInAllocaArgument(CGM.getCXXABI(), type) && 3016 "cannot emit delegate call arguments for inalloca arguments!"); 3017 3018 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3019 // but the argument needs to be the original pointer. 
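// Illustrative example (not from the source): for a parameter declared as
//   Foo &x
// the local alloca holds a Foo*; the single load below recovers the pointer
// value that must be forwarded to the delegated call.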
3020 if (type->isReferenceType()) { 3021 args.add(RValue::get(Builder.CreateLoad(local)), type); 3022 3023 // In ARC, move out of consumed arguments so that the release cleanup 3024 // entered by StartFunction doesn't cause an over-release. This isn't 3025 // optimal -O0 code generation, but it should get cleaned up when 3026 // optimization is enabled. This also assumes that delegate calls are 3027 // performed exactly once for a set of arguments, but that should be safe. 3028 } else if (getLangOpts().ObjCAutoRefCount && 3029 param->hasAttr<NSConsumedAttr>() && 3030 type->isObjCRetainableType()) { 3031 llvm::Value *ptr = Builder.CreateLoad(local); 3032 auto null = 3033 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3034 Builder.CreateStore(null, local); 3035 args.add(RValue::get(ptr), type); 3036 3037 // For the most part, we just need to load the alloca, except that 3038 // aggregate r-values are actually pointers to temporaries. 3039 } else { 3040 args.add(convertTempToRValue(local, type, loc), type); 3041 } 3042 } 3043 3044 static bool isProvablyNull(llvm::Value *addr) { 3045 return isa<llvm::ConstantPointerNull>(addr); 3046 } 3047 3048 /// Emit the actual writing-back of a writeback. 3049 static void emitWriteback(CodeGenFunction &CGF, 3050 const CallArgList::Writeback &writeback) { 3051 const LValue &srcLV = writeback.Source; 3052 Address srcAddr = srcLV.getAddress(); 3053 assert(!isProvablyNull(srcAddr.getPointer()) && 3054 "shouldn't have writeback for provably null argument"); 3055 3056 llvm::BasicBlock *contBB = nullptr; 3057 3058 // If the argument wasn't provably non-null, we need to null check 3059 // before doing the store. 3060 bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer()); 3061 if (!provablyNonNull) { 3062 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3063 contBB = CGF.createBasicBlock("icr.done"); 3064 3065 llvm::Value *isNull = 3066 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3067 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3068 CGF.EmitBlock(writebackBB); 3069 } 3070 3071 // Load the value to writeback. 3072 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3073 3074 // Cast it back, in case we're writing an id to a Foo* or something. 3075 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3076 "icr.writeback-cast"); 3077 3078 // Perform the writeback. 3079 3080 // If we have a "to use" value, it's something we need to emit a use 3081 // of. This has to be carefully threaded in: if it's done after the 3082 // release it's potentially undefined behavior (and the optimizer 3083 // will ignore it), and if it happens before the retain then the 3084 // optimizer could move the release there. 3085 if (writeback.ToUse) { 3086 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3087 3088 // Retain the new value. No need to block-copy here: the block's 3089 // being passed up the stack. 3090 value = CGF.EmitARCRetainNonBlock(value); 3091 3092 // Emit the intrinsic use here. 3093 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3094 3095 // Load the old value (primitively). 3096 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3097 3098 // Put the new value in place (primitively). 3099 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3100 3101 // Release the old value. 3102 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3103 3104 // Otherwise, we can just do a normal lvalue store. 
3105 } else { 3106 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3107 } 3108 3109 // Jump to the continuation block. 3110 if (!provablyNonNull) 3111 CGF.EmitBlock(contBB); 3112 } 3113 3114 static void emitWritebacks(CodeGenFunction &CGF, 3115 const CallArgList &args) { 3116 for (const auto &I : args.writebacks()) 3117 emitWriteback(CGF, I); 3118 } 3119 3120 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3121 const CallArgList &CallArgs) { 3122 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()); 3123 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3124 CallArgs.getCleanupsToDeactivate(); 3125 // Iterate in reverse to increase the likelihood of popping the cleanup. 3126 for (const auto &I : llvm::reverse(Cleanups)) { 3127 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3128 I.IsActiveIP->eraseFromParent(); 3129 } 3130 } 3131 3132 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3133 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3134 if (uop->getOpcode() == UO_AddrOf) 3135 return uop->getSubExpr(); 3136 return nullptr; 3137 } 3138 3139 /// Emit an argument that's being passed call-by-writeback. That is, 3140 /// we are passing the address of an __autoreleased temporary; it 3141 /// might be copy-initialized with the current value of the given 3142 /// address, but it will definitely be copied out of after the call. 3143 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3144 const ObjCIndirectCopyRestoreExpr *CRE) { 3145 LValue srcLV; 3146 3147 // Make an optimistic effort to emit the address as an l-value. 3148 // This can fail if the argument expression is more complicated. 3149 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3150 srcLV = CGF.EmitLValue(lvExpr); 3151 3152 // Otherwise, just emit it as a scalar. 3153 } else { 3154 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3155 3156 QualType srcAddrType = 3157 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3158 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3159 } 3160 Address srcAddr = srcLV.getAddress(); 3161 3162 // The dest and src types don't necessarily match in LLVM terms 3163 // because of the crazy ObjC compatibility rules. 3164 3165 llvm::PointerType *destType = 3166 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3167 3168 // If the address is a constant null, just pass the appropriate null. 3169 if (isProvablyNull(srcAddr.getPointer())) { 3170 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3171 CRE->getType()); 3172 return; 3173 } 3174 3175 // Create the temporary. 3176 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3177 CGF.getPointerAlign(), 3178 "icr.temp"); 3179 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3180 // and that cleanup will be conditional if we can't prove that the l-value 3181 // isn't null, so we need to register a dominating point so that the cleanups 3182 // system will make valid IR. 3183 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3184 3185 // Zero-initialize it if we're not doing a copy-initialization. 
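// If we aren't copying the current value in, the __autoreleasing temporary still has to start out as a valid (null) object pointer, so that neither the callee nor the later writeback reads an uninitialized value.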
3186 bool shouldCopy = CRE->shouldCopy(); 3187 if (!shouldCopy) { 3188 llvm::Value *null = 3189 llvm::ConstantPointerNull::get( 3190 cast<llvm::PointerType>(destType->getElementType())); 3191 CGF.Builder.CreateStore(null, temp); 3192 } 3193 3194 llvm::BasicBlock *contBB = nullptr; 3195 llvm::BasicBlock *originBB = nullptr; 3196 3197 // If the address is *not* known to be non-null, we need to switch. 3198 llvm::Value *finalArgument; 3199 3200 bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer()); 3201 if (provablyNonNull) { 3202 finalArgument = temp.getPointer(); 3203 } else { 3204 llvm::Value *isNull = 3205 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3206 3207 finalArgument = CGF.Builder.CreateSelect(isNull, 3208 llvm::ConstantPointerNull::get(destType), 3209 temp.getPointer(), "icr.argument"); 3210 3211 // If we need to copy, then the load has to be conditional, which 3212 // means we need control flow. 3213 if (shouldCopy) { 3214 originBB = CGF.Builder.GetInsertBlock(); 3215 contBB = CGF.createBasicBlock("icr.cont"); 3216 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3217 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3218 CGF.EmitBlock(copyBB); 3219 condEval.begin(CGF); 3220 } 3221 } 3222 3223 llvm::Value *valueToUse = nullptr; 3224 3225 // Perform a copy if necessary. 3226 if (shouldCopy) { 3227 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3228 assert(srcRV.isScalar()); 3229 3230 llvm::Value *src = srcRV.getScalarVal(); 3231 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3232 "icr.cast"); 3233 3234 // Use an ordinary store, not a store-to-lvalue. 3235 CGF.Builder.CreateStore(src, temp); 3236 3237 // If optimization is enabled, and the value was held in a 3238 // __strong variable, we need to tell the optimizer that this 3239 // value has to stay alive until we're doing the store back. 3240 // This is because the temporary is effectively unretained, 3241 // and so otherwise we can violate the high-level semantics. 3242 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3243 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3244 valueToUse = src; 3245 } 3246 } 3247 3248 // Finish the control flow if we needed it. 3249 if (shouldCopy && !provablyNonNull) { 3250 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3251 CGF.EmitBlock(contBB); 3252 3253 // Make a phi for the value to intrinsically use. 3254 if (valueToUse) { 3255 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3256 "icr.to-use"); 3257 phiToUse->addIncoming(valueToUse, copyBB); 3258 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3259 originBB); 3260 valueToUse = phiToUse; 3261 } 3262 3263 condEval.end(CGF); 3264 } 3265 3266 args.addWriteback(srcLV, temp, valueToUse); 3267 args.add(RValue::get(finalArgument), CRE->getType()); 3268 } 3269 3270 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3271 assert(!StackBase); 3272 3273 // Save the stack. 3274 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3275 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3276 } 3277 3278 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3279 if (StackBase) { 3280 // Restore the stack after the call. 
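// This llvm.stackrestore pairs with the llvm.stacksave emitted by allocateArgumentMemory above; together they scope the inalloca argument block to the call.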
3281 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3282 CGF.Builder.CreateCall(F, StackBase); 3283 } 3284 } 3285 3286 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3287 SourceLocation ArgLoc, 3288 AbstractCallee AC, 3289 unsigned ParmNum) { 3290 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3291 SanOpts.has(SanitizerKind::NullabilityArg))) 3292 return; 3293 3294 // The param decl may be missing in a variadic function. 3295 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3296 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3297 3298 // Prefer the nonnull attribute if it's present. 3299 const NonNullAttr *NNAttr = nullptr; 3300 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 3301 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 3302 3303 bool CanCheckNullability = false; 3304 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 3305 auto Nullability = PVD->getType()->getNullability(getContext()); 3306 CanCheckNullability = Nullability && 3307 *Nullability == NullabilityKind::NonNull && 3308 PVD->getTypeSourceInfo(); 3309 } 3310 3311 if (!NNAttr && !CanCheckNullability) 3312 return; 3313 3314 SourceLocation AttrLoc; 3315 SanitizerMask CheckKind; 3316 SanitizerHandler Handler; 3317 if (NNAttr) { 3318 AttrLoc = NNAttr->getLocation(); 3319 CheckKind = SanitizerKind::NonnullAttribute; 3320 Handler = SanitizerHandler::NonnullArg; 3321 } else { 3322 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 3323 CheckKind = SanitizerKind::NullabilityArg; 3324 Handler = SanitizerHandler::NullabilityArg; 3325 } 3326 3327 SanitizerScope SanScope(this); 3328 assert(RV.isScalar()); 3329 llvm::Value *V = RV.getScalarVal(); 3330 llvm::Value *Cond = 3331 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3332 llvm::Constant *StaticData[] = { 3333 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 3334 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3335 }; 3336 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 3337 } 3338 3339 void CodeGenFunction::EmitCallArgs( 3340 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3341 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3342 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 3343 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3344 3345 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3346 // because arguments are destroyed left to right in the callee. As a special 3347 // case, there are certain language constructs that require left-to-right 3348 // evaluation, and in those cases we consider the evaluation order requirement 3349 // to trump the "destruction order is reverse construction order" guarantee. 3350 bool LeftToRight = 3351 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() 3352 ? 
Order == EvaluationOrder::ForceLeftToRight 3353 : Order != EvaluationOrder::ForceRightToLeft; 3354 3355 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, 3356 RValue EmittedArg) { 3357 if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) 3358 return; 3359 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3360 if (PS == nullptr) 3361 return; 3362 3363 const auto &Context = getContext(); 3364 auto SizeTy = Context.getSizeType(); 3365 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3366 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); 3367 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, 3368 EmittedArg.getScalarVal()); 3369 Args.add(RValue::get(V), SizeTy); 3370 // If we're emitting args in reverse, be sure to do so with 3371 // pass_object_size, as well. 3372 if (!LeftToRight) 3373 std::swap(Args.back(), *(&Args.back() - 1)); 3374 }; 3375 3376 // Insert a stack save if we're going to need any inalloca args. 3377 bool HasInAllocaArgs = false; 3378 if (CGM.getTarget().getCXXABI().isMicrosoft()) { 3379 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 3380 I != E && !HasInAllocaArgs; ++I) 3381 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 3382 if (HasInAllocaArgs) { 3383 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3384 Args.allocateArgumentMemory(*this); 3385 } 3386 } 3387 3388 // Evaluate each argument in the appropriate order. 3389 size_t CallArgsStart = Args.size(); 3390 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 3391 unsigned Idx = LeftToRight ? I : E - I - 1; 3392 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; 3393 unsigned InitialArgSize = Args.size(); 3394 EmitCallArg(Args, *Arg, ArgTypes[Idx]); 3395 // In particular, we depend on it being the last arg in Args, and the 3396 // objectsize bits depend on there only being one arg if !LeftToRight. 3397 assert(InitialArgSize + 1 == Args.size() && 3398 "The code below depends on only adding one arg per EmitCallArg"); 3399 (void)InitialArgSize; 3400 RValue RVArg = Args.back().RV; 3401 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, 3402 ParamsToSkip + Idx); 3403 // @llvm.objectsize should never have side-effects and shouldn't need 3404 // destruction/cleanups, so we can safely "emit" it after its arg, 3405 // regardless of right-to-leftness 3406 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); 3407 } 3408 3409 if (!LeftToRight) { 3410 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3411 // IR function. 
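// (Illustration: for f(a, b, c) evaluated right to left we appended [c, b, a]; reversing the newly added slice restores the source order [a, b, c].)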
3412 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3413 } 3414 } 3415 3416 namespace { 3417 3418 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3419 DestroyUnpassedArg(Address Addr, QualType Ty) 3420 : Addr(Addr), Ty(Ty) {} 3421 3422 Address Addr; 3423 QualType Ty; 3424 3425 void Emit(CodeGenFunction &CGF, Flags flags) override { 3426 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3427 assert(!Dtor->isTrivial()); 3428 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3429 /*Delegating=*/false, Addr); 3430 } 3431 }; 3432 3433 struct DisableDebugLocationUpdates { 3434 CodeGenFunction &CGF; 3435 bool disabledDebugInfo; 3436 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3437 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3438 CGF.disableDebugInfo(); 3439 } 3440 ~DisableDebugLocationUpdates() { 3441 if (disabledDebugInfo) 3442 CGF.enableDebugInfo(); 3443 } 3444 }; 3445 3446 } // end anonymous namespace 3447 3448 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3449 QualType type) { 3450 DisableDebugLocationUpdates Dis(*this, E); 3451 if (const ObjCIndirectCopyRestoreExpr *CRE 3452 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3453 assert(getLangOpts().ObjCAutoRefCount); 3454 assert(getContext().hasSameUnqualifiedType(E->getType(), type)); 3455 return emitWritebackArg(*this, args, CRE); 3456 } 3457 3458 assert(type->isReferenceType() == E->isGLValue() && 3459 "reference binding to unmaterialized r-value!"); 3460 3461 if (E->isGLValue()) { 3462 assert(E->getObjectKind() == OK_Ordinary); 3463 return args.add(EmitReferenceBindingToExpr(E), type); 3464 } 3465 3466 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3467 3468 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3469 // However, we still have to push an EH-only cleanup in case we unwind before 3470 // we make it to the call. 3471 if (HasAggregateEvalKind && 3472 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) { 3473 // If we're using inalloca, use the argument memory. Otherwise, use a 3474 // temporary. 3475 AggValueSlot Slot; 3476 if (args.isUsingInAlloca()) 3477 Slot = createPlaceholderSlot(*this, type); 3478 else 3479 Slot = CreateAggTemp(type, "agg.tmp"); 3480 3481 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3482 bool DestroyedInCallee = 3483 RD && RD->hasNonTrivialDestructor() && 3484 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default; 3485 if (DestroyedInCallee) 3486 Slot.setExternallyDestructed(); 3487 3488 EmitAggExpr(E, Slot); 3489 RValue RV = Slot.asRValue(); 3490 args.add(RV, type); 3491 3492 if (DestroyedInCallee) { 3493 // Create a no-op GEP between the placeholder and the cleanup so we can 3494 // RAUW it successfully. It also serves as a marker of the first 3495 // instruction where the cleanup is active. 3496 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3497 type); 3498 // This unreachable is a temporary marker which will be removed later. 
3499 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3500 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3501 } 3502 return; 3503 } 3504 3505 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3506 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3507 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3508 assert(L.isSimple()); 3509 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) { 3510 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true); 3511 } else { 3512 // We can't represent a misaligned lvalue in the CallArgList, so copy 3513 // to an aligned temporary now. 3514 Address tmp = CreateMemTemp(type); 3515 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile()); 3516 args.add(RValue::getAggregate(tmp), type); 3517 } 3518 return; 3519 } 3520 3521 args.add(EmitAnyExprToTemp(E), type); 3522 } 3523 3524 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3525 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3526 // implicitly widens null pointer constants that are arguments to varargs 3527 // functions to pointer-sized ints. 3528 if (!getTarget().getTriple().isOSWindows()) 3529 return Arg->getType(); 3530 3531 if (Arg->getType()->isIntegerType() && 3532 getContext().getTypeSize(Arg->getType()) < 3533 getContext().getTargetInfo().getPointerWidth(0) && 3534 Arg->isNullPointerConstant(getContext(), 3535 Expr::NPC_ValueDependentIsNotNull)) { 3536 return getContext().getIntPtrType(); 3537 } 3538 3539 return Arg->getType(); 3540 } 3541 3542 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3543 // optimizer it can aggressively ignore unwind edges. 3544 void 3545 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 3546 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 3547 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 3548 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 3549 CGM.getNoObjCARCExceptionsMetadata()); 3550 } 3551 3552 /// Emits a call to the given no-arguments nounwind runtime function. 3553 llvm::CallInst * 3554 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3555 const llvm::Twine &name) { 3556 return EmitNounwindRuntimeCall(callee, None, name); 3557 } 3558 3559 /// Emits a call to the given nounwind runtime function. 3560 llvm::CallInst * 3561 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee, 3562 ArrayRef<llvm::Value*> args, 3563 const llvm::Twine &name) { 3564 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3565 call->setDoesNotThrow(); 3566 return call; 3567 } 3568 3569 /// Emits a simple call (never an invoke) to the given no-arguments 3570 /// runtime function. 3571 llvm::CallInst * 3572 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3573 const llvm::Twine &name) { 3574 return EmitRuntimeCall(callee, None, name); 3575 } 3576 3577 // Calls which may throw must have operand bundles indicating which funclet 3578 // they are nested within. 3579 static void 3580 getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad, 3581 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) { 3582 // There is no need for a funclet operand bundle if we aren't inside a 3583 // funclet. 3584 if (!CurrentFuncletPad) 3585 return; 3586 3587 // Skip intrinsics which cannot throw. 
3588 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3589 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3590 return; 3591 3592 BundleList.emplace_back("funclet", CurrentFuncletPad); 3593 } 3594 3595 /// Emits a simple call (never an invoke) to the given runtime function. 3596 llvm::CallInst * 3597 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee, 3598 ArrayRef<llvm::Value*> args, 3599 const llvm::Twine &name) { 3600 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3601 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3602 3603 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name); 3604 call->setCallingConv(getRuntimeCC()); 3605 return call; 3606 } 3607 3608 /// Emits a call or invoke to the given noreturn runtime function. 3609 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee, 3610 ArrayRef<llvm::Value*> args) { 3611 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3612 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList); 3613 3614 if (getInvokeDest()) { 3615 llvm::InvokeInst *invoke = 3616 Builder.CreateInvoke(callee, 3617 getUnreachableBlock(), 3618 getInvokeDest(), 3619 args, 3620 BundleList); 3621 invoke->setDoesNotReturn(); 3622 invoke->setCallingConv(getRuntimeCC()); 3623 } else { 3624 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3625 call->setDoesNotReturn(); 3626 call->setCallingConv(getRuntimeCC()); 3627 Builder.CreateUnreachable(); 3628 } 3629 } 3630 3631 /// Emits a call or invoke instruction to the given nullary runtime function. 3632 llvm::CallSite 3633 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3634 const Twine &name) { 3635 return EmitRuntimeCallOrInvoke(callee, None, name); 3636 } 3637 3638 /// Emits a call or invoke instruction to the given runtime function. 3639 llvm::CallSite 3640 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee, 3641 ArrayRef<llvm::Value*> args, 3642 const Twine &name) { 3643 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name); 3644 callSite.setCallingConv(getRuntimeCC()); 3645 return callSite; 3646 } 3647 3648 /// Emits a call or invoke instruction to the given function, depending 3649 /// on the current state of the EH stack. 3650 llvm::CallSite 3651 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee, 3652 ArrayRef<llvm::Value *> Args, 3653 const Twine &Name) { 3654 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3655 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3656 getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList); 3657 3658 llvm::Instruction *Inst; 3659 if (!InvokeDest) 3660 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3661 else { 3662 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3663 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3664 Name); 3665 EmitBlock(ContBB); 3666 } 3667 3668 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3669 // optimizer it can aggressively ignore unwind edges. 3670 if (CGM.getLangOpts().ObjCAutoRefCount) 3671 AddObjCARCExceptionMetadata(Inst); 3672 3673 return llvm::CallSite(Inst); 3674 } 3675 3676 /// \brief Store a non-aggregate value to an address to initialize it. For 3677 /// initialization, a non-atomic store will be used. 
3678 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src, 3679 LValue Dst) { 3680 if (Src.isScalar()) 3681 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true); 3682 else 3683 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true); 3684 } 3685 3686 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3687 llvm::Value *New) { 3688 DeferredReplacements.push_back(std::make_pair(Old, New)); 3689 } 3690 3691 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3692 const CGCallee &Callee, 3693 ReturnValueSlot ReturnValue, 3694 const CallArgList &CallArgs, 3695 llvm::Instruction **callOrInvoke) { 3696 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3697 3698 assert(Callee.isOrdinary()); 3699 3700 // Handle struct-return functions by passing a pointer to the 3701 // location that we would like to return into. 3702 QualType RetTy = CallInfo.getReturnType(); 3703 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3704 3705 llvm::FunctionType *IRFuncTy = Callee.getFunctionType(); 3706 3707 // 1. Set up the arguments. 3708 3709 // If we're using inalloca, insert the allocation after the stack save. 3710 // FIXME: Do this earlier rather than hacking it in here! 3711 Address ArgMemory = Address::invalid(); 3712 const llvm::StructLayout *ArgMemoryLayout = nullptr; 3713 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 3714 const llvm::DataLayout &DL = CGM.getDataLayout(); 3715 ArgMemoryLayout = DL.getStructLayout(ArgStruct); 3716 llvm::Instruction *IP = CallArgs.getStackBase(); 3717 llvm::AllocaInst *AI; 3718 if (IP) { 3719 IP = IP->getNextNode(); 3720 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), 3721 "argmem", IP); 3722 } else { 3723 AI = CreateTempAlloca(ArgStruct, "argmem"); 3724 } 3725 auto Align = CallInfo.getArgStructAlignment(); 3726 AI->setAlignment(Align.getQuantity()); 3727 AI->setUsedWithInAlloca(true); 3728 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 3729 ArgMemory = Address(AI, Align); 3730 } 3731 3732 // Helper function to drill into the inalloca allocation. 3733 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address { 3734 auto FieldOffset = 3735 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex)); 3736 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset); 3737 }; 3738 3739 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 3740 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 3741 3742 // If the call returns a temporary with struct return, create a temporary 3743 // alloca to hold the result, unless one is given to us. 
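// This covers indirect (sret), inalloca, and coerce-and-expand returns; each of them needs a memory slot on the caller's side to receive or reassemble the result.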
3744 Address SRetPtr = Address::invalid(); 3745 size_t UnusedReturnSize = 0; 3746 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { 3747 if (!ReturnValue.isNull()) { 3748 SRetPtr = ReturnValue.getValue(); 3749 } else { 3750 SRetPtr = CreateMemTemp(RetTy); 3751 if (HaveInsertPoint() && ReturnValue.isUnused()) { 3752 uint64_t size = 3753 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); 3754 if (EmitLifetimeStart(size, SRetPtr.getPointer())) 3755 UnusedReturnSize = size; 3756 } 3757 } 3758 if (IRFunctionArgs.hasSRetArg()) { 3759 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); 3760 } else if (RetAI.isInAlloca()) { 3761 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex()); 3762 Builder.CreateStore(SRetPtr.getPointer(), Addr); 3763 } 3764 } 3765 3766 Address swiftErrorTemp = Address::invalid(); 3767 Address swiftErrorArg = Address::invalid(); 3768 3769 // Translate all of the arguments as necessary to match the IR lowering. 3770 assert(CallInfo.arg_size() == CallArgs.size() && 3771 "Mismatch between function signature & arguments."); 3772 unsigned ArgNo = 0; 3773 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 3774 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 3775 I != E; ++I, ++info_it, ++ArgNo) { 3776 const ABIArgInfo &ArgInfo = info_it->info; 3777 RValue RV = I->RV; 3778 3779 // Insert a padding argument to ensure proper alignment. 3780 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 3781 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 3782 llvm::UndefValue::get(ArgInfo.getPaddingType()); 3783 3784 unsigned FirstIRArg, NumIRArgs; 3785 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 3786 3787 switch (ArgInfo.getKind()) { 3788 case ABIArgInfo::InAlloca: { 3789 assert(NumIRArgs == 0); 3790 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3791 if (RV.isAggregate()) { 3792 // Replace the placeholder with the appropriate argument slot GEP. 3793 llvm::Instruction *Placeholder = 3794 cast<llvm::Instruction>(RV.getAggregatePointer()); 3795 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 3796 Builder.SetInsertPoint(Placeholder); 3797 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3798 Builder.restoreIP(IP); 3799 deferPlaceholderReplacement(Placeholder, Addr.getPointer()); 3800 } else { 3801 // Store the RValue into the argument struct. 3802 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex()); 3803 unsigned AS = Addr.getType()->getPointerAddressSpace(); 3804 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); 3805 // There are some cases where a trivial bitcast is not avoidable. The 3806 // definition of a type later in a translation unit may change its type 3807 // from {}* to (%struct.foo*)*. 3808 if (Addr.getType() != MemType) 3809 Addr = Builder.CreateBitCast(Addr, MemType); 3810 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3811 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3812 } 3813 break; 3814 } 3815 3816 case ABIArgInfo::Indirect: { 3817 assert(NumIRArgs == 1); 3818 if (RV.isScalar() || RV.isComplex()) { 3819 // Make a temporary alloca to pass the argument.
3820 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3821 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3822 3823 LValue argLV = MakeAddrLValue(Addr, I->Ty); 3824 EmitInitStoreOfNonAggregate(*this, RV, argLV); 3825 } else { 3826 // We want to avoid creating an unnecessary temporary+copy here; 3827 // however, we need one in three cases: 3828 // 1. If the argument is not byval, and we are required to copy the 3829 // source. (This case doesn't occur on any common architecture.) 3830 // 2. If the argument is byval, RV is not sufficiently aligned, and 3831 // we cannot force it to be sufficiently aligned. 3832 // 3. If the argument is byval, but RV is located in an address space 3833 // different than that of the argument (0). 3834 Address Addr = RV.getAggregateAddress(); 3835 CharUnits Align = ArgInfo.getIndirectAlign(); 3836 const llvm::DataLayout *TD = &CGM.getDataLayout(); 3837 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace(); 3838 const unsigned ArgAddrSpace = 3839 (FirstIRArg < IRFuncTy->getNumParams() 3840 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() 3841 : 0); 3842 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) || 3843 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align && 3844 llvm::getOrEnforceKnownAlignment(Addr.getPointer(), 3845 Align.getQuantity(), *TD) 3846 < Align.getQuantity()) || 3847 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) { 3848 // Create an aligned temporary, and copy to it. 3849 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign()); 3850 IRCallArgs[FirstIRArg] = AI.getPointer(); 3851 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified()); 3852 } else { 3853 // Skip the extra memcpy call. 3854 IRCallArgs[FirstIRArg] = Addr.getPointer(); 3855 } 3856 } 3857 break; 3858 } 3859 3860 case ABIArgInfo::Ignore: 3861 assert(NumIRArgs == 0); 3862 break; 3863 3864 case ABIArgInfo::Extend: 3865 case ABIArgInfo::Direct: { 3866 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 3867 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 3868 ArgInfo.getDirectOffset() == 0) { 3869 assert(NumIRArgs == 1); 3870 llvm::Value *V; 3871 if (RV.isScalar()) 3872 V = RV.getScalarVal(); 3873 else 3874 V = Builder.CreateLoad(RV.getAggregateAddress()); 3875 3876 // Implement swifterror by copying into a new swifterror argument. 3877 // We'll write back in the normal path out of the call. 3878 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 3879 == ParameterABI::SwiftErrorResult) { 3880 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 3881 3882 QualType pointeeTy = I->Ty->getPointeeType(); 3883 swiftErrorArg = 3884 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 3885 3886 swiftErrorTemp = 3887 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 3888 V = swiftErrorTemp.getPointer(); 3889 cast<llvm::AllocaInst>(V)->setSwiftError(true); 3890 3891 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 3892 Builder.CreateStore(errorValue, swiftErrorTemp); 3893 } 3894 3895 // We might have to widen integers, but we should never truncate. 3896 if (ArgInfo.getCoerceToType() != V->getType() && 3897 V->getType()->isIntegerTy()) 3898 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 3899 3900 // If the argument doesn't match, perform a bitcast to coerce it. This 3901 // can happen due to trivial type mismatches. 
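// (Illustration: two structurally identical record types can lower to distinct named LLVM struct types, so pointer arguments may disagree on the pointee type even though the call is well-formed.)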
3902 if (FirstIRArg < IRFuncTy->getNumParams() && 3903 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 3904 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 3905 3906 IRCallArgs[FirstIRArg] = V; 3907 break; 3908 } 3909 3910 // FIXME: Avoid the conversion through memory if possible. 3911 Address Src = Address::invalid(); 3912 if (RV.isScalar() || RV.isComplex()) { 3913 Src = CreateMemTemp(I->Ty, "coerce"); 3914 LValue SrcLV = MakeAddrLValue(Src, I->Ty); 3915 EmitInitStoreOfNonAggregate(*this, RV, SrcLV); 3916 } else { 3917 Src = RV.getAggregateAddress(); 3918 } 3919 3920 // If the value is offset in memory, apply the offset now. 3921 Src = emitAddressAtOffset(*this, Src, ArgInfo); 3922 3923 // Fast-isel and the optimizer generally like scalar values better than 3924 // FCAs, so we flatten them if this is safe to do for this argument. 3925 llvm::StructType *STy = 3926 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 3927 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 3928 llvm::Type *SrcTy = Src.getType()->getElementType(); 3929 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 3930 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 3931 3932 // If the source type is smaller than the destination type of the 3933 // coerce-to logic, copy the source value into a temp alloca the size 3934 // of the destination type to allow loading all of it. The bits past 3935 // the source value are left undef. 3936 if (SrcSize < DstSize) { 3937 Address TempAlloca 3938 = CreateTempAlloca(STy, Src.getAlignment(), 3939 Src.getName() + ".coerce"); 3940 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 3941 Src = TempAlloca; 3942 } else { 3943 Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy)); 3944 } 3945 3946 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy); 3947 assert(NumIRArgs == STy->getNumElements()); 3948 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 3949 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i)); 3950 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset); 3951 llvm::Value *LI = Builder.CreateLoad(EltPtr); 3952 IRCallArgs[FirstIRArg + i] = LI; 3953 } 3954 } else { 3955 // In the simple case, just pass the coerced loaded value. 3956 assert(NumIRArgs == 1); 3957 IRCallArgs[FirstIRArg] = 3958 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 3959 } 3960 3961 break; 3962 } 3963 3964 case ABIArgInfo::CoerceAndExpand: { 3965 auto coercionType = ArgInfo.getCoerceAndExpandType(); 3966 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 3967 3968 llvm::Value *tempSize = nullptr; 3969 Address addr = Address::invalid(); 3970 if (RV.isAggregate()) { 3971 addr = RV.getAggregateAddress(); 3972 } else { 3973 assert(RV.isScalar()); // complex should always just be direct 3974 3975 llvm::Type *scalarType = RV.getScalarVal()->getType(); 3976 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 3977 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 3978 3979 tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize); 3980 3981 // Materialize to a temporary. 
3982 addr = CreateTempAlloca(RV.getScalarVal()->getType(), 3983 CharUnits::fromQuantity(std::max(layout->getAlignment(), 3984 scalarAlign))); 3985 EmitLifetimeStart(scalarSize, addr.getPointer()); 3986 3987 Builder.CreateStore(RV.getScalarVal(), addr); 3988 } 3989 3990 addr = Builder.CreateElementBitCast(addr, coercionType); 3991 3992 unsigned IRArgPos = FirstIRArg; 3993 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3994 llvm::Type *eltType = coercionType->getElementType(i); 3995 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 3996 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 3997 llvm::Value *elt = Builder.CreateLoad(eltAddr); 3998 IRCallArgs[IRArgPos++] = elt; 3999 } 4000 assert(IRArgPos == FirstIRArg + NumIRArgs); 4001 4002 if (tempSize) { 4003 EmitLifetimeEnd(tempSize, addr.getPointer()); 4004 } 4005 4006 break; 4007 } 4008 4009 case ABIArgInfo::Expand: 4010 unsigned IRArgPos = FirstIRArg; 4011 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos); 4012 assert(IRArgPos == FirstIRArg + NumIRArgs); 4013 break; 4014 } 4015 } 4016 4017 llvm::Value *CalleePtr = Callee.getFunctionPointer(); 4018 4019 // If we're using inalloca, set up that argument. 4020 if (ArgMemory.isValid()) { 4021 llvm::Value *Arg = ArgMemory.getPointer(); 4022 if (CallInfo.isVariadic()) { 4023 // When passing non-POD arguments by value to variadic functions, we will 4024 // end up with a variadic prototype and an inalloca call site. In such 4025 // cases, we can't do any parameter mismatch checks. Give up and bitcast 4026 // the callee. 4027 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); 4028 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS); 4029 CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy); 4030 } else { 4031 llvm::Type *LastParamTy = 4032 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 4033 if (Arg->getType() != LastParamTy) { 4034 #ifndef NDEBUG 4035 // Assert that these structs have equivalent element types. 4036 llvm::StructType *FullTy = CallInfo.getArgStruct(); 4037 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 4038 cast<llvm::PointerType>(LastParamTy)->getElementType()); 4039 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 4040 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 4041 DE = DeclaredTy->element_end(), 4042 FI = FullTy->element_begin(); 4043 DI != DE; ++DI, ++FI) 4044 assert(*DI == *FI); 4045 #endif 4046 Arg = Builder.CreateBitCast(Arg, LastParamTy); 4047 } 4048 } 4049 assert(IRFunctionArgs.hasInallocaArg()); 4050 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 4051 } 4052 4053 // 2. Prepare the function pointer. 4054 4055 // If the callee is a bitcast of a non-variadic function to have a 4056 // variadic function pointer type, check to see if we can remove the 4057 // bitcast. This comes up with unprototyped functions. 4058 // 4059 // This makes the IR nicer, but more importantly it ensures that we 4060 // can inline the function at -O0 if it is marked always_inline. 
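// (Illustration: a call through an unprototyped 'void f();' declaration is emitted with IR type 'void (...)', so a concrete non-variadic definition of f shows up here as a bitcast constant expression that the lambda below can strip.)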
4061 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* { 4062 llvm::FunctionType *CalleeFT = 4063 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType()); 4064 if (!CalleeFT->isVarArg()) 4065 return Ptr; 4066 4067 llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr); 4068 if (!CE || CE->getOpcode() != llvm::Instruction::BitCast) 4069 return Ptr; 4070 4071 llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0)); 4072 if (!OrigFn) 4073 return Ptr; 4074 4075 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); 4076 4077 // If the original type is variadic, or if any of the component types 4078 // disagree, we cannot remove the cast. 4079 if (OrigFT->isVarArg() || 4080 OrigFT->getNumParams() != CalleeFT->getNumParams() || 4081 OrigFT->getReturnType() != CalleeFT->getReturnType()) 4082 return Ptr; 4083 4084 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) 4085 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) 4086 return Ptr; 4087 4088 return OrigFn; 4089 }; 4090 CalleePtr = simplifyVariadicCallee(CalleePtr); 4091 4092 // 3. Perform the actual call. 4093 4094 // Deactivate any cleanups that we're supposed to do immediately before 4095 // the call. 4096 if (!CallArgs.getCleanupsToDeactivate().empty()) 4097 deactivateArgCleanupsBeforeCall(*this, CallArgs); 4098 4099 // Assert that the arguments we computed match up. The IR verifier 4100 // will catch this, but this is a common enough source of problems 4101 // during IRGen changes that it's way better for debugging to catch 4102 // it ourselves here. 4103 #ifndef NDEBUG 4104 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); 4105 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 4106 // Inalloca argument can have different type. 4107 if (IRFunctionArgs.hasInallocaArg() && 4108 i == IRFunctionArgs.getInallocaArgNo()) 4109 continue; 4110 if (i < IRFuncTy->getNumParams()) 4111 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); 4112 } 4113 #endif 4114 4115 // Compute the calling convention and attributes. 4116 unsigned CallingConv; 4117 CodeGen::AttributeListType AttributeList; 4118 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, 4119 Callee.getAbstractInfo(), 4120 AttributeList, CallingConv, 4121 /*AttrOnCallSite=*/true); 4122 llvm::AttributeList Attrs = 4123 llvm::AttributeList::get(getLLVMContext(), AttributeList); 4124 4125 // Apply some call-site-specific attributes. 4126 // TODO: work this into building the attribute set. 4127 4128 // Apply always_inline to all calls within flatten functions. 4129 // FIXME: should this really take priority over __try, below? 4130 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && 4131 !(Callee.getAbstractInfo().getCalleeDecl() && 4132 Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) { 4133 Attrs = 4134 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4135 llvm::Attribute::AlwaysInline); 4136 } 4137 4138 // Disable inlining inside SEH __try blocks. 4139 if (isSEHTryScope()) { 4140 Attrs = 4141 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4142 llvm::Attribute::NoInline); 4143 } 4144 4145 // Decide whether to use a call or an invoke. 4146 bool CannotThrow; 4147 if (currentFunctionUsesSEHTry()) { 4148 // SEH cares about asynchronous exceptions, so everything can "throw." 
4149 CannotThrow = false; 4150 } else if (isCleanupPadScope() && 4151 EHPersonality::get(*this).isMSVCXXPersonality()) { 4152 // The MSVC++ personality will implicitly terminate the program if an 4153 // exception is thrown during a cleanup outside of a try/catch. 4154 // We don't need to model anything in IR to get this behavior. 4155 CannotThrow = true; 4156 } else { 4157 // Otherwise, nounwind call sites will never throw. 4158 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex, 4159 llvm::Attribute::NoUnwind); 4160 } 4161 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest(); 4162 4163 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4164 getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList); 4165 4166 // Emit the actual call/invoke instruction. 4167 llvm::CallSite CS; 4168 if (!InvokeDest) { 4169 CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList); 4170 } else { 4171 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 4172 CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs, 4173 BundleList); 4174 EmitBlock(Cont); 4175 } 4176 llvm::Instruction *CI = CS.getInstruction(); 4177 if (callOrInvoke) 4178 *callOrInvoke = CI; 4179 4180 // Apply the attributes and calling convention. 4181 CS.setAttributes(Attrs); 4182 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 4183 4184 // Apply various metadata. 4185 4186 if (!CI->getType()->isVoidTy()) 4187 CI->setName("call"); 4188 4189 // Insert instrumentation or attach profile metadata at indirect call sites. 4190 // For more details, see the comment before the definition of 4191 // IPVK_IndirectCallTarget in InstrProfData.inc. 4192 if (!CS.getCalledFunction()) 4193 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, 4194 CI, CalleePtr); 4195 4196 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4197 // optimizer it can aggressively ignore unwind edges. 4198 if (CGM.getLangOpts().ObjCAutoRefCount) 4199 AddObjCARCExceptionMetadata(CI); 4200 4201 // Suppress tail calls if requested. 4202 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { 4203 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl(); 4204 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) 4205 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); 4206 } 4207 4208 // 4. Finish the call. 4209 4210 // If the call doesn't return, finish the basic block and clear the 4211 // insertion point; this allows the rest of IRGen to discard 4212 // unreachable code. 4213 if (CS.doesNotReturn()) { 4214 if (UnusedReturnSize) 4215 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), 4216 SRetPtr.getPointer()); 4217 4218 Builder.CreateUnreachable(); 4219 Builder.ClearInsertionPoint(); 4220 4221 // FIXME: For now, emit a dummy basic block because expr emitters in 4222 // general are not ready to handle emitting expressions at unreachable 4223 // points. 4224 EnsureInsertPoint(); 4225 4226 // Return a reasonable RValue. 4227 return GetUndefRValue(RetTy); 4228 } 4229 4230 // Perform the swifterror writeback. 4231 if (swiftErrorTemp.isValid()) { 4232 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp); 4233 Builder.CreateStore(errorResult, swiftErrorArg); 4234 } 4235 4236 // Emit any call-associated writebacks immediately. Arguably this 4237 // should happen after any return-value munging.
4238 if (CallArgs.hasWritebacks()) 4239 emitWritebacks(*this, CallArgs); 4240 4241 // The stack cleanup for inalloca arguments has to run out of the normal 4242 // lexical order, so deactivate it and run it manually here. 4243 CallArgs.freeArgumentMemory(*this); 4244 4245 // Extract the return value. 4246 RValue Ret = [&] { 4247 switch (RetAI.getKind()) { 4248 case ABIArgInfo::CoerceAndExpand: { 4249 auto coercionType = RetAI.getCoerceAndExpandType(); 4250 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 4251 4252 Address addr = SRetPtr; 4253 addr = Builder.CreateElementBitCast(addr, coercionType); 4254 4255 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); 4256 bool requiresExtract = isa<llvm::StructType>(CI->getType()); 4257 4258 unsigned unpaddedIndex = 0; 4259 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 4260 llvm::Type *eltType = coercionType->getElementType(i); 4261 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 4262 Address eltAddr = Builder.CreateStructGEP(addr, i, layout); 4263 llvm::Value *elt = CI; 4264 if (requiresExtract) 4265 elt = Builder.CreateExtractValue(elt, unpaddedIndex++); 4266 else 4267 assert(unpaddedIndex == 0); 4268 Builder.CreateStore(elt, eltAddr); 4269 } 4270 // FALLTHROUGH 4271 } 4272 4273 case ABIArgInfo::InAlloca: 4274 case ABIArgInfo::Indirect: { 4275 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); 4276 if (UnusedReturnSize) 4277 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize), 4278 SRetPtr.getPointer()); 4279 return ret; 4280 } 4281 4282 case ABIArgInfo::Ignore: 4283 // If we are ignoring an argument that had a result, make sure to 4284 // construct the appropriate return value for our caller. 4285 return GetUndefRValue(RetTy); 4286 4287 case ABIArgInfo::Extend: 4288 case ABIArgInfo::Direct: { 4289 llvm::Type *RetIRTy = ConvertType(RetTy); 4290 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { 4291 switch (getEvaluationKind(RetTy)) { 4292 case TEK_Complex: { 4293 llvm::Value *Real = Builder.CreateExtractValue(CI, 0); 4294 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); 4295 return RValue::getComplex(std::make_pair(Real, Imag)); 4296 } 4297 case TEK_Aggregate: { 4298 Address DestPtr = ReturnValue.getValue(); 4299 bool DestIsVolatile = ReturnValue.isVolatile(); 4300 4301 if (!DestPtr.isValid()) { 4302 DestPtr = CreateMemTemp(RetTy, "agg.tmp"); 4303 DestIsVolatile = false; 4304 } 4305 BuildAggStore(*this, CI, DestPtr, DestIsVolatile); 4306 return RValue::getAggregate(DestPtr); 4307 } 4308 case TEK_Scalar: { 4309 // If the argument doesn't match, perform a bitcast to coerce it. This 4310 // can happen due to trivial type mismatches. 4311 llvm::Value *V = CI; 4312 if (V->getType() != RetIRTy) 4313 V = Builder.CreateBitCast(V, RetIRTy); 4314 return RValue::get(V); 4315 } 4316 } 4317 llvm_unreachable("bad evaluation kind"); 4318 } 4319 4320 Address DestPtr = ReturnValue.getValue(); 4321 bool DestIsVolatile = ReturnValue.isVolatile(); 4322 4323 if (!DestPtr.isValid()) { 4324 DestPtr = CreateMemTemp(RetTy, "coerce"); 4325 DestIsVolatile = false; 4326 } 4327 4328 // If the value is offset in memory, apply the offset now. 
4329 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); 4330 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); 4331 4332 return convertTempToRValue(DestPtr, RetTy, SourceLocation()); 4333 } 4334 4335 case ABIArgInfo::Expand: 4336 llvm_unreachable("Invalid ABI kind for return argument"); 4337 } 4338 4339 llvm_unreachable("Unhandled ABIArgInfo::Kind"); 4340 } (); 4341 4342 // Emit the assume_aligned check on the return value. 4343 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl(); 4344 if (Ret.isScalar() && TargetDecl) { 4345 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) { 4346 llvm::Value *OffsetValue = nullptr; 4347 if (const auto *Offset = AA->getOffset()) 4348 OffsetValue = EmitScalarExpr(Offset); 4349 4350 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment()); 4351 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment); 4352 EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(), 4353 OffsetValue); 4354 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) { 4355 llvm::Value *ParamVal = 4356 CallArgs[AA->getParamIndex() - 1].RV.getScalarVal(); 4357 EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal); 4358 } 4359 } 4360 4361 return Ret; 4362 } 4363 4364 /* VarArg handling */ 4365 4366 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 4367 VAListAddr = VE->isMicrosoftABI() 4368 ? EmitMSVAListRef(VE->getSubExpr()) 4369 : EmitVAListRef(VE->getSubExpr()); 4370 QualType Ty = VE->getType(); 4371 if (VE->isMicrosoftABI()) 4372 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 4373 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 4374 } 4375