1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // These classes wrap the information about a call or function 10 // definition used to handle ABI compliancy. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "CGCall.h" 15 #include "ABIInfo.h" 16 #include "CGBlocks.h" 17 #include "CGCXXABI.h" 18 #include "CGCleanup.h" 19 #include "CGRecordLayout.h" 20 #include "CodeGenFunction.h" 21 #include "CodeGenModule.h" 22 #include "TargetInfo.h" 23 #include "clang/AST/Attr.h" 24 #include "clang/AST/Decl.h" 25 #include "clang/AST/DeclCXX.h" 26 #include "clang/AST/DeclObjC.h" 27 #include "clang/Basic/CodeGenOptions.h" 28 #include "clang/Basic/TargetBuiltins.h" 29 #include "clang/Basic/TargetInfo.h" 30 #include "clang/CodeGen/CGFunctionInfo.h" 31 #include "clang/CodeGen/SwiftCallingConv.h" 32 #include "llvm/ADT/StringExtras.h" 33 #include "llvm/Analysis/ValueTracking.h" 34 #include "llvm/IR/Assumptions.h" 35 #include "llvm/IR/Attributes.h" 36 #include "llvm/IR/CallingConv.h" 37 #include "llvm/IR/DataLayout.h" 38 #include "llvm/IR/InlineAsm.h" 39 #include "llvm/IR/IntrinsicInst.h" 40 #include "llvm/IR/Intrinsics.h" 41 #include "llvm/Transforms/Utils/Local.h" 42 using namespace clang; 43 using namespace CodeGen; 44 45 /***/ 46 47 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) { 48 switch (CC) { 49 default: return llvm::CallingConv::C; 50 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall; 51 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall; 52 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall; 53 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall; 54 case CC_Win64: return llvm::CallingConv::Win64; 55 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV; 56 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS; 57 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP; 58 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI; 59 // TODO: Add support for __pascal to LLVM. 60 case CC_X86Pascal: return llvm::CallingConv::C; 61 // TODO: Add support for __vectorcall to LLVM. 62 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall; 63 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall; 64 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC; 65 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv(); 66 case CC_PreserveMost: return llvm::CallingConv::PreserveMost; 67 case CC_PreserveAll: return llvm::CallingConv::PreserveAll; 68 case CC_Swift: return llvm::CallingConv::Swift; 69 case CC_SwiftAsync: return llvm::CallingConv::SwiftTail; 70 } 71 } 72 73 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR 74 /// qualification. Either or both of RD and MD may be null. A null RD indicates 75 /// that there is no meaningful 'this' type, and a null MD can occur when 76 /// calling a method pointer. 
77 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD, 78 const CXXMethodDecl *MD) { 79 QualType RecTy; 80 if (RD) 81 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal(); 82 else 83 RecTy = Context.VoidTy; 84 85 if (MD) 86 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace()); 87 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy)); 88 } 89 90 /// Returns the canonical formal type of the given C++ method. 91 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) { 92 return MD->getType()->getCanonicalTypeUnqualified() 93 .getAs<FunctionProtoType>(); 94 } 95 96 /// Returns the "extra-canonicalized" return type, which discards 97 /// qualifiers on the return type. Codegen doesn't care about them, 98 /// and it makes ABI code a little easier to be able to assume that 99 /// all parameter and return types are top-level unqualified. 100 static CanQualType GetReturnType(QualType RetTy) { 101 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType(); 102 } 103 104 /// Arrange the argument and result information for a value of the given 105 /// unprototyped freestanding function type. 106 const CGFunctionInfo & 107 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) { 108 // When translating an unprototyped function type, always use a 109 // variadic type. 110 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(), 111 /*instanceMethod=*/false, 112 /*chainCall=*/false, None, 113 FTNP->getExtInfo(), {}, RequiredArgs(0)); 114 } 115 116 static void addExtParameterInfosForCall( 117 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, 118 const FunctionProtoType *proto, 119 unsigned prefixArgs, 120 unsigned totalArgs) { 121 assert(proto->hasExtParameterInfos()); 122 assert(paramInfos.size() <= prefixArgs); 123 assert(proto->getNumParams() + prefixArgs <= totalArgs); 124 125 paramInfos.reserve(totalArgs); 126 127 // Add default infos for any prefix args that don't already have infos. 128 paramInfos.resize(prefixArgs); 129 130 // Add infos for the prototype. 131 for (const auto &ParamInfo : proto->getExtParameterInfos()) { 132 paramInfos.push_back(ParamInfo); 133 // pass_object_size params have no parameter info. 134 if (ParamInfo.hasPassObjectSize()) 135 paramInfos.emplace_back(); 136 } 137 138 assert(paramInfos.size() <= totalArgs && 139 "Did we forget to insert pass_object_size args?"); 140 // Add default infos for the variadic and/or suffix arguments. 141 paramInfos.resize(totalArgs); 142 } 143 144 /// Adds the formal parameters in FPT to the given prefix. If any parameter in 145 /// FPT has pass_object_size attrs, then we'll add parameters for those, too. 146 static void appendParameterTypes(const CodeGenTypes &CGT, 147 SmallVectorImpl<CanQualType> &prefix, 148 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos, 149 CanQual<FunctionProtoType> FPT) { 150 // Fast path: don't touch param info if we don't need to. 151 if (!FPT->hasExtParameterInfos()) { 152 assert(paramInfos.empty() && 153 "We have paramInfos, but the prototype doesn't?"); 154 prefix.append(FPT->param_type_begin(), FPT->param_type_end()); 155 return; 156 } 157 158 unsigned PrefixSize = prefix.size(); 159 // In the vast majority of cases, we'll have precisely FPT->getNumParams() 160 // parameters; the only thing that can change this is the presence of 161 // pass_object_size. So, we preallocate for the common case. 
162 prefix.reserve(prefix.size() + FPT->getNumParams()); 163 164 auto ExtInfos = FPT->getExtParameterInfos(); 165 assert(ExtInfos.size() == FPT->getNumParams()); 166 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) { 167 prefix.push_back(FPT->getParamType(I)); 168 if (ExtInfos[I].hasPassObjectSize()) 169 prefix.push_back(CGT.getContext().getSizeType()); 170 } 171 172 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize, 173 prefix.size()); 174 } 175 176 /// Arrange the LLVM function layout for a value of the given function 177 /// type, on top of any implicit parameters already stored. 178 static const CGFunctionInfo & 179 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod, 180 SmallVectorImpl<CanQualType> &prefix, 181 CanQual<FunctionProtoType> FTP) { 182 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 183 RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size()); 184 // FIXME: Kill copy. 185 appendParameterTypes(CGT, prefix, paramInfos, FTP); 186 CanQualType resultType = FTP->getReturnType().getUnqualifiedType(); 187 188 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod, 189 /*chainCall=*/false, prefix, 190 FTP->getExtInfo(), paramInfos, 191 Required); 192 } 193 194 /// Arrange the argument and result information for a value of the 195 /// given freestanding function type. 196 const CGFunctionInfo & 197 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) { 198 SmallVector<CanQualType, 16> argTypes; 199 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes, 200 FTP); 201 } 202 203 static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D, 204 bool IsWindows) { 205 // Set the appropriate calling convention for the Function. 206 if (D->hasAttr<StdCallAttr>()) 207 return CC_X86StdCall; 208 209 if (D->hasAttr<FastCallAttr>()) 210 return CC_X86FastCall; 211 212 if (D->hasAttr<RegCallAttr>()) 213 return CC_X86RegCall; 214 215 if (D->hasAttr<ThisCallAttr>()) 216 return CC_X86ThisCall; 217 218 if (D->hasAttr<VectorCallAttr>()) 219 return CC_X86VectorCall; 220 221 if (D->hasAttr<PascalAttr>()) 222 return CC_X86Pascal; 223 224 if (PcsAttr *PCS = D->getAttr<PcsAttr>()) 225 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP); 226 227 if (D->hasAttr<AArch64VectorPcsAttr>()) 228 return CC_AArch64VectorCall; 229 230 if (D->hasAttr<IntelOclBiccAttr>()) 231 return CC_IntelOclBicc; 232 233 if (D->hasAttr<MSABIAttr>()) 234 return IsWindows ? CC_C : CC_Win64; 235 236 if (D->hasAttr<SysVABIAttr>()) 237 return IsWindows ? CC_X86_64SysV : CC_C; 238 239 if (D->hasAttr<PreserveMostAttr>()) 240 return CC_PreserveMost; 241 242 if (D->hasAttr<PreserveAllAttr>()) 243 return CC_PreserveAll; 244 245 return CC_C; 246 } 247 248 /// Arrange the argument and result information for a call to an 249 /// unknown C++ non-static member function of the given abstract type. 250 /// (A null RD means we don't have any meaningful "this" argument type, 251 /// so fall back to a generic pointer type). 252 /// The member function must be an ordinary function, i.e. not a 253 /// constructor or destructor. 254 const CGFunctionInfo & 255 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD, 256 const FunctionProtoType *FTP, 257 const CXXMethodDecl *MD) { 258 SmallVector<CanQualType, 16> argTypes; 259 260 // Add the 'this' pointer. 
261 argTypes.push_back(DeriveThisType(RD, MD)); 262 263 return ::arrangeLLVMFunctionInfo( 264 *this, true, argTypes, 265 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>()); 266 } 267 268 /// Set calling convention for CUDA/HIP kernel. 269 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM, 270 const FunctionDecl *FD) { 271 if (FD->hasAttr<CUDAGlobalAttr>()) { 272 const FunctionType *FT = FTy->getAs<FunctionType>(); 273 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT); 274 FTy = FT->getCanonicalTypeUnqualified(); 275 } 276 } 277 278 /// Arrange the argument and result information for a declaration or 279 /// definition of the given C++ non-static member function. The 280 /// member function must be an ordinary function, i.e. not a 281 /// constructor or destructor. 282 const CGFunctionInfo & 283 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) { 284 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!"); 285 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!"); 286 287 CanQualType FT = GetFormalType(MD).getAs<Type>(); 288 setCUDAKernelCallingConvention(FT, CGM, MD); 289 auto prototype = FT.getAs<FunctionProtoType>(); 290 291 if (MD->isInstance()) { 292 // The abstract case is perfectly fine. 293 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD); 294 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD); 295 } 296 297 return arrangeFreeFunctionType(prototype); 298 } 299 300 bool CodeGenTypes::inheritingCtorHasParams( 301 const InheritedConstructor &Inherited, CXXCtorType Type) { 302 // Parameters are unnecessary if we're constructing a base class subobject 303 // and the inherited constructor lives in a virtual base. 304 return Type == Ctor_Complete || 305 !Inherited.getShadowDecl()->constructsVirtualBase() || 306 !Target.getCXXABI().hasConstructorVariants(); 307 } 308 309 const CGFunctionInfo & 310 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) { 311 auto *MD = cast<CXXMethodDecl>(GD.getDecl()); 312 313 SmallVector<CanQualType, 16> argTypes; 314 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 315 argTypes.push_back(DeriveThisType(MD->getParent(), MD)); 316 317 bool PassParams = true; 318 319 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) { 320 // A base class inheriting constructor doesn't get forwarded arguments 321 // needed to construct a virtual base (or base class thereof). 322 if (auto Inherited = CD->getInheritedConstructor()) 323 PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType()); 324 } 325 326 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 327 328 // Add the formal parameters. 329 if (PassParams) 330 appendParameterTypes(*this, argTypes, paramInfos, FTP); 331 332 CGCXXABI::AddedStructorArgCounts AddedArgs = 333 TheCXXABI.buildStructorSignature(GD, argTypes); 334 if (!paramInfos.empty()) { 335 // Note: prefix implies after the first param. 336 if (AddedArgs.Prefix) 337 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix, 338 FunctionProtoType::ExtParameterInfo{}); 339 if (AddedArgs.Suffix) 340 paramInfos.append(AddedArgs.Suffix, 341 FunctionProtoType::ExtParameterInfo{}); 342 } 343 344 RequiredArgs required = 345 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size()) 346 : RequiredArgs::All); 347 348 FunctionType::ExtInfo extInfo = FTP->getExtInfo(); 349 CanQualType resultType = TheCXXABI.HasThisReturn(GD) 350 ? argTypes.front() 351 : TheCXXABI.hasMostDerivedReturn(GD) 352 ? 
CGM.getContext().VoidPtrTy 353 : Context.VoidTy; 354 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true, 355 /*chainCall=*/false, argTypes, extInfo, 356 paramInfos, required); 357 } 358 359 static SmallVector<CanQualType, 16> 360 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) { 361 SmallVector<CanQualType, 16> argTypes; 362 for (auto &arg : args) 363 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty)); 364 return argTypes; 365 } 366 367 static SmallVector<CanQualType, 16> 368 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) { 369 SmallVector<CanQualType, 16> argTypes; 370 for (auto &arg : args) 371 argTypes.push_back(ctx.getCanonicalParamType(arg->getType())); 372 return argTypes; 373 } 374 375 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> 376 getExtParameterInfosForCall(const FunctionProtoType *proto, 377 unsigned prefixArgs, unsigned totalArgs) { 378 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result; 379 if (proto->hasExtParameterInfos()) { 380 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs); 381 } 382 return result; 383 } 384 385 /// Arrange a call to a C++ method, passing the given arguments. 386 /// 387 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` 388 /// parameter. 389 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of 390 /// args. 391 /// PassProtoArgs indicates whether `args` has args for the parameters in the 392 /// given CXXConstructorDecl. 393 const CGFunctionInfo & 394 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args, 395 const CXXConstructorDecl *D, 396 CXXCtorType CtorKind, 397 unsigned ExtraPrefixArgs, 398 unsigned ExtraSuffixArgs, 399 bool PassProtoArgs) { 400 // FIXME: Kill copy. 401 SmallVector<CanQualType, 16> ArgTypes; 402 for (const auto &Arg : args) 403 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 404 405 // +1 for implicit this, which should always be args[0]. 406 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs; 407 408 CanQual<FunctionProtoType> FPT = GetFormalType(D); 409 RequiredArgs Required = PassProtoArgs 410 ? RequiredArgs::forPrototypePlus( 411 FPT, TotalPrefixArgs + ExtraSuffixArgs) 412 : RequiredArgs::All; 413 414 GlobalDecl GD(D, CtorKind); 415 CanQualType ResultType = TheCXXABI.HasThisReturn(GD) 416 ? ArgTypes.front() 417 : TheCXXABI.hasMostDerivedReturn(GD) 418 ? CGM.getContext().VoidPtrTy 419 : Context.VoidTy; 420 421 FunctionType::ExtInfo Info = FPT->getExtInfo(); 422 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos; 423 // If the prototype args are elided, we should only have ABI-specific args, 424 // which never have param info. 425 if (PassProtoArgs && FPT->hasExtParameterInfos()) { 426 // ABI-specific suffix arguments are treated the same as variadic arguments. 427 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs, 428 ArgTypes.size()); 429 } 430 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true, 431 /*chainCall=*/false, ArgTypes, Info, 432 ParamInfos, Required); 433 } 434 435 /// Arrange the argument and result information for the declaration or 436 /// definition of the given function. 
437 const CGFunctionInfo & 438 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) { 439 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) 440 if (MD->isInstance()) 441 return arrangeCXXMethodDeclaration(MD); 442 443 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified(); 444 445 assert(isa<FunctionType>(FTy)); 446 setCUDAKernelCallingConvention(FTy, CGM, FD); 447 448 // When declaring a function without a prototype, always use a 449 // non-variadic type. 450 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) { 451 return arrangeLLVMFunctionInfo( 452 noProto->getReturnType(), /*instanceMethod=*/false, 453 /*chainCall=*/false, None, noProto->getExtInfo(), {},RequiredArgs::All); 454 } 455 456 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>()); 457 } 458 459 /// Arrange the argument and result information for the declaration or 460 /// definition of an Objective-C method. 461 const CGFunctionInfo & 462 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) { 463 // It happens that this is the same as a call with no optional 464 // arguments, except also using the formal 'self' type. 465 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType()); 466 } 467 468 /// Arrange the argument and result information for the function type 469 /// through which to perform a send to the given Objective-C method, 470 /// using the given receiver type. The receiver type is not always 471 /// the 'self' type of the method or even an Objective-C pointer type. 472 /// This is *not* the right method for actually performing such a 473 /// message send, due to the possibility of optional arguments. 474 const CGFunctionInfo & 475 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD, 476 QualType receiverType) { 477 SmallVector<CanQualType, 16> argTys; 478 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2); 479 argTys.push_back(Context.getCanonicalParamType(receiverType)); 480 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType())); 481 // FIXME: Kill copy? 482 for (const auto *I : MD->parameters()) { 483 argTys.push_back(Context.getCanonicalParamType(I->getType())); 484 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape( 485 I->hasAttr<NoEscapeAttr>()); 486 extParamInfos.push_back(extParamInfo); 487 } 488 489 FunctionType::ExtInfo einfo; 490 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows(); 491 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows)); 492 493 if (getContext().getLangOpts().ObjCAutoRefCount && 494 MD->hasAttr<NSReturnsRetainedAttr>()) 495 einfo = einfo.withProducesResult(true); 496 497 RequiredArgs required = 498 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All); 499 500 return arrangeLLVMFunctionInfo( 501 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false, 502 /*chainCall=*/false, argTys, einfo, extParamInfos, required); 503 } 504 505 const CGFunctionInfo & 506 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType, 507 const CallArgList &args) { 508 auto argTypes = getArgTypesForCall(Context, args); 509 FunctionType::ExtInfo einfo; 510 511 return arrangeLLVMFunctionInfo( 512 GetReturnType(returnType), /*instanceMethod=*/false, 513 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All); 514 } 515 516 const CGFunctionInfo & 517 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) { 518 // FIXME: Do we need to handle ObjCMethodDecl? 
519 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl()); 520 521 if (isa<CXXConstructorDecl>(GD.getDecl()) || 522 isa<CXXDestructorDecl>(GD.getDecl())) 523 return arrangeCXXStructorDeclaration(GD); 524 525 return arrangeFunctionDeclaration(FD); 526 } 527 528 /// Arrange a thunk that takes 'this' as the first parameter followed by 529 /// varargs. Return a void pointer, regardless of the actual return type. 530 /// The body of the thunk will end in a musttail call to a function of the 531 /// correct type, and the caller will bitcast the function to the correct 532 /// prototype. 533 const CGFunctionInfo & 534 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) { 535 assert(MD->isVirtual() && "only methods have thunks"); 536 CanQual<FunctionProtoType> FTP = GetFormalType(MD); 537 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)}; 538 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false, 539 /*chainCall=*/false, ArgTys, 540 FTP->getExtInfo(), {}, RequiredArgs(1)); 541 } 542 543 const CGFunctionInfo & 544 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD, 545 CXXCtorType CT) { 546 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure); 547 548 CanQual<FunctionProtoType> FTP = GetFormalType(CD); 549 SmallVector<CanQualType, 2> ArgTys; 550 const CXXRecordDecl *RD = CD->getParent(); 551 ArgTys.push_back(DeriveThisType(RD, CD)); 552 if (CT == Ctor_CopyingClosure) 553 ArgTys.push_back(*FTP->param_type_begin()); 554 if (RD->getNumVBases() > 0) 555 ArgTys.push_back(Context.IntTy); 556 CallingConv CC = Context.getDefaultCallingConvention( 557 /*IsVariadic=*/false, /*IsCXXMethod=*/true); 558 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true, 559 /*chainCall=*/false, ArgTys, 560 FunctionType::ExtInfo(CC), {}, 561 RequiredArgs::All); 562 } 563 564 /// Arrange a call as unto a free function, except possibly with an 565 /// additional number of formal parameters considered required. 566 static const CGFunctionInfo & 567 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT, 568 CodeGenModule &CGM, 569 const CallArgList &args, 570 const FunctionType *fnType, 571 unsigned numExtraRequiredArgs, 572 bool chainCall) { 573 assert(args.size() >= numExtraRequiredArgs); 574 575 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 576 577 // In most cases, there are no optional arguments. 578 RequiredArgs required = RequiredArgs::All; 579 580 // If we have a variadic prototype, the required arguments are the 581 // extra prefix plus the arguments in the prototype. 582 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) { 583 if (proto->isVariadic()) 584 required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs); 585 586 if (proto->hasExtParameterInfos()) 587 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs, 588 args.size()); 589 590 // If we don't have a prototype at all, but we're supposed to 591 // explicitly use the variadic convention for unprototyped calls, 592 // treat all of the arguments as required but preserve the nominal 593 // possibility of variadics. 594 } else if (CGM.getTargetCodeGenInfo() 595 .isNoProtoCallVariadic(args, 596 cast<FunctionNoProtoType>(fnType))) { 597 required = RequiredArgs(args.size()); 598 } 599 600 // FIXME: Kill copy. 
601 SmallVector<CanQualType, 16> argTypes; 602 for (const auto &arg : args) 603 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty)); 604 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()), 605 /*instanceMethod=*/false, chainCall, 606 argTypes, fnType->getExtInfo(), paramInfos, 607 required); 608 } 609 610 /// Figure out the rules for calling a function with the given formal 611 /// type using the given arguments. The arguments are necessary 612 /// because the function might be unprototyped, in which case it's 613 /// target-dependent in crazy ways. 614 const CGFunctionInfo & 615 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args, 616 const FunctionType *fnType, 617 bool chainCall) { 618 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 619 chainCall ? 1 : 0, chainCall); 620 } 621 622 /// A block function is essentially a free function with an 623 /// extra implicit argument. 624 const CGFunctionInfo & 625 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args, 626 const FunctionType *fnType) { 627 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1, 628 /*chainCall=*/false); 629 } 630 631 const CGFunctionInfo & 632 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto, 633 const FunctionArgList ¶ms) { 634 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size()); 635 auto argTypes = getArgTypesForDeclaration(Context, params); 636 637 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()), 638 /*instanceMethod*/ false, /*chainCall*/ false, 639 argTypes, proto->getExtInfo(), paramInfos, 640 RequiredArgs::forPrototypePlus(proto, 1)); 641 } 642 643 const CGFunctionInfo & 644 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType, 645 const CallArgList &args) { 646 // FIXME: Kill copy. 647 SmallVector<CanQualType, 16> argTypes; 648 for (const auto &Arg : args) 649 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty)); 650 return arrangeLLVMFunctionInfo( 651 GetReturnType(resultType), /*instanceMethod=*/false, 652 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(), 653 /*paramInfos=*/ {}, RequiredArgs::All); 654 } 655 656 const CGFunctionInfo & 657 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType, 658 const FunctionArgList &args) { 659 auto argTypes = getArgTypesForDeclaration(Context, args); 660 661 return arrangeLLVMFunctionInfo( 662 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false, 663 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); 664 } 665 666 const CGFunctionInfo & 667 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType, 668 ArrayRef<CanQualType> argTypes) { 669 return arrangeLLVMFunctionInfo( 670 resultType, /*instanceMethod=*/false, /*chainCall=*/false, 671 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All); 672 } 673 674 /// Arrange a call to a C++ method, passing the given arguments. 675 /// 676 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It 677 /// does not count `this`. 678 const CGFunctionInfo & 679 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args, 680 const FunctionProtoType *proto, 681 RequiredArgs required, 682 unsigned numPrefixArgs) { 683 assert(numPrefixArgs + 1 <= args.size() && 684 "Emitting a call with less args than the required prefix?"); 685 // Add one to account for `this`. It's a bit awkward here, but we don't count 686 // `this` in similar places elsewhere. 
687 auto paramInfos = 688 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size()); 689 690 // FIXME: Kill copy. 691 auto argTypes = getArgTypesForCall(Context, args); 692 693 FunctionType::ExtInfo info = proto->getExtInfo(); 694 return arrangeLLVMFunctionInfo( 695 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true, 696 /*chainCall=*/false, argTypes, info, paramInfos, required); 697 } 698 699 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() { 700 return arrangeLLVMFunctionInfo( 701 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false, 702 None, FunctionType::ExtInfo(), {}, RequiredArgs::All); 703 } 704 705 const CGFunctionInfo & 706 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature, 707 const CallArgList &args) { 708 assert(signature.arg_size() <= args.size()); 709 if (signature.arg_size() == args.size()) 710 return signature; 711 712 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos; 713 auto sigParamInfos = signature.getExtParameterInfos(); 714 if (!sigParamInfos.empty()) { 715 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end()); 716 paramInfos.resize(args.size()); 717 } 718 719 auto argTypes = getArgTypesForCall(Context, args); 720 721 assert(signature.getRequiredArgs().allowsOptionalArgs()); 722 return arrangeLLVMFunctionInfo(signature.getReturnType(), 723 signature.isInstanceMethod(), 724 signature.isChainCall(), 725 argTypes, 726 signature.getExtInfo(), 727 paramInfos, 728 signature.getRequiredArgs()); 729 } 730 731 namespace clang { 732 namespace CodeGen { 733 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI); 734 } 735 } 736 737 /// Arrange the argument and result information for an abstract value 738 /// of a given function type. This is the method which all of the 739 /// above functions ultimately defer to. 740 const CGFunctionInfo & 741 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType, 742 bool instanceMethod, 743 bool chainCall, 744 ArrayRef<CanQualType> argTypes, 745 FunctionType::ExtInfo info, 746 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos, 747 RequiredArgs required) { 748 assert(llvm::all_of(argTypes, 749 [](CanQualType T) { return T.isCanonicalAsParam(); })); 750 751 // Lookup or create unique function info. 752 llvm::FoldingSetNodeID ID; 753 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos, 754 required, resultType, argTypes); 755 756 void *insertPos = nullptr; 757 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos); 758 if (FI) 759 return *FI; 760 761 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC()); 762 763 // Construct the function info. We co-allocate the ArgInfos. 764 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info, 765 paramInfos, resultType, argTypes, required); 766 FunctionInfos.InsertNode(FI, insertPos); 767 768 bool inserted = FunctionsBeingProcessed.insert(FI).second; 769 (void)inserted; 770 assert(inserted && "Recursively being processed?"); 771 772 // Compute ABI information. 773 if (CC == llvm::CallingConv::SPIR_KERNEL) { 774 // Force target independent argument handling for the host visible 775 // kernel functions. 776 computeSPIRKernelABIInfo(CGM, *FI); 777 } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) { 778 swiftcall::computeABIInfo(CGM, *FI); 779 } else { 780 getABIInfo().computeInfo(*FI); 781 } 782 783 // Loop over all of the computed argument and return value info. 
If any of 784 // them are direct or extend without a specified coerce type, specify the 785 // default now. 786 ABIArgInfo &retInfo = FI->getReturnInfo(); 787 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr) 788 retInfo.setCoerceToType(ConvertType(FI->getReturnType())); 789 790 for (auto &I : FI->arguments()) 791 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr) 792 I.info.setCoerceToType(ConvertType(I.type)); 793 794 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased; 795 assert(erased && "Not in set?"); 796 797 return *FI; 798 } 799 800 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, 801 bool instanceMethod, 802 bool chainCall, 803 const FunctionType::ExtInfo &info, 804 ArrayRef<ExtParameterInfo> paramInfos, 805 CanQualType resultType, 806 ArrayRef<CanQualType> argTypes, 807 RequiredArgs required) { 808 assert(paramInfos.empty() || paramInfos.size() == argTypes.size()); 809 assert(!required.allowsOptionalArgs() || 810 required.getNumRequiredArgs() <= argTypes.size()); 811 812 void *buffer = 813 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>( 814 argTypes.size() + 1, paramInfos.size())); 815 816 CGFunctionInfo *FI = new(buffer) CGFunctionInfo(); 817 FI->CallingConvention = llvmCC; 818 FI->EffectiveCallingConvention = llvmCC; 819 FI->ASTCallingConvention = info.getCC(); 820 FI->InstanceMethod = instanceMethod; 821 FI->ChainCall = chainCall; 822 FI->CmseNSCall = info.getCmseNSCall(); 823 FI->NoReturn = info.getNoReturn(); 824 FI->ReturnsRetained = info.getProducesResult(); 825 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs(); 826 FI->NoCfCheck = info.getNoCfCheck(); 827 FI->Required = required; 828 FI->HasRegParm = info.getHasRegParm(); 829 FI->RegParm = info.getRegParm(); 830 FI->ArgStruct = nullptr; 831 FI->ArgStructAlign = 0; 832 FI->NumArgs = argTypes.size(); 833 FI->HasExtParameterInfos = !paramInfos.empty(); 834 FI->getArgsBuffer()[0].type = resultType; 835 for (unsigned i = 0, e = argTypes.size(); i != e; ++i) 836 FI->getArgsBuffer()[i + 1].type = argTypes[i]; 837 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i) 838 FI->getExtParameterInfosBuffer()[i] = paramInfos[i]; 839 return FI; 840 } 841 842 /***/ 843 844 namespace { 845 // ABIArgInfo::Expand implementation. 846 847 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded. 848 struct TypeExpansion { 849 enum TypeExpansionKind { 850 // Elements of constant arrays are expanded recursively. 851 TEK_ConstantArray, 852 // Record fields are expanded recursively (but if record is a union, only 853 // the field with the largest size is expanded). 854 TEK_Record, 855 // For complex types, real and imaginary parts are expanded recursively. 856 TEK_Complex, 857 // All other types are not expandable. 
858 TEK_None 859 }; 860 861 const TypeExpansionKind Kind; 862 863 TypeExpansion(TypeExpansionKind K) : Kind(K) {} 864 virtual ~TypeExpansion() {} 865 }; 866 867 struct ConstantArrayExpansion : TypeExpansion { 868 QualType EltTy; 869 uint64_t NumElts; 870 871 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts) 872 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {} 873 static bool classof(const TypeExpansion *TE) { 874 return TE->Kind == TEK_ConstantArray; 875 } 876 }; 877 878 struct RecordExpansion : TypeExpansion { 879 SmallVector<const CXXBaseSpecifier *, 1> Bases; 880 881 SmallVector<const FieldDecl *, 1> Fields; 882 883 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases, 884 SmallVector<const FieldDecl *, 1> &&Fields) 885 : TypeExpansion(TEK_Record), Bases(std::move(Bases)), 886 Fields(std::move(Fields)) {} 887 static bool classof(const TypeExpansion *TE) { 888 return TE->Kind == TEK_Record; 889 } 890 }; 891 892 struct ComplexExpansion : TypeExpansion { 893 QualType EltTy; 894 895 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {} 896 static bool classof(const TypeExpansion *TE) { 897 return TE->Kind == TEK_Complex; 898 } 899 }; 900 901 struct NoExpansion : TypeExpansion { 902 NoExpansion() : TypeExpansion(TEK_None) {} 903 static bool classof(const TypeExpansion *TE) { 904 return TE->Kind == TEK_None; 905 } 906 }; 907 } // namespace 908 909 static std::unique_ptr<TypeExpansion> 910 getTypeExpansion(QualType Ty, const ASTContext &Context) { 911 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) { 912 return std::make_unique<ConstantArrayExpansion>( 913 AT->getElementType(), AT->getSize().getZExtValue()); 914 } 915 if (const RecordType *RT = Ty->getAs<RecordType>()) { 916 SmallVector<const CXXBaseSpecifier *, 1> Bases; 917 SmallVector<const FieldDecl *, 1> Fields; 918 const RecordDecl *RD = RT->getDecl(); 919 assert(!RD->hasFlexibleArrayMember() && 920 "Cannot expand structure with flexible array."); 921 if (RD->isUnion()) { 922 // Unions can be here only in degenerative cases - all the fields are same 923 // after flattening. Thus we have to use the "largest" field. 
924 const FieldDecl *LargestFD = nullptr; 925 CharUnits UnionSize = CharUnits::Zero(); 926 927 for (const auto *FD : RD->fields()) { 928 if (FD->isZeroLengthBitField(Context)) 929 continue; 930 assert(!FD->isBitField() && 931 "Cannot expand structure with bit-field members."); 932 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType()); 933 if (UnionSize < FieldSize) { 934 UnionSize = FieldSize; 935 LargestFD = FD; 936 } 937 } 938 if (LargestFD) 939 Fields.push_back(LargestFD); 940 } else { 941 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) { 942 assert(!CXXRD->isDynamicClass() && 943 "cannot expand vtable pointers in dynamic classes"); 944 for (const CXXBaseSpecifier &BS : CXXRD->bases()) 945 Bases.push_back(&BS); 946 } 947 948 for (const auto *FD : RD->fields()) { 949 if (FD->isZeroLengthBitField(Context)) 950 continue; 951 assert(!FD->isBitField() && 952 "Cannot expand structure with bit-field members."); 953 Fields.push_back(FD); 954 } 955 } 956 return std::make_unique<RecordExpansion>(std::move(Bases), 957 std::move(Fields)); 958 } 959 if (const ComplexType *CT = Ty->getAs<ComplexType>()) { 960 return std::make_unique<ComplexExpansion>(CT->getElementType()); 961 } 962 return std::make_unique<NoExpansion>(); 963 } 964 965 static int getExpansionSize(QualType Ty, const ASTContext &Context) { 966 auto Exp = getTypeExpansion(Ty, Context); 967 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 968 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context); 969 } 970 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 971 int Res = 0; 972 for (auto BS : RExp->Bases) 973 Res += getExpansionSize(BS->getType(), Context); 974 for (auto FD : RExp->Fields) 975 Res += getExpansionSize(FD->getType(), Context); 976 return Res; 977 } 978 if (isa<ComplexExpansion>(Exp.get())) 979 return 2; 980 assert(isa<NoExpansion>(Exp.get())); 981 return 1; 982 } 983 984 void 985 CodeGenTypes::getExpandedTypes(QualType Ty, 986 SmallVectorImpl<llvm::Type *>::iterator &TI) { 987 auto Exp = getTypeExpansion(Ty, Context); 988 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 989 for (int i = 0, n = CAExp->NumElts; i < n; i++) { 990 getExpandedTypes(CAExp->EltTy, TI); 991 } 992 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 993 for (auto BS : RExp->Bases) 994 getExpandedTypes(BS->getType(), TI); 995 for (auto FD : RExp->Fields) 996 getExpandedTypes(FD->getType(), TI); 997 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) { 998 llvm::Type *EltTy = ConvertType(CExp->EltTy); 999 *TI++ = EltTy; 1000 *TI++ = EltTy; 1001 } else { 1002 assert(isa<NoExpansion>(Exp.get())); 1003 *TI++ = ConvertType(Ty); 1004 } 1005 } 1006 1007 static void forConstantArrayExpansion(CodeGenFunction &CGF, 1008 ConstantArrayExpansion *CAE, 1009 Address BaseAddr, 1010 llvm::function_ref<void(Address)> Fn) { 1011 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy); 1012 CharUnits EltAlign = 1013 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize); 1014 1015 for (int i = 0, n = CAE->NumElts; i < n; i++) { 1016 llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32( 1017 BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i); 1018 Fn(Address(EltAddr, EltAlign)); 1019 } 1020 } 1021 1022 void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV, 1023 llvm::Function::arg_iterator &AI) { 1024 assert(LV.isSimple() && 1025 "Unexpected non-simple lvalue during struct expansion."); 1026 1027 auto Exp = getTypeExpansion(Ty, getContext()); 1028 if (auto CAExp 
= dyn_cast<ConstantArrayExpansion>(Exp.get())) { 1029 forConstantArrayExpansion( 1030 *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) { 1031 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy); 1032 ExpandTypeFromArgs(CAExp->EltTy, LV, AI); 1033 }); 1034 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 1035 Address This = LV.getAddress(*this); 1036 for (const CXXBaseSpecifier *BS : RExp->Bases) { 1037 // Perform a single step derived-to-base conversion. 1038 Address Base = 1039 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 1040 /*NullCheckValue=*/false, SourceLocation()); 1041 LValue SubLV = MakeAddrLValue(Base, BS->getType()); 1042 1043 // Recurse onto bases. 1044 ExpandTypeFromArgs(BS->getType(), SubLV, AI); 1045 } 1046 for (auto FD : RExp->Fields) { 1047 // FIXME: What are the right qualifiers here? 1048 LValue SubLV = EmitLValueForFieldInitialization(LV, FD); 1049 ExpandTypeFromArgs(FD->getType(), SubLV, AI); 1050 } 1051 } else if (isa<ComplexExpansion>(Exp.get())) { 1052 auto realValue = &*AI++; 1053 auto imagValue = &*AI++; 1054 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true); 1055 } else { 1056 // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a 1057 // primitive store. 1058 assert(isa<NoExpansion>(Exp.get())); 1059 if (LV.isBitField()) 1060 EmitStoreThroughLValue(RValue::get(&*AI++), LV); 1061 else 1062 EmitStoreOfScalar(&*AI++, LV); 1063 } 1064 } 1065 1066 void CodeGenFunction::ExpandTypeToArgs( 1067 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy, 1068 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) { 1069 auto Exp = getTypeExpansion(Ty, getContext()); 1070 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) { 1071 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) 1072 : Arg.getKnownRValue().getAggregateAddress(); 1073 forConstantArrayExpansion( 1074 *this, CAExp, Addr, [&](Address EltAddr) { 1075 CallArg EltArg = CallArg( 1076 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()), 1077 CAExp->EltTy); 1078 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs, 1079 IRCallArgPos); 1080 }); 1081 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) { 1082 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this) 1083 : Arg.getKnownRValue().getAggregateAddress(); 1084 for (const CXXBaseSpecifier *BS : RExp->Bases) { 1085 // Perform a single step derived-to-base conversion. 1086 Address Base = 1087 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1, 1088 /*NullCheckValue=*/false, SourceLocation()); 1089 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType()); 1090 1091 // Recurse onto bases. 1092 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs, 1093 IRCallArgPos); 1094 } 1095 1096 LValue LV = MakeAddrLValue(This, Ty); 1097 for (auto FD : RExp->Fields) { 1098 CallArg FldArg = 1099 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType()); 1100 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs, 1101 IRCallArgPos); 1102 } 1103 } else if (isa<ComplexExpansion>(Exp.get())) { 1104 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal(); 1105 IRCallArgs[IRCallArgPos++] = CV.first; 1106 IRCallArgs[IRCallArgPos++] = CV.second; 1107 } else { 1108 assert(isa<NoExpansion>(Exp.get())); 1109 auto RV = Arg.getKnownRValue(); 1110 assert(RV.isScalar() && 1111 "Unexpected non-scalar rvalue during struct expansion."); 1112 1113 // Insert a bitcast as needed. 
1114 llvm::Value *V = RV.getScalarVal(); 1115 if (IRCallArgPos < IRFuncTy->getNumParams() && 1116 V->getType() != IRFuncTy->getParamType(IRCallArgPos)) 1117 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos)); 1118 1119 IRCallArgs[IRCallArgPos++] = V; 1120 } 1121 } 1122 1123 /// Create a temporary allocation for the purposes of coercion. 1124 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty, 1125 CharUnits MinAlign, 1126 const Twine &Name = "tmp") { 1127 // Don't use an alignment that's worse than what LLVM would prefer. 1128 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty); 1129 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign)); 1130 1131 return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce"); 1132 } 1133 1134 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are 1135 /// accessing some number of bytes out of it, try to gep into the struct to get 1136 /// at its inner goodness. Dive as deep as possible without entering an element 1137 /// with an in-memory size smaller than DstSize. 1138 static Address 1139 EnterStructPointerForCoercedAccess(Address SrcPtr, 1140 llvm::StructType *SrcSTy, 1141 uint64_t DstSize, CodeGenFunction &CGF) { 1142 // We can't dive into a zero-element struct. 1143 if (SrcSTy->getNumElements() == 0) return SrcPtr; 1144 1145 llvm::Type *FirstElt = SrcSTy->getElementType(0); 1146 1147 // If the first elt is at least as large as what we're looking for, or if the 1148 // first element is the same size as the whole struct, we can enter it. The 1149 // comparison must be made on the store size and not the alloca size. Using 1150 // the alloca size may overstate the size of the load. 1151 uint64_t FirstEltSize = 1152 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt); 1153 if (FirstEltSize < DstSize && 1154 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy)) 1155 return SrcPtr; 1156 1157 // GEP into the first element. 1158 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive"); 1159 1160 // If the first element is a struct, recurse. 1161 llvm::Type *SrcTy = SrcPtr.getElementType(); 1162 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) 1163 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF); 1164 1165 return SrcPtr; 1166 } 1167 1168 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both 1169 /// are either integers or pointers. This does a truncation of the value if it 1170 /// is too large or a zero extension if it is too small. 1171 /// 1172 /// This behaves as if the value were coerced through memory, so on big-endian 1173 /// targets the high bits are preserved in a truncation, while little-endian 1174 /// targets preserve the low bits. 1175 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val, 1176 llvm::Type *Ty, 1177 CodeGenFunction &CGF) { 1178 if (Val->getType() == Ty) 1179 return Val; 1180 1181 if (isa<llvm::PointerType>(Val->getType())) { 1182 // If this is Pointer->Pointer avoid conversion to and from int. 1183 if (isa<llvm::PointerType>(Ty)) 1184 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val"); 1185 1186 // Convert the pointer to an integer so we can play with its width. 
1187 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi"); 1188 } 1189 1190 llvm::Type *DestIntTy = Ty; 1191 if (isa<llvm::PointerType>(DestIntTy)) 1192 DestIntTy = CGF.IntPtrTy; 1193 1194 if (Val->getType() != DestIntTy) { 1195 const llvm::DataLayout &DL = CGF.CGM.getDataLayout(); 1196 if (DL.isBigEndian()) { 1197 // Preserve the high bits on big-endian targets. 1198 // That is what memory coercion does. 1199 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType()); 1200 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy); 1201 1202 if (SrcSize > DstSize) { 1203 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits"); 1204 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii"); 1205 } else { 1206 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii"); 1207 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits"); 1208 } 1209 } else { 1210 // Little-endian targets preserve the low bits. No shifts required. 1211 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii"); 1212 } 1213 } 1214 1215 if (isa<llvm::PointerType>(Ty)) 1216 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip"); 1217 return Val; 1218 } 1219 1220 1221 1222 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as 1223 /// a pointer to an object of type \arg Ty, known to be aligned to 1224 /// \arg SrcAlign bytes. 1225 /// 1226 /// This safely handles the case when the src type is smaller than the 1227 /// destination type; in this situation the values of bits which not 1228 /// present in the src are undefined. 1229 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty, 1230 CodeGenFunction &CGF) { 1231 llvm::Type *SrcTy = Src.getElementType(); 1232 1233 // If SrcTy and Ty are the same, just do a load. 1234 if (SrcTy == Ty) 1235 return CGF.Builder.CreateLoad(Src); 1236 1237 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty); 1238 1239 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) { 1240 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, 1241 DstSize.getFixedSize(), CGF); 1242 SrcTy = Src.getElementType(); 1243 } 1244 1245 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 1246 1247 // If the source and destination are integer or pointer types, just do an 1248 // extension or truncation to the desired type. 1249 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) && 1250 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) { 1251 llvm::Value *Load = CGF.Builder.CreateLoad(Src); 1252 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF); 1253 } 1254 1255 // If load is legal, just bitcast the src pointer. 1256 if (!SrcSize.isScalable() && !DstSize.isScalable() && 1257 SrcSize.getFixedSize() >= DstSize.getFixedSize()) { 1258 // Generally SrcSize is never greater than DstSize, since this means we are 1259 // losing bits. However, this can happen in cases where the structure has 1260 // additional padding, for example due to a user specified alignment. 1261 // 1262 // FIXME: Assert that we aren't truncating non-padding bits when have access 1263 // to that information. 1264 Src = CGF.Builder.CreateElementBitCast(Src, Ty); 1265 return CGF.Builder.CreateLoad(Src); 1266 } 1267 1268 // If coercing a fixed vector to a scalable vector for ABI compatibility, and 1269 // the types match, use the llvm.experimental.vector.insert intrinsic to 1270 // perform the conversion. 
1271 if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) { 1272 if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) { 1273 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate 1274 // vector, use a vector insert and bitcast the result. 1275 bool NeedsBitcast = false; 1276 auto PredType = 1277 llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16); 1278 llvm::Type *OrigType = Ty; 1279 if (ScalableDst == PredType && 1280 FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) { 1281 ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2); 1282 NeedsBitcast = true; 1283 } 1284 if (ScalableDst->getElementType() == FixedSrc->getElementType()) { 1285 auto *Load = CGF.Builder.CreateLoad(Src); 1286 auto *UndefVec = llvm::UndefValue::get(ScalableDst); 1287 auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty); 1288 llvm::Value *Result = CGF.Builder.CreateInsertVector( 1289 ScalableDst, UndefVec, Load, Zero, "castScalableSve"); 1290 if (NeedsBitcast) 1291 Result = CGF.Builder.CreateBitCast(Result, OrigType); 1292 return Result; 1293 } 1294 } 1295 } 1296 1297 // Otherwise do coercion through memory. This is stupid, but simple. 1298 Address Tmp = 1299 CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName()); 1300 CGF.Builder.CreateMemCpy( 1301 Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(), 1302 Src.getAlignment().getAsAlign(), 1303 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize())); 1304 return CGF.Builder.CreateLoad(Tmp); 1305 } 1306 1307 // Function to store a first-class aggregate into memory. We prefer to 1308 // store the elements rather than the aggregate to be more friendly to 1309 // fast-isel. 1310 // FIXME: Do we need to recurse here? 1311 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest, 1312 bool DestIsVolatile) { 1313 // Prefer scalar stores to first-class aggregate stores. 1314 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) { 1315 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 1316 Address EltPtr = Builder.CreateStructGEP(Dest, i); 1317 llvm::Value *Elt = Builder.CreateExtractValue(Val, i); 1318 Builder.CreateStore(Elt, EltPtr, DestIsVolatile); 1319 } 1320 } else { 1321 Builder.CreateStore(Val, Dest, DestIsVolatile); 1322 } 1323 } 1324 1325 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src, 1326 /// where the source and destination may have different types. The 1327 /// destination is known to be aligned to \arg DstAlign bytes. 1328 /// 1329 /// This safely handles the case when the src type is larger than the 1330 /// destination type; the upper bits of the src will be lost. 
1331 static void CreateCoercedStore(llvm::Value *Src, 1332 Address Dst, 1333 bool DstIsVolatile, 1334 CodeGenFunction &CGF) { 1335 llvm::Type *SrcTy = Src->getType(); 1336 llvm::Type *DstTy = Dst.getElementType(); 1337 if (SrcTy == DstTy) { 1338 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1339 return; 1340 } 1341 1342 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy); 1343 1344 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) { 1345 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, 1346 SrcSize.getFixedSize(), CGF); 1347 DstTy = Dst.getElementType(); 1348 } 1349 1350 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy); 1351 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy); 1352 if (SrcPtrTy && DstPtrTy && 1353 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) { 1354 Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy); 1355 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1356 return; 1357 } 1358 1359 // If the source and destination are integer or pointer types, just do an 1360 // extension or truncation to the desired type. 1361 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) && 1362 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) { 1363 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF); 1364 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile); 1365 return; 1366 } 1367 1368 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy); 1369 1370 // If store is legal, just bitcast the src pointer. 1371 if (isa<llvm::ScalableVectorType>(SrcTy) || 1372 isa<llvm::ScalableVectorType>(DstTy) || 1373 SrcSize.getFixedSize() <= DstSize.getFixedSize()) { 1374 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy); 1375 CGF.EmitAggregateStore(Src, Dst, DstIsVolatile); 1376 } else { 1377 // Otherwise do coercion through memory. This is stupid, but 1378 // simple. 1379 1380 // Generally SrcSize is never greater than DstSize, since this means we are 1381 // losing bits. However, this can happen in cases where the structure has 1382 // additional padding, for example due to a user specified alignment. 1383 // 1384 // FIXME: Assert that we aren't truncating non-padding bits when have access 1385 // to that information. 1386 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment()); 1387 CGF.Builder.CreateStore(Src, Tmp); 1388 CGF.Builder.CreateMemCpy( 1389 Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(), 1390 Tmp.getAlignment().getAsAlign(), 1391 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize())); 1392 } 1393 } 1394 1395 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr, 1396 const ABIArgInfo &info) { 1397 if (unsigned offset = info.getDirectOffset()) { 1398 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty); 1399 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr, 1400 CharUnits::fromQuantity(offset)); 1401 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType()); 1402 } 1403 return addr; 1404 } 1405 1406 namespace { 1407 1408 /// Encapsulates information about the way function arguments from 1409 /// CGFunctionInfo should be passed to actual LLVM IR function. 1410 class ClangToLLVMArgMapping { 1411 static const unsigned InvalidIndex = ~0U; 1412 unsigned InallocaArgNo; 1413 unsigned SRetArgNo; 1414 unsigned TotalIRArgs; 1415 1416 /// Arguments of LLVM IR function corresponding to single Clang argument. 
1417 struct IRArgs { 1418 unsigned PaddingArgIndex; 1419 // Argument is expanded to IR arguments at positions 1420 // [FirstArgIndex, FirstArgIndex + NumberOfArgs). 1421 unsigned FirstArgIndex; 1422 unsigned NumberOfArgs; 1423 1424 IRArgs() 1425 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex), 1426 NumberOfArgs(0) {} 1427 }; 1428 1429 SmallVector<IRArgs, 8> ArgInfo; 1430 1431 public: 1432 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI, 1433 bool OnlyRequiredArgs = false) 1434 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0), 1435 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) { 1436 construct(Context, FI, OnlyRequiredArgs); 1437 } 1438 1439 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; } 1440 unsigned getInallocaArgNo() const { 1441 assert(hasInallocaArg()); 1442 return InallocaArgNo; 1443 } 1444 1445 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; } 1446 unsigned getSRetArgNo() const { 1447 assert(hasSRetArg()); 1448 return SRetArgNo; 1449 } 1450 1451 unsigned totalIRArgs() const { return TotalIRArgs; } 1452 1453 bool hasPaddingArg(unsigned ArgNo) const { 1454 assert(ArgNo < ArgInfo.size()); 1455 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex; 1456 } 1457 unsigned getPaddingArgNo(unsigned ArgNo) const { 1458 assert(hasPaddingArg(ArgNo)); 1459 return ArgInfo[ArgNo].PaddingArgIndex; 1460 } 1461 1462 /// Returns index of first IR argument corresponding to ArgNo, and their 1463 /// quantity. 1464 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const { 1465 assert(ArgNo < ArgInfo.size()); 1466 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex, 1467 ArgInfo[ArgNo].NumberOfArgs); 1468 } 1469 1470 private: 1471 void construct(const ASTContext &Context, const CGFunctionInfo &FI, 1472 bool OnlyRequiredArgs); 1473 }; 1474 1475 void ClangToLLVMArgMapping::construct(const ASTContext &Context, 1476 const CGFunctionInfo &FI, 1477 bool OnlyRequiredArgs) { 1478 unsigned IRArgNo = 0; 1479 bool SwapThisWithSRet = false; 1480 const ABIArgInfo &RetAI = FI.getReturnInfo(); 1481 1482 if (RetAI.getKind() == ABIArgInfo::Indirect) { 1483 SwapThisWithSRet = RetAI.isSRetAfterThis(); 1484 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++; 1485 } 1486 1487 unsigned ArgNo = 0; 1488 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size(); 1489 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs; 1490 ++I, ++ArgNo) { 1491 assert(I != FI.arg_end()); 1492 QualType ArgType = I->type; 1493 const ABIArgInfo &AI = I->info; 1494 // Collect data about IR arguments corresponding to Clang argument ArgNo. 1495 auto &IRArgs = ArgInfo[ArgNo]; 1496 1497 if (AI.getPaddingType()) 1498 IRArgs.PaddingArgIndex = IRArgNo++; 1499 1500 switch (AI.getKind()) { 1501 case ABIArgInfo::Extend: 1502 case ABIArgInfo::Direct: { 1503 // FIXME: handle sseregparm someday... 1504 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType()); 1505 if (AI.isDirect() && AI.getCanBeFlattened() && STy) { 1506 IRArgs.NumberOfArgs = STy->getNumElements(); 1507 } else { 1508 IRArgs.NumberOfArgs = 1; 1509 } 1510 break; 1511 } 1512 case ABIArgInfo::Indirect: 1513 case ABIArgInfo::IndirectAliased: 1514 IRArgs.NumberOfArgs = 1; 1515 break; 1516 case ABIArgInfo::Ignore: 1517 case ABIArgInfo::InAlloca: 1518 // ignore and inalloca doesn't have matching LLVM parameters. 
1519 IRArgs.NumberOfArgs = 0; 1520 break; 1521 case ABIArgInfo::CoerceAndExpand: 1522 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); 1523 break; 1524 case ABIArgInfo::Expand: 1525 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); 1526 break; 1527 } 1528 1529 if (IRArgs.NumberOfArgs > 0) { 1530 IRArgs.FirstArgIndex = IRArgNo; 1531 IRArgNo += IRArgs.NumberOfArgs; 1532 } 1533 1534 // Skip over the sret parameter when it comes second. We already handled it 1535 // above. 1536 if (IRArgNo == 1 && SwapThisWithSRet) 1537 IRArgNo++; 1538 } 1539 assert(ArgNo == ArgInfo.size()); 1540 1541 if (FI.usesInAlloca()) 1542 InallocaArgNo = IRArgNo++; 1543 1544 TotalIRArgs = IRArgNo; 1545 } 1546 } // namespace 1547 1548 /***/ 1549 1550 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1551 const auto &RI = FI.getReturnInfo(); 1552 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); 1553 } 1554 1555 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1556 return ReturnTypeUsesSRet(FI) && 1557 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1558 } 1559 1560 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1561 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1562 switch (BT->getKind()) { 1563 default: 1564 return false; 1565 case BuiltinType::Float: 1566 return getTarget().useObjCFPRetForRealType(FloatModeKind::Float); 1567 case BuiltinType::Double: 1568 return getTarget().useObjCFPRetForRealType(FloatModeKind::Double); 1569 case BuiltinType::LongDouble: 1570 return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble); 1571 } 1572 } 1573 1574 return false; 1575 } 1576 1577 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1578 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1579 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1580 if (BT->getKind() == BuiltinType::LongDouble) 1581 return getTarget().useObjCFP2RetForComplexLongDouble(); 1582 } 1583 } 1584 1585 return false; 1586 } 1587 1588 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1589 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1590 return GetFunctionType(FI); 1591 } 1592 1593 llvm::FunctionType * 1594 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1595 1596 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1597 (void)Inserted; 1598 assert(Inserted && "Recursively being processed?"); 1599 1600 llvm::Type *resultType = nullptr; 1601 const ABIArgInfo &retAI = FI.getReturnInfo(); 1602 switch (retAI.getKind()) { 1603 case ABIArgInfo::Expand: 1604 case ABIArgInfo::IndirectAliased: 1605 llvm_unreachable("Invalid ABI kind for return argument"); 1606 1607 case ABIArgInfo::Extend: 1608 case ABIArgInfo::Direct: 1609 resultType = retAI.getCoerceToType(); 1610 break; 1611 1612 case ABIArgInfo::InAlloca: 1613 if (retAI.getInAllocaSRet()) { 1614 // sret things on win32 aren't void, they return the sret pointer. 
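// For example (roughly, for a 32-bit MSVC target): a callee that takes
// inalloca arguments and returns a large struct S gets an IR type along the
// lines of '%struct.S* (<{ ... }>*)' -- the sret slot lives inside the
// inalloca pack and its address is returned instead of void.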
1615 QualType ret = FI.getReturnType(); 1616 llvm::Type *ty = ConvertType(ret); 1617 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1618 resultType = llvm::PointerType::get(ty, addressSpace); 1619 } else { 1620 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1621 } 1622 break; 1623 1624 case ABIArgInfo::Indirect: 1625 case ABIArgInfo::Ignore: 1626 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1627 break; 1628 1629 case ABIArgInfo::CoerceAndExpand: 1630 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1631 break; 1632 } 1633 1634 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1635 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1636 1637 // Add type for sret argument. 1638 if (IRFunctionArgs.hasSRetArg()) { 1639 QualType Ret = FI.getReturnType(); 1640 llvm::Type *Ty = ConvertType(Ret); 1641 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1642 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1643 llvm::PointerType::get(Ty, AddressSpace); 1644 } 1645 1646 // Add type for inalloca argument. 1647 if (IRFunctionArgs.hasInallocaArg()) { 1648 auto ArgStruct = FI.getArgStruct(); 1649 assert(ArgStruct); 1650 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1651 } 1652 1653 // Add in all of the required arguments. 1654 unsigned ArgNo = 0; 1655 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1656 ie = it + FI.getNumRequiredArgs(); 1657 for (; it != ie; ++it, ++ArgNo) { 1658 const ABIArgInfo &ArgInfo = it->info; 1659 1660 // Insert a padding type to ensure proper alignment. 1661 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1662 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1663 ArgInfo.getPaddingType(); 1664 1665 unsigned FirstIRArg, NumIRArgs; 1666 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1667 1668 switch (ArgInfo.getKind()) { 1669 case ABIArgInfo::Ignore: 1670 case ABIArgInfo::InAlloca: 1671 assert(NumIRArgs == 0); 1672 break; 1673 1674 case ABIArgInfo::Indirect: { 1675 assert(NumIRArgs == 1); 1676 // indirect arguments are always on the stack, which is alloca addr space. 1677 llvm::Type *LTy = ConvertTypeForMem(it->type); 1678 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1679 CGM.getDataLayout().getAllocaAddrSpace()); 1680 break; 1681 } 1682 case ABIArgInfo::IndirectAliased: { 1683 assert(NumIRArgs == 1); 1684 llvm::Type *LTy = ConvertTypeForMem(it->type); 1685 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); 1686 break; 1687 } 1688 case ABIArgInfo::Extend: 1689 case ABIArgInfo::Direct: { 1690 // Fast-isel and the optimizer generally like scalar values better than 1691 // FCAs, so we flatten them if this is safe to do for this argument. 
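// For instance, an argument whose coercion type is '{ double, double }' and
// which may be flattened contributes two double parameters to the IR
// signature rather than a single first-class aggregate (a sketch; the
// coercion type itself is chosen by the target's ABIInfo).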
1692 llvm::Type *argType = ArgInfo.getCoerceToType(); 1693 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1694 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1695 assert(NumIRArgs == st->getNumElements()); 1696 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1697 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1698 } else { 1699 assert(NumIRArgs == 1); 1700 ArgTypes[FirstIRArg] = argType; 1701 } 1702 break; 1703 } 1704 1705 case ABIArgInfo::CoerceAndExpand: { 1706 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1707 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1708 *ArgTypesIter++ = EltTy; 1709 } 1710 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1711 break; 1712 } 1713 1714 case ABIArgInfo::Expand: 1715 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1716 getExpandedTypes(it->type, ArgTypesIter); 1717 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1718 break; 1719 } 1720 } 1721 1722 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1723 assert(Erased && "Not in set?"); 1724 1725 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1726 } 1727 1728 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1729 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1730 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1731 1732 if (!isFuncTypeConvertible(FPT)) 1733 return llvm::StructType::get(getLLVMContext()); 1734 1735 return GetFunctionType(GD); 1736 } 1737 1738 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1739 llvm::AttrBuilder &FuncAttrs, 1740 const FunctionProtoType *FPT) { 1741 if (!FPT) 1742 return; 1743 1744 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1745 FPT->isNothrow()) 1746 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1747 } 1748 1749 static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs, 1750 const Decl *Callee) { 1751 if (!Callee) 1752 return; 1753 1754 SmallVector<StringRef, 4> Attrs; 1755 1756 for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>()) 1757 AA->getAssumption().split(Attrs, ","); 1758 1759 if (!Attrs.empty()) 1760 FuncAttrs.addAttribute(llvm::AssumptionAttrKey, 1761 llvm::join(Attrs.begin(), Attrs.end(), ",")); 1762 } 1763 1764 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, 1765 QualType ReturnType) { 1766 // We can't just discard the return value for a record type with a 1767 // complex destructor or a non-trivially copyable type. 1768 if (const RecordType *RT = 1769 ReturnType.getCanonicalType()->getAs<RecordType>()) { 1770 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) 1771 return ClassDecl->hasTrivialDestructor(); 1772 } 1773 return ReturnType.isTriviallyCopyableType(Context); 1774 } 1775 1776 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, 1777 bool HasOptnone, 1778 bool AttrOnCallSite, 1779 llvm::AttrBuilder &FuncAttrs) { 1780 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 
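// (A sketch of the mapping below: -Os sets OptimizeSize to 1 and yields
// 'optsize'; -Oz sets it to 2 and yields both 'optsize' and 'minsize'.)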
1781 if (!HasOptnone) { 1782 if (CodeGenOpts.OptimizeSize) 1783 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1784 if (CodeGenOpts.OptimizeSize == 2) 1785 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1786 } 1787 1788 if (CodeGenOpts.DisableRedZone) 1789 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1790 if (CodeGenOpts.IndirectTlsSegRefs) 1791 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1792 if (CodeGenOpts.NoImplicitFloat) 1793 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1794 1795 if (AttrOnCallSite) { 1796 // Attributes that should go on the call site only. 1797 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name)) 1798 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1799 if (!CodeGenOpts.TrapFuncName.empty()) 1800 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1801 } else { 1802 StringRef FpKind; 1803 switch (CodeGenOpts.getFramePointer()) { 1804 case CodeGenOptions::FramePointerKind::None: 1805 FpKind = "none"; 1806 break; 1807 case CodeGenOptions::FramePointerKind::NonLeaf: 1808 FpKind = "non-leaf"; 1809 break; 1810 case CodeGenOptions::FramePointerKind::All: 1811 FpKind = "all"; 1812 break; 1813 } 1814 FuncAttrs.addAttribute("frame-pointer", FpKind); 1815 1816 if (CodeGenOpts.LessPreciseFPMAD) 1817 FuncAttrs.addAttribute("less-precise-fpmad", "true"); 1818 1819 if (CodeGenOpts.NullPointerIsValid) 1820 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); 1821 1822 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE()) 1823 FuncAttrs.addAttribute("denormal-fp-math", 1824 CodeGenOpts.FPDenormalMode.str()); 1825 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) { 1826 FuncAttrs.addAttribute( 1827 "denormal-fp-math-f32", 1828 CodeGenOpts.FP32DenormalMode.str()); 1829 } 1830 1831 if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore) 1832 FuncAttrs.addAttribute("no-trapping-math", "true"); 1833 1834 // Strict (compliant) code is the default, so only add this attribute to 1835 // indicate that we are trying to workaround a problem case. 1836 if (!CodeGenOpts.StrictFloatCastOverflow) 1837 FuncAttrs.addAttribute("strict-float-cast-overflow", "false"); 1838 1839 // TODO: Are these all needed? 1840 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. 1841 if (LangOpts.NoHonorInfs) 1842 FuncAttrs.addAttribute("no-infs-fp-math", "true"); 1843 if (LangOpts.NoHonorNaNs) 1844 FuncAttrs.addAttribute("no-nans-fp-math", "true"); 1845 if (LangOpts.ApproxFunc) 1846 FuncAttrs.addAttribute("approx-func-fp-math", "true"); 1847 if (LangOpts.UnsafeFPMath) 1848 FuncAttrs.addAttribute("unsafe-fp-math", "true"); 1849 if (CodeGenOpts.SoftFloat) 1850 FuncAttrs.addAttribute("use-soft-float", "true"); 1851 FuncAttrs.addAttribute("stack-protector-buffer-size", 1852 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1853 if (LangOpts.NoSignedZero) 1854 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true"); 1855 1856 // TODO: Reciprocal estimate codegen options should apply to instructions? 
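// For example, '-mrecip=vec-sqrtf,divd' (an assumed invocation) arrives here
// via CodeGenOpts.Reciprocals and is emitted below as the string attribute
// "reciprocal-estimates"="vec-sqrtf,divd", with multiple entries joined by
// commas.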
1857 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; 1858 if (!Recips.empty()) 1859 FuncAttrs.addAttribute("reciprocal-estimates", 1860 llvm::join(Recips, ",")); 1861 1862 if (!CodeGenOpts.PreferVectorWidth.empty() && 1863 CodeGenOpts.PreferVectorWidth != "none") 1864 FuncAttrs.addAttribute("prefer-vector-width", 1865 CodeGenOpts.PreferVectorWidth); 1866 1867 if (CodeGenOpts.StackRealignment) 1868 FuncAttrs.addAttribute("stackrealign"); 1869 if (CodeGenOpts.Backchain) 1870 FuncAttrs.addAttribute("backchain"); 1871 if (CodeGenOpts.EnableSegmentedStacks) 1872 FuncAttrs.addAttribute("split-stack"); 1873 1874 if (CodeGenOpts.SpeculativeLoadHardening) 1875 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); 1876 } 1877 1878 if (getLangOpts().assumeFunctionsAreConvergent()) { 1879 // Conservatively, mark all functions and calls in CUDA and OpenCL as 1880 // convergent (meaning, they may call an intrinsically convergent op, such 1881 // as __syncthreads() / barrier(), and so can't have certain optimizations 1882 // applied around them). LLVM will remove this attribute where it safely 1883 // can. 1884 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1885 } 1886 1887 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1888 // Exceptions aren't supported in CUDA device code. 1889 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1890 } 1891 1892 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { 1893 StringRef Var, Value; 1894 std::tie(Var, Value) = Attr.split('='); 1895 FuncAttrs.addAttribute(Var, Value); 1896 } 1897 } 1898 1899 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) { 1900 llvm::AttrBuilder FuncAttrs; 1901 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(), 1902 /* AttrOnCallSite = */ false, FuncAttrs); 1903 // TODO: call GetCPUAndFeaturesAttributes? 1904 F.addFnAttrs(FuncAttrs); 1905 } 1906 1907 void CodeGenModule::addDefaultFunctionDefinitionAttributes( 1908 llvm::AttrBuilder &attrs) { 1909 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false, 1910 /*for call*/ false, attrs); 1911 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs); 1912 } 1913 1914 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs, 1915 const LangOptions &LangOpts, 1916 const NoBuiltinAttr *NBA = nullptr) { 1917 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) { 1918 SmallString<32> AttributeName; 1919 AttributeName += "no-builtin-"; 1920 AttributeName += BuiltinName; 1921 FuncAttrs.addAttribute(AttributeName); 1922 }; 1923 1924 // First, handle the language options passed through -fno-builtin. 1925 if (LangOpts.NoBuiltin) { 1926 // -fno-builtin disables them all. 1927 FuncAttrs.addAttribute("no-builtins"); 1928 return; 1929 } 1930 1931 // Then, add attributes for builtins specified through -fno-builtin-<name>. 1932 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr); 1933 1934 // Now, let's check the __attribute__((no_builtin("...")) attribute added to 1935 // the source. 1936 if (!NBA) 1937 return; 1938 1939 // If there is a wildcard in the builtin names specified through the 1940 // attribute, disable them all. 1941 if (llvm::is_contained(NBA->builtinNames(), "*")) { 1942 FuncAttrs.addAttribute("no-builtins"); 1943 return; 1944 } 1945 1946 // And last, add the rest of the builtin names. 
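// For example, __attribute__((no_builtin("memcpy", "memset"))) on the
// declaration (assuming no "*" wildcard was seen above) becomes the string
// attributes "no-builtin-memcpy" and "no-builtin-memset" via the lambda
// defined earlier.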
1947 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr); 1948 } 1949 1950 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types, 1951 const llvm::DataLayout &DL, const ABIArgInfo &AI, 1952 bool CheckCoerce = true) { 1953 llvm::Type *Ty = Types.ConvertTypeForMem(QTy); 1954 if (AI.getKind() == ABIArgInfo::Indirect) 1955 return true; 1956 if (AI.getKind() == ABIArgInfo::Extend) 1957 return true; 1958 if (!DL.typeSizeEqualsStoreSize(Ty)) 1959 // TODO: This will result in a modest amount of values not marked noundef 1960 // when they could be. We care about values that *invisibly* contain undef 1961 // bits from the perspective of LLVM IR. 1962 return false; 1963 if (CheckCoerce && AI.canHaveCoerceToType()) { 1964 llvm::Type *CoerceTy = AI.getCoerceToType(); 1965 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy), 1966 DL.getTypeSizeInBits(Ty))) 1967 // If we're coercing to a type with a greater size than the canonical one, 1968 // we're introducing new undef bits. 1969 // Coercing to a type of smaller or equal size is ok, as we know that 1970 // there's no internal padding (typeSizeEqualsStoreSize). 1971 return false; 1972 } 1973 if (QTy->isBitIntType()) 1974 return true; 1975 if (QTy->isReferenceType()) 1976 return true; 1977 if (QTy->isNullPtrType()) 1978 return false; 1979 if (QTy->isMemberPointerType()) 1980 // TODO: Some member pointers are `noundef`, but it depends on the ABI. For 1981 // now, never mark them. 1982 return false; 1983 if (QTy->isScalarType()) { 1984 if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy)) 1985 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false); 1986 return true; 1987 } 1988 if (const VectorType *Vector = dyn_cast<VectorType>(QTy)) 1989 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false); 1990 if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy)) 1991 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false); 1992 if (const ArrayType *Array = dyn_cast<ArrayType>(QTy)) 1993 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false); 1994 1995 // TODO: Some structs may be `noundef`, in specific situations. 1996 return false; 1997 } 1998 1999 /// Construct the IR attribute list of a function or call. 2000 /// 2001 /// When adding an attribute, please consider where it should be handled: 2002 /// 2003 /// - getDefaultFunctionAttributes is for attributes that are essentially 2004 /// part of the global target configuration (but perhaps can be 2005 /// overridden on a per-function basis). Adding attributes there 2006 /// will cause them to also be set in frontends that build on Clang's 2007 /// target-configuration logic, as well as for code defined in library 2008 /// modules such as CUDA's libdevice. 2009 /// 2010 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes 2011 /// and adds declaration-specific, convention-specific, and 2012 /// frontend-specific logic. The last is of particular importance: 2013 /// attributes that restrict how the frontend generates code must be 2014 /// added here rather than getDefaultFunctionAttributes. 2015 /// 2016 void CodeGenModule::ConstructAttributeList(StringRef Name, 2017 const CGFunctionInfo &FI, 2018 CGCalleeInfo CalleeInfo, 2019 llvm::AttributeList &AttrList, 2020 unsigned &CallingConv, 2021 bool AttrOnCallSite, bool IsThunk) { 2022 llvm::AttrBuilder FuncAttrs; 2023 llvm::AttrBuilder RetAttrs; 2024 2025 // Collect function IR attributes from the CC lowering. 
2026 // We'll collect the parameter and result attributes later.
2027 CallingConv = FI.getEffectiveCallingConvention();
2028 if (FI.isNoReturn())
2029 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2030 if (FI.isCmseNSCall())
2031 FuncAttrs.addAttribute("cmse_nonsecure_call");
2032 
2033 // Collect function IR attributes from the callee prototype if we have one.
2034 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2035 CalleeInfo.getCalleeFunctionProtoType());
2036 
2037 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2038 
2039 // Attach assumption attributes to the declaration. If this is a call
2040 // site, attach assumptions from the caller to the call as well.
2041 AddAttributesFromAssumes(FuncAttrs, TargetDecl);
2042 
2043 bool HasOptnone = false;
2044 // The NoBuiltinAttr attached to the target FunctionDecl.
2045 const NoBuiltinAttr *NBA = nullptr;
2046 
2047 // Collect function IR attributes based on declaration-specific
2048 // information.
2049 // FIXME: handle sseregparm someday...
2050 if (TargetDecl) {
2051 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2052 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2053 if (TargetDecl->hasAttr<NoThrowAttr>())
2054 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2055 if (TargetDecl->hasAttr<NoReturnAttr>())
2056 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2057 if (TargetDecl->hasAttr<ColdAttr>())
2058 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2059 if (TargetDecl->hasAttr<HotAttr>())
2060 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2061 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2062 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2063 if (TargetDecl->hasAttr<ConvergentAttr>())
2064 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2065 
2066 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2067 AddAttributesFromFunctionProtoType(
2068 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2069 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2070 // A sane operator new returns a non-aliasing pointer.
2071 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2072 if (getCodeGenOpts().AssumeSaneOperatorNew &&
2073 (Kind == OO_New || Kind == OO_Array_New))
2074 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2075 }
2076 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2077 const bool IsVirtualCall = MD && MD->isVirtual();
2078 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2079 // virtual function. These attributes are not inherited by overriders.
2080 if (!(AttrOnCallSite && IsVirtualCall)) {
2081 if (Fn->isNoReturn())
2082 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2083 NBA = Fn->getAttr<NoBuiltinAttr>();
2084 }
2085 // Only place nomerge attribute on call sites, never functions. This
2086 // allows it to work on indirect virtual function calls.
2087 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2088 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2089 }
2090 
2091 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2092 if (TargetDecl->hasAttr<ConstAttr>()) {
2093 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
2094 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2095 // gcc specifies that 'const' functions have greater restrictions than
2096 // 'pure' functions, so they also cannot have infinite loops.
2097 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2098 } else if (TargetDecl->hasAttr<PureAttr>()) {
2099 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2100 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2101 // gcc specifies that 'pure' functions cannot have infinite loops.
2102 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2103 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2104 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2105 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2106 }
2107 if (TargetDecl->hasAttr<RestrictAttr>())
2108 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2109 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2110 !CodeGenOpts.NullPointerIsValid)
2111 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2112 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2113 FuncAttrs.addAttribute("no_caller_saved_registers");
2114 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2115 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2116 if (TargetDecl->hasAttr<LeafAttr>())
2117 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2118 
2119 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2120 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2121 Optional<unsigned> NumElemsParam;
2122 if (AllocSize->getNumElemsParam().isValid())
2123 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2124 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2125 NumElemsParam);
2126 }
2127 
2128 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2129 if (getLangOpts().OpenCLVersion <= 120) {
2130 // OpenCL v1.2: work groups are always uniform.
2131 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2132 } else {
2133 // OpenCL v2.0: work groups may or may not be uniform. The
2134 // '-cl-uniform-work-group-size' compile option provides a hint
2135 // to the compiler that the global work-size is a multiple of
2136 // the work-group size specified to clEnqueueNDRangeKernel
2137 // (i.e. work groups are uniform).
2138 FuncAttrs.addAttribute("uniform-work-group-size",
2139 llvm::toStringRef(CodeGenOpts.UniformWGSize));
2140 }
2141 }
2142 }
2143 
2144 // Attach "no-builtins" attributes to:
2145 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2146 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2147 // The attributes can come from:
2148 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2149 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2150 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2151 
2152 // Collect function IR attributes based on global settings.
2153 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2154 
2155 // Override some default IR attributes based on declaration-specific
2156 // information.
2157 if (TargetDecl) {
2158 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2159 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2160 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2161 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2162 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2163 FuncAttrs.removeAttribute("split-stack");
2164 
2165 // Add NonLazyBind attribute to function declarations when -fno-plt
2166 // is used.
2167 // FIXME: what if we just haven't processed the function definition
2168 // yet, or if it's an external definition like C99 inline?
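// (nonlazybind tells the backend that the callee does not require lazy PLT
// binding, so -fno-plt builds can call external functions through the GOT
// directly -- a brief sketch of the rationale, not a guarantee about every
// target.)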
2169 if (CodeGenOpts.NoPLT) { 2170 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 2171 if (!Fn->isDefined() && !AttrOnCallSite) { 2172 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); 2173 } 2174 } 2175 } 2176 } 2177 2178 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage 2179 // functions with -funique-internal-linkage-names. 2180 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) { 2181 if (isa<FunctionDecl>(TargetDecl)) { 2182 if (this->getFunctionLinkage(CalleeInfo.getCalleeDecl()) == 2183 llvm::GlobalValue::InternalLinkage) 2184 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy", 2185 "selected"); 2186 } 2187 } 2188 2189 // Collect non-call-site function IR attributes from declaration-specific 2190 // information. 2191 if (!AttrOnCallSite) { 2192 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) 2193 FuncAttrs.addAttribute("cmse_nonsecure_entry"); 2194 2195 // Whether tail calls are enabled. 2196 auto shouldDisableTailCalls = [&] { 2197 // Should this be honored in getDefaultFunctionAttributes? 2198 if (CodeGenOpts.DisableTailCalls) 2199 return true; 2200 2201 if (!TargetDecl) 2202 return false; 2203 2204 if (TargetDecl->hasAttr<DisableTailCallsAttr>() || 2205 TargetDecl->hasAttr<AnyX86InterruptAttr>()) 2206 return true; 2207 2208 if (CodeGenOpts.NoEscapingBlockTailCalls) { 2209 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl)) 2210 if (!BD->doesNotEscape()) 2211 return true; 2212 } 2213 2214 return false; 2215 }; 2216 if (shouldDisableTailCalls()) 2217 FuncAttrs.addAttribute("disable-tail-calls", "true"); 2218 2219 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes 2220 // handles these separately to set them based on the global defaults. 2221 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); 2222 } 2223 2224 // Collect attributes from arguments and return values. 2225 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 2226 2227 QualType RetTy = FI.getReturnType(); 2228 const ABIArgInfo &RetAI = FI.getReturnInfo(); 2229 const llvm::DataLayout &DL = getDataLayout(); 2230 2231 // C++ explicitly makes returning undefined values UB. C's rule only applies 2232 // to used values, so we never mark them noundef for now. 2233 bool HasStrictReturn = getLangOpts().CPlusPlus; 2234 if (TargetDecl && HasStrictReturn) { 2235 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) 2236 HasStrictReturn &= !FDecl->isExternC(); 2237 else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) 2238 // Function pointer 2239 HasStrictReturn &= !VDecl->isExternC(); 2240 } 2241 2242 // We don't want to be too aggressive with the return checking, unless 2243 // it's explicit in the code opts or we're using an appropriate sanitizer. 2244 // Try to respect what the programmer intended. 
2245 HasStrictReturn &= getCodeGenOpts().StrictReturn ||
2246 !MayDropFunctionReturn(getContext(), RetTy) ||
2247 getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
2248 getLangOpts().Sanitize.has(SanitizerKind::Return);
2249 
2250 // Determine whether the return type could be partially undef.
2251 if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
2252 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2253 DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2254 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2255 }
2256 
2257 switch (RetAI.getKind()) {
2258 case ABIArgInfo::Extend:
2259 if (RetAI.isSignExt())
2260 RetAttrs.addAttribute(llvm::Attribute::SExt);
2261 else
2262 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2263 LLVM_FALLTHROUGH;
2264 case ABIArgInfo::Direct:
2265 if (RetAI.getInReg())
2266 RetAttrs.addAttribute(llvm::Attribute::InReg);
2267 break;
2268 case ABIArgInfo::Ignore:
2269 break;
2270 
2271 case ABIArgInfo::InAlloca:
2272 case ABIArgInfo::Indirect: {
2273 // inalloca and sret disable readnone and readonly
2274 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2275 .removeAttribute(llvm::Attribute::ReadNone);
2276 break;
2277 }
2278 
2279 case ABIArgInfo::CoerceAndExpand:
2280 break;
2281 
2282 case ABIArgInfo::Expand:
2283 case ABIArgInfo::IndirectAliased:
2284 llvm_unreachable("Invalid ABI kind for return argument");
2285 }
2286 
2287 if (!IsThunk) {
2288 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2289 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2290 QualType PTy = RefTy->getPointeeType();
2291 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2292 RetAttrs.addDereferenceableAttr(
2293 getMinimumObjectSize(PTy).getQuantity());
2294 if (getContext().getTargetAddressSpace(PTy) == 0 &&
2295 !CodeGenOpts.NullPointerIsValid)
2296 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2297 if (PTy->isObjectType()) {
2298 llvm::Align Alignment =
2299 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2300 RetAttrs.addAlignmentAttr(Alignment);
2301 }
2302 }
2303 }
2304 
2305 bool hasUsedSRet = false;
2306 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2307 
2308 // Attach attributes to sret.
2309 if (IRFunctionArgs.hasSRetArg()) {
2310 llvm::AttrBuilder SRETAttrs;
2311 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2312 hasUsedSRet = true;
2313 if (RetAI.getInReg())
2314 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2315 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2316 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2317 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2318 }
2319 
2320 // Attach attributes to inalloca argument.
2321 if (IRFunctionArgs.hasInallocaArg()) {
2322 llvm::AttrBuilder Attrs;
2323 Attrs.addInAllocaAttr(FI.getArgStruct());
2324 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2325 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2326 }
2327 
2328 // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
2329 // unless this is a thunk function.
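// For a non-thunk instance method such as 'void S::m()', the implicit 'this'
// argument therefore typically ends up looking roughly like
// 'S* nonnull align(alignof(S)) dereferenceable(sizeof(S)) %this'
// (a sketch: the exact alignment and byte count come from
// getNaturalTypeAlignment and getMinimumObjectSize below, and nonnull is
// dropped when null pointers are considered valid).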
2330 // FIXME: fix this properly, https://reviews.llvm.org/D100388 2331 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && 2332 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { 2333 auto IRArgs = IRFunctionArgs.getIRArgs(0); 2334 2335 assert(IRArgs.second == 1 && "Expected only a single `this` pointer."); 2336 2337 llvm::AttrBuilder Attrs; 2338 2339 QualType ThisTy = 2340 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType(); 2341 2342 if (!CodeGenOpts.NullPointerIsValid && 2343 getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) { 2344 Attrs.addAttribute(llvm::Attribute::NonNull); 2345 Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity()); 2346 } else { 2347 // FIXME dereferenceable should be correct here, regardless of 2348 // NullPointerIsValid. However, dereferenceable currently does not always 2349 // respect NullPointerIsValid and may imply nonnull and break the program. 2350 // See https://reviews.llvm.org/D66618 for discussions. 2351 Attrs.addDereferenceableOrNullAttr( 2352 getMinimumObjectSize( 2353 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) 2354 .getQuantity()); 2355 } 2356 2357 llvm::Align Alignment = 2358 getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr, 2359 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true) 2360 .getAsAlign(); 2361 Attrs.addAlignmentAttr(Alignment); 2362 2363 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs); 2364 } 2365 2366 unsigned ArgNo = 0; 2367 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2368 E = FI.arg_end(); 2369 I != E; ++I, ++ArgNo) { 2370 QualType ParamType = I->type; 2371 const ABIArgInfo &AI = I->info; 2372 llvm::AttrBuilder Attrs; 2373 2374 // Add attribute for padding argument, if necessary. 2375 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2376 if (AI.getPaddingInReg()) { 2377 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2378 llvm::AttributeSet::get( 2379 getLLVMContext(), 2380 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 2381 } 2382 } 2383 2384 // Decide whether the argument we're handling could be partially undef 2385 bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI); 2386 if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef) 2387 Attrs.addAttribute(llvm::Attribute::NoUndef); 2388 2389 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2390 // have the corresponding parameter variable. It doesn't make 2391 // sense to do it here because parameters are so messed up. 2392 switch (AI.getKind()) { 2393 case ABIArgInfo::Extend: 2394 if (AI.isSignExt()) 2395 Attrs.addAttribute(llvm::Attribute::SExt); 2396 else 2397 Attrs.addAttribute(llvm::Attribute::ZExt); 2398 LLVM_FALLTHROUGH; 2399 case ABIArgInfo::Direct: 2400 if (ArgNo == 0 && FI.isChainCall()) 2401 Attrs.addAttribute(llvm::Attribute::Nest); 2402 else if (AI.getInReg()) 2403 Attrs.addAttribute(llvm::Attribute::InReg); 2404 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); 2405 break; 2406 2407 case ABIArgInfo::Indirect: { 2408 if (AI.getInReg()) 2409 Attrs.addAttribute(llvm::Attribute::InReg); 2410 2411 if (AI.getIndirectByVal()) 2412 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); 2413 2414 auto *Decl = ParamType->getAsRecordDecl(); 2415 if (CodeGenOpts.PassByValueIsNoAlias && Decl && 2416 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs) 2417 // When calling the function, the pointer passed in will be the only 2418 // reference to the underlying object. Mark it accordingly. 
2419 Attrs.addAttribute(llvm::Attribute::NoAlias); 2420 2421 // TODO: We could add the byref attribute if not byval, but it would 2422 // require updating many testcases. 2423 2424 CharUnits Align = AI.getIndirectAlign(); 2425 2426 // In a byval argument, it is important that the required 2427 // alignment of the type is honored, as LLVM might be creating a 2428 // *new* stack object, and needs to know what alignment to give 2429 // it. (Sometimes it can deduce a sensible alignment on its own, 2430 // but not if clang decides it must emit a packed struct, or the 2431 // user specifies increased alignment requirements.) 2432 // 2433 // This is different from indirect *not* byval, where the object 2434 // exists already, and the align attribute is purely 2435 // informative. 2436 assert(!Align.isZero()); 2437 2438 // For now, only add this when we have a byval argument. 2439 // TODO: be less lazy about updating test cases. 2440 if (AI.getIndirectByVal()) 2441 Attrs.addAlignmentAttr(Align.getQuantity()); 2442 2443 // byval disables readnone and readonly. 2444 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2445 .removeAttribute(llvm::Attribute::ReadNone); 2446 2447 break; 2448 } 2449 case ABIArgInfo::IndirectAliased: { 2450 CharUnits Align = AI.getIndirectAlign(); 2451 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); 2452 Attrs.addAlignmentAttr(Align.getQuantity()); 2453 break; 2454 } 2455 case ABIArgInfo::Ignore: 2456 case ABIArgInfo::Expand: 2457 case ABIArgInfo::CoerceAndExpand: 2458 break; 2459 2460 case ABIArgInfo::InAlloca: 2461 // inalloca disables readnone and readonly. 2462 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2463 .removeAttribute(llvm::Attribute::ReadNone); 2464 continue; 2465 } 2466 2467 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2468 QualType PTy = RefTy->getPointeeType(); 2469 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2470 Attrs.addDereferenceableAttr( 2471 getMinimumObjectSize(PTy).getQuantity()); 2472 if (getContext().getTargetAddressSpace(PTy) == 0 && 2473 !CodeGenOpts.NullPointerIsValid) 2474 Attrs.addAttribute(llvm::Attribute::NonNull); 2475 if (PTy->isObjectType()) { 2476 llvm::Align Alignment = 2477 getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); 2478 Attrs.addAlignmentAttr(Alignment); 2479 } 2480 } 2481 2482 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2483 case ParameterABI::Ordinary: 2484 break; 2485 2486 case ParameterABI::SwiftIndirectResult: { 2487 // Add 'sret' if we haven't already used it for something, but 2488 // only if the result is void. 2489 if (!hasUsedSRet && RetTy->isVoidType()) { 2490 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); 2491 hasUsedSRet = true; 2492 } 2493 2494 // Add 'noalias' in either case. 2495 Attrs.addAttribute(llvm::Attribute::NoAlias); 2496 2497 // Add 'dereferenceable' and 'alignment'. 
2498 auto PTy = ParamType->getPointeeType(); 2499 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2500 auto info = getContext().getTypeInfoInChars(PTy); 2501 Attrs.addDereferenceableAttr(info.Width.getQuantity()); 2502 Attrs.addAlignmentAttr(info.Align.getAsAlign()); 2503 } 2504 break; 2505 } 2506 2507 case ParameterABI::SwiftErrorResult: 2508 Attrs.addAttribute(llvm::Attribute::SwiftError); 2509 break; 2510 2511 case ParameterABI::SwiftContext: 2512 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2513 break; 2514 2515 case ParameterABI::SwiftAsyncContext: 2516 Attrs.addAttribute(llvm::Attribute::SwiftAsync); 2517 break; 2518 } 2519 2520 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2521 Attrs.addAttribute(llvm::Attribute::NoCapture); 2522 2523 if (Attrs.hasAttributes()) { 2524 unsigned FirstIRArg, NumIRArgs; 2525 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2526 for (unsigned i = 0; i < NumIRArgs; i++) 2527 ArgAttrs[FirstIRArg + i] = 2528 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2529 } 2530 } 2531 assert(ArgNo == FI.arg_size()); 2532 2533 AttrList = llvm::AttributeList::get( 2534 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2535 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2536 } 2537 2538 /// An argument came in as a promoted argument; demote it back to its 2539 /// declared type. 2540 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2541 const VarDecl *var, 2542 llvm::Value *value) { 2543 llvm::Type *varType = CGF.ConvertType(var->getType()); 2544 2545 // This can happen with promotions that actually don't change the 2546 // underlying type, like the enum promotions. 2547 if (value->getType() == varType) return value; 2548 2549 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2550 && "unexpected promotion type"); 2551 2552 if (isa<llvm::IntegerType>(varType)) 2553 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2554 2555 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2556 } 2557 2558 /// Returns the attribute (either parameter attribute, or function 2559 /// attribute), which declares argument ArgNo to be non-null. 2560 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2561 QualType ArgType, unsigned ArgNo) { 2562 // FIXME: __attribute__((nonnull)) can also be applied to: 2563 // - references to pointers, where the pointee is known to be 2564 // nonnull (apparently a Clang extension) 2565 // - transparent unions containing pointers 2566 // In the former case, LLVM IR cannot represent the constraint. In 2567 // the latter case, we have no guarantee that the transparent union 2568 // is in fact passed as a pointer. 2569 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2570 return nullptr; 2571 // First, check attribute on parameter itself. 2572 if (PVD) { 2573 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2574 return ParmNNAttr; 2575 } 2576 // Check function attributes. 
2577 if (!FD) 2578 return nullptr; 2579 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) { 2580 if (NNAttr->isNonNull(ArgNo)) 2581 return NNAttr; 2582 } 2583 return nullptr; 2584 } 2585 2586 namespace { 2587 struct CopyBackSwiftError final : EHScopeStack::Cleanup { 2588 Address Temp; 2589 Address Arg; 2590 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {} 2591 void Emit(CodeGenFunction &CGF, Flags flags) override { 2592 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp); 2593 CGF.Builder.CreateStore(errorValue, Arg); 2594 } 2595 }; 2596 } 2597 2598 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI, 2599 llvm::Function *Fn, 2600 const FunctionArgList &Args) { 2601 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) 2602 // Naked functions don't have prologues. 2603 return; 2604 2605 // If this is an implicit-return-zero function, go ahead and 2606 // initialize the return value. TODO: it might be nice to have 2607 // a more general mechanism for this that didn't require synthesized 2608 // return statements. 2609 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) { 2610 if (FD->hasImplicitReturnZero()) { 2611 QualType RetTy = FD->getReturnType().getUnqualifiedType(); 2612 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy); 2613 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy); 2614 Builder.CreateStore(Zero, ReturnValue); 2615 } 2616 } 2617 2618 // FIXME: We no longer need the types from FunctionArgList; lift up and 2619 // simplify. 2620 2621 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI); 2622 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs()); 2623 2624 // If we're using inalloca, all the memory arguments are GEPs off of the last 2625 // parameter, which is a pointer to the complete memory area. 2626 Address ArgStruct = Address::invalid(); 2627 if (IRFunctionArgs.hasInallocaArg()) { 2628 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()), 2629 FI.getArgStructAlignment()); 2630 2631 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo()); 2632 } 2633 2634 // Name the struct return parameter. 2635 if (IRFunctionArgs.hasSRetArg()) { 2636 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo()); 2637 AI->setName("agg.result"); 2638 AI->addAttr(llvm::Attribute::NoAlias); 2639 } 2640 2641 // Track if we received the parameter as a pointer (indirect, byval, or 2642 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it 2643 // into a local alloca for us. 2644 SmallVector<ParamValue, 16> ArgVals; 2645 ArgVals.reserve(Args.size()); 2646 2647 // Create a pointer value for every parameter declaration. This usually 2648 // entails copying one or more LLVM IR arguments into an alloca. Don't push 2649 // any cleanups or do anything that might unwind. We do that separately, so 2650 // we can push the cleanups in the correct order for the ABI. 2651 assert(FI.arg_size() == Args.size() && 2652 "Mismatch between function signature & arguments."); 2653 unsigned ArgNo = 0; 2654 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin(); 2655 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end(); 2656 i != e; ++i, ++info_it, ++ArgNo) { 2657 const VarDecl *Arg = *i; 2658 const ABIArgInfo &ArgI = info_it->info; 2659 2660 bool isPromoted = 2661 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted(); 2662 // We are converting from ABIArgInfo type to VarDecl type directly, unless 2663 // the parameter is promoted. 
In this case we convert to
2664 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2665 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2666 assert(hasScalarEvaluationKind(Ty) ==
2667 hasScalarEvaluationKind(Arg->getType()));
2668 
2669 unsigned FirstIRArg, NumIRArgs;
2670 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2671 
2672 switch (ArgI.getKind()) {
2673 case ABIArgInfo::InAlloca: {
2674 assert(NumIRArgs == 0);
2675 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2676 Address V =
2677 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2678 if (ArgI.getInAllocaIndirect())
2679 V = Address(Builder.CreateLoad(V),
2680 getContext().getTypeAlignInChars(Ty));
2681 ArgVals.push_back(ParamValue::forIndirect(V));
2682 break;
2683 }
2684 
2685 case ABIArgInfo::Indirect:
2686 case ABIArgInfo::IndirectAliased: {
2687 assert(NumIRArgs == 1);
2688 Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
2689 ArgI.getIndirectAlign());
2690 
2691 if (!hasScalarEvaluationKind(Ty)) {
2692 // Aggregates and complex variables are accessed by reference. All we
2693 // need to do is realign the value, if requested. Also, if the address
2694 // may be aliased, copy it to ensure that the parameter variable is
2695 // mutable and has a unique address, as C requires.
2696 Address V = ParamAddr;
2697 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2698 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2699 
2700 // Copy from the incoming argument pointer to the temporary with the
2701 // appropriate alignment.
2702 //
2703 // FIXME: We should have a common utility for generating an aggregate
2704 // copy.
2705 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2706 Builder.CreateMemCpy(
2707 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2708 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2709 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2710 V = AlignedTemp;
2711 }
2712 ArgVals.push_back(ParamValue::forIndirect(V));
2713 } else {
2714 // Load scalar value from indirect argument.
2715 llvm::Value *V =
2716 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2717 
2718 if (isPromoted)
2719 V = emitArgumentDemotion(*this, Arg, V);
2720 ArgVals.push_back(ParamValue::forDirect(V));
2721 }
2722 break;
2723 }
2724 
2725 case ABIArgInfo::Extend:
2726 case ABIArgInfo::Direct: {
2727 auto AI = Fn->getArg(FirstIRArg);
2728 llvm::Type *LTy = ConvertType(Arg->getType());
2729 
2730 // Prepare parameter attributes. So far, only attributes for pointer
2731 // parameters are prepared. See
2732 // http://llvm.org/docs/LangRef.html#paramattrs.
2733 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2734 ArgI.getCoerceToType()->isPointerTy()) {
2735 assert(NumIRArgs == 1);
2736 
2737 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2738 // Set `nonnull` attribute if any.
2739 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2740 PVD->getFunctionScopeIndex()) &&
2741 !CGM.getCodeGenOpts().NullPointerIsValid)
2742 AI->addAttr(llvm::Attribute::NonNull);
2743 
2744 QualType OTy = PVD->getOriginalType();
2745 if (const auto *ArrTy =
2746 getContext().getAsConstantArrayType(OTy)) {
2747 // A C99 array parameter declaration with the static keyword also
2748 // indicates dereferenceability, and if the size is constant we can
2749 // use the dereferenceable attribute (which requires the size in
2750 // bytes).
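// For example, 'void f(int a[static 4])' (assuming a 4-byte int) lets us
// mark the incoming pointer 'align 4 dereferenceable(16)'; the byte count is
// computed below as getTypeSizeInChars(ETy) * ArrSize.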
2751 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2752 QualType ETy = ArrTy->getElementType(); 2753 llvm::Align Alignment = 2754 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2755 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2756 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2757 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2758 ArrSize) { 2759 llvm::AttrBuilder Attrs; 2760 Attrs.addDereferenceableAttr( 2761 getContext().getTypeSizeInChars(ETy).getQuantity() * 2762 ArrSize); 2763 AI->addAttrs(Attrs); 2764 } else if (getContext().getTargetInfo().getNullPointerValue( 2765 ETy.getAddressSpace()) == 0 && 2766 !CGM.getCodeGenOpts().NullPointerIsValid) { 2767 AI->addAttr(llvm::Attribute::NonNull); 2768 } 2769 } 2770 } else if (const auto *ArrTy = 2771 getContext().getAsVariableArrayType(OTy)) { 2772 // For C99 VLAs with the static keyword, we don't know the size so 2773 // we can't use the dereferenceable attribute, but in addrspace(0) 2774 // we know that it must be nonnull. 2775 if (ArrTy->getSizeModifier() == VariableArrayType::Static) { 2776 QualType ETy = ArrTy->getElementType(); 2777 llvm::Align Alignment = 2778 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2779 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2780 if (!getContext().getTargetAddressSpace(ETy) && 2781 !CGM.getCodeGenOpts().NullPointerIsValid) 2782 AI->addAttr(llvm::Attribute::NonNull); 2783 } 2784 } 2785 2786 // Set `align` attribute if any. 2787 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2788 if (!AVAttr) 2789 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2790 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2791 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2792 // If alignment-assumption sanitizer is enabled, we do *not* add 2793 // alignment attribute here, but emit normal alignment assumption, 2794 // so the UBSAN check could function. 2795 llvm::ConstantInt *AlignmentCI = 2796 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); 2797 uint64_t AlignmentInt = 2798 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); 2799 if (AI->getParamAlign().valueOrOne() < AlignmentInt) { 2800 AI->removeAttr(llvm::Attribute::AttrKind::Alignment); 2801 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr( 2802 llvm::Align(AlignmentInt))); 2803 } 2804 } 2805 } 2806 2807 // Set 'noalias' if an argument type has the `restrict` qualifier. 2808 if (Arg->getType().isRestrictQualified()) 2809 AI->addAttr(llvm::Attribute::NoAlias); 2810 } 2811 2812 // Prepare the argument value. If we have the trivial case, handle it 2813 // with no muss and fuss. 2814 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2815 ArgI.getCoerceToType() == ConvertType(Ty) && 2816 ArgI.getDirectOffset() == 0) { 2817 assert(NumIRArgs == 1); 2818 2819 // LLVM expects swifterror parameters to be used in very restricted 2820 // ways. Copy the value into a less-restricted temporary. 
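// (LLVM requires that a swifterror argument only be loaded from, stored to,
// or passed along as another swifterror operand, so we shadow the incoming
// value in an ordinary temporary here and copy it back in a cleanup pushed
// below.)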
2821 llvm::Value *V = AI; 2822 if (FI.getExtParameterInfo(ArgNo).getABI() 2823 == ParameterABI::SwiftErrorResult) { 2824 QualType pointeeTy = Ty->getPointeeType(); 2825 assert(pointeeTy->isPointerType()); 2826 Address temp = 2827 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2828 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2829 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2830 Builder.CreateStore(incomingErrorValue, temp); 2831 V = temp.getPointer(); 2832 2833 // Push a cleanup to copy the value back at the end of the function. 2834 // The convention does not guarantee that the value will be written 2835 // back if the function exits with an unwind exception. 2836 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2837 } 2838 2839 // Ensure the argument is the correct type. 2840 if (V->getType() != ArgI.getCoerceToType()) 2841 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2842 2843 if (isPromoted) 2844 V = emitArgumentDemotion(*this, Arg, V); 2845 2846 // Because of merging of function types from multiple decls it is 2847 // possible for the type of an argument to not match the corresponding 2848 // type in the function type. Since we are codegening the callee 2849 // in here, add a cast to the argument type. 2850 llvm::Type *LTy = ConvertType(Arg->getType()); 2851 if (V->getType() != LTy) 2852 V = Builder.CreateBitCast(V, LTy); 2853 2854 ArgVals.push_back(ParamValue::forDirect(V)); 2855 break; 2856 } 2857 2858 // VLST arguments are coerced to VLATs at the function boundary for 2859 // ABI consistency. If this is a VLST that was coerced to 2860 // a VLAT at the function boundary and the types match up, use 2861 // llvm.experimental.vector.extract to convert back to the original 2862 // VLST. 2863 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { 2864 llvm::Value *Coerced = Fn->getArg(FirstIRArg); 2865 if (auto *VecTyFrom = 2866 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { 2867 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 2868 // vector, bitcast the source and use a vector extract. 2869 auto PredType = 2870 llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2871 if (VecTyFrom == PredType && 2872 VecTyTo->getElementType() == Builder.getInt8Ty()) { 2873 VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2874 Coerced = Builder.CreateBitCast(Coerced, VecTyFrom); 2875 } 2876 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { 2877 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); 2878 2879 assert(NumIRArgs == 1); 2880 Coerced->setName(Arg->getName() + ".coerce"); 2881 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector( 2882 VecTyTo, Coerced, Zero, "castFixedSve"))); 2883 break; 2884 } 2885 } 2886 } 2887 2888 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2889 Arg->getName()); 2890 2891 // Pointer to store into. 2892 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2893 2894 // Fast-isel and the optimizer generally like scalar values better than 2895 // FCAs, so we flatten them if this is safe to do for this argument. 
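// For example, a parameter flattened to '{ i64, i64 }' arrives as two IR
// arguments; they are renamed '<arg>.coerce0' and '<arg>.coerce1' below and
// stored field by field into the alloca (a sketch of the common case where
// the pieces fit the destination).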
2896 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2897 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2898 STy->getNumElements() > 1) { 2899 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2900 llvm::Type *DstTy = Ptr.getElementType(); 2901 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2902 2903 Address AddrToStoreInto = Address::invalid(); 2904 if (SrcSize <= DstSize) { 2905 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2906 } else { 2907 AddrToStoreInto = 2908 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2909 } 2910 2911 assert(STy->getNumElements() == NumIRArgs); 2912 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2913 auto AI = Fn->getArg(FirstIRArg + i); 2914 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2915 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); 2916 Builder.CreateStore(AI, EltPtr); 2917 } 2918 2919 if (SrcSize > DstSize) { 2920 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2921 } 2922 2923 } else { 2924 // Simple case, just do a coerced store of the argument into the alloca. 2925 assert(NumIRArgs == 1); 2926 auto AI = Fn->getArg(FirstIRArg); 2927 AI->setName(Arg->getName() + ".coerce"); 2928 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); 2929 } 2930 2931 // Match to what EmitParmDecl is expecting for this type. 2932 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2933 llvm::Value *V = 2934 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2935 if (isPromoted) 2936 V = emitArgumentDemotion(*this, Arg, V); 2937 ArgVals.push_back(ParamValue::forDirect(V)); 2938 } else { 2939 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2940 } 2941 break; 2942 } 2943 2944 case ABIArgInfo::CoerceAndExpand: { 2945 // Reconstruct into a temporary. 2946 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2947 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2948 2949 auto coercionType = ArgI.getCoerceAndExpandType(); 2950 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2951 2952 unsigned argIndex = FirstIRArg; 2953 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2954 llvm::Type *eltType = coercionType->getElementType(i); 2955 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2956 continue; 2957 2958 auto eltAddr = Builder.CreateStructGEP(alloca, i); 2959 auto elt = Fn->getArg(argIndex++); 2960 Builder.CreateStore(elt, eltAddr); 2961 } 2962 assert(argIndex == FirstIRArg + NumIRArgs); 2963 break; 2964 } 2965 2966 case ABIArgInfo::Expand: { 2967 // If this structure was expanded into multiple arguments then 2968 // we need to create a temporary and reconstruct it from the 2969 // arguments. 2970 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2971 LValue LV = MakeAddrLValue(Alloca, Ty); 2972 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2973 2974 auto FnArgIter = Fn->arg_begin() + FirstIRArg; 2975 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2976 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); 2977 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2978 auto AI = Fn->getArg(FirstIRArg + i); 2979 AI->setName(Arg->getName() + "." + Twine(i)); 2980 } 2981 break; 2982 } 2983 2984 case ABIArgInfo::Ignore: 2985 assert(NumIRArgs == 0); 2986 // Initialize the local variable appropriately. 
2987 if (!hasScalarEvaluationKind(Ty)) {
2988 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2989 } else {
2990 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2991 ArgVals.push_back(ParamValue::forDirect(U));
2992 }
2993 break;
2994 }
2995 }
2996
2997 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2998 for (int I = Args.size() - 1; I >= 0; --I)
2999 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3000 } else {
3001 for (unsigned I = 0, E = Args.size(); I != E; ++I)
3002 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3003 }
3004 }
3005
3006 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
3007 while (insn->use_empty()) {
3008 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3009 if (!bitcast) return;
3010
3011 // This is "safe" because we would have used a ConstantExpr otherwise.
3012 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3013 bitcast->eraseFromParent();
3014 }
3015 }
3016
3017 /// Try to emit a fused autorelease of a return result.
3018 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3019 llvm::Value *result) {
3020 // We must immediately follow the instruction that produced the result.
3021 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3022 if (BB->empty()) return nullptr;
3023 if (&BB->back() != result) return nullptr;
3024
3025 llvm::Type *resultType = result->getType();
3026
3027 // result is in a BasicBlock and is therefore an Instruction.
3028 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3029
3030 SmallVector<llvm::Instruction *, 4> InstsToKill;
3031
3032 // Look for:
3033 // %generator = bitcast %type1* %generator2 to %type2*
3034 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3035 // We would have emitted this as a constant if the operand weren't
3036 // an Instruction.
3037 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3038
3039 // Require the generator to be immediately followed by the cast.
3040 if (generator->getNextNode() != bitcast)
3041 return nullptr;
3042
3043 InstsToKill.push_back(bitcast);
3044 }
3045
3046 // Look for:
3047 // %generator = call i8* @objc_retain(i8* %originalResult)
3048 // or
3049 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3050 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3051 if (!call) return nullptr;
3052
3053 bool doRetainAutorelease;
3054
3055 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3056 doRetainAutorelease = true;
3057 } else if (call->getCalledOperand() ==
3058 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3059 doRetainAutorelease = false;
3060
3061 // If we emitted an assembly marker for this call (and the
3062 // ARCEntrypoints field should have been set if so), go looking
3063 // for that call. If we can't find it, we can't do this
3064 // optimization. But it should always be the immediately previous
3065 // instruction, unless we needed bitcasts around the call.
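// The expected pattern here is roughly (illustrative; the marker is
// target-specific inline asm, e.g. "mov fp, fp" on ARM):
//   call void asm sideeffect "mov\09fp, fp", ""()
//   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %v)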
3066 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 3067 llvm::Instruction *prev = call->getPrevNode(); 3068 assert(prev); 3069 if (isa<llvm::BitCastInst>(prev)) { 3070 prev = prev->getPrevNode(); 3071 assert(prev); 3072 } 3073 assert(isa<llvm::CallInst>(prev)); 3074 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == 3075 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 3076 InstsToKill.push_back(prev); 3077 } 3078 } else { 3079 return nullptr; 3080 } 3081 3082 result = call->getArgOperand(0); 3083 InstsToKill.push_back(call); 3084 3085 // Keep killing bitcasts, for sanity. Note that we no longer care 3086 // about precise ordering as long as there's exactly one use. 3087 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 3088 if (!bitcast->hasOneUse()) break; 3089 InstsToKill.push_back(bitcast); 3090 result = bitcast->getOperand(0); 3091 } 3092 3093 // Delete all the unnecessary instructions, from latest to earliest. 3094 for (auto *I : InstsToKill) 3095 I->eraseFromParent(); 3096 3097 // Do the fused retain/autorelease if we were asked to. 3098 if (doRetainAutorelease) 3099 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 3100 3101 // Cast back to the result type. 3102 return CGF.Builder.CreateBitCast(result, resultType); 3103 } 3104 3105 /// If this is a +1 of the value of an immutable 'self', remove it. 3106 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 3107 llvm::Value *result) { 3108 // This is only applicable to a method with an immutable 'self'. 3109 const ObjCMethodDecl *method = 3110 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 3111 if (!method) return nullptr; 3112 const VarDecl *self = method->getSelfDecl(); 3113 if (!self->getType().isConstQualified()) return nullptr; 3114 3115 // Look for a retain call. 3116 llvm::CallInst *retainCall = 3117 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 3118 if (!retainCall || retainCall->getCalledOperand() != 3119 CGF.CGM.getObjCEntrypoints().objc_retain) 3120 return nullptr; 3121 3122 // Look for an ordinary load of 'self'. 3123 llvm::Value *retainedValue = retainCall->getArgOperand(0); 3124 llvm::LoadInst *load = 3125 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 3126 if (!load || load->isAtomic() || load->isVolatile() || 3127 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 3128 return nullptr; 3129 3130 // Okay! Burn it all down. This relies for correctness on the 3131 // assumption that the retain is emitted as part of the return and 3132 // that thereafter everything is used "linearly". 3133 llvm::Type *resultType = result->getType(); 3134 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 3135 assert(retainCall->use_empty()); 3136 retainCall->eraseFromParent(); 3137 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 3138 3139 return CGF.Builder.CreateBitCast(load, resultType); 3140 } 3141 3142 /// Emit an ARC autorelease of the result of a function. 3143 /// 3144 /// \return the value to actually return from the function 3145 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 3146 llvm::Value *result) { 3147 // If we're returning 'self', kill the initial retain. This is a 3148 // heuristic attempt to "encourage correctness" in the really unfortunate 3149 // case where we have a return of self during a dealloc and we desperately 3150 // need to avoid the possible autorelease. 
3151 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3152 return self;
3153
3154 // At -O0, try to emit a fused retain/autorelease.
3155 if (CGF.shouldUseFusedARCCalls())
3156 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3157 return fused;
3158
3159 return CGF.EmitARCAutoreleaseReturnValue(result);
3160 }
3161
3162 /// Heuristically search for a dominating store to the return-value slot.
3163 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3164 // Check if a User is a store whose pointer operand is the ReturnValue.
3165 // We are looking for stores to the ReturnValue, not for stores of the
3166 // ReturnValue to some other location.
3167 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3168 auto *SI = dyn_cast<llvm::StoreInst>(U);
3169 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3170 return nullptr;
3171 // These aren't actually possible for non-coerced returns, and we
3172 // only care about non-coerced returns on this code path.
3173 assert(!SI->isAtomic() && !SI->isVolatile());
3174 return SI;
3175 };
3176 // If there are multiple uses of the return-value slot, just check
3177 // for something immediately preceding the IP. Sometimes this can
3178 // happen with how we generate implicit-returns; it can also happen
3179 // with noreturn cleanups.
3180 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3181 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3182 if (IP->empty()) return nullptr;
3183 llvm::Instruction *I = &IP->back();
3184
3185 // Skip lifetime markers.
3186 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3187 IE = IP->rend();
3188 II != IE; ++II) {
3189 if (llvm::IntrinsicInst *Intrinsic =
3190 dyn_cast<llvm::IntrinsicInst>(&*II)) {
3191 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3192 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3193 ++II;
3194 if (II == IE)
3195 break;
3196 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3197 continue;
3198 }
3199 }
3200 I = &*II;
3201 break;
3202 }
3203
3204 return GetStoreIfValid(I);
3205 }
3206
3207 llvm::StoreInst *store =
3208 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3209 if (!store) return nullptr;
3210
3211 // Now do a quick-and-dirty dominance check: just walk up the
3212 // single-predecessor chain from the current insertion point.
3213 llvm::BasicBlock *StoreBB = store->getParent();
3214 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3215 while (IP != StoreBB) {
3216 if (!(IP = IP->getSinglePredecessor()))
3217 return nullptr;
3218 }
3219
3220 // Okay, the store's basic block dominates the insertion point; we
3221 // can do our thing.
3222 return store;
3223 }
3224
3225 // Helper functions for EmitCMSEClearRecord
3226
3227 // Set the bits corresponding to a field having width `BitWidth` and located at
3228 // offset `BitOffset` (from the least significant bit) within a storage unit of
3229 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3230 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
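// For example, with CharWidth == 8, BitOffset == 4 and BitWidth == 10 this
// sets Bits[0] |= 0xF0 and Bits[1] |= 0x3F.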
3231 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, 3232 int BitWidth, int CharWidth) { 3233 assert(CharWidth <= 64); 3234 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); 3235 3236 int Pos = 0; 3237 if (BitOffset >= CharWidth) { 3238 Pos += BitOffset / CharWidth; 3239 BitOffset = BitOffset % CharWidth; 3240 } 3241 3242 const uint64_t Used = (uint64_t(1) << CharWidth) - 1; 3243 if (BitOffset + BitWidth >= CharWidth) { 3244 Bits[Pos++] |= (Used << BitOffset) & Used; 3245 BitWidth -= CharWidth - BitOffset; 3246 BitOffset = 0; 3247 } 3248 3249 while (BitWidth >= CharWidth) { 3250 Bits[Pos++] = Used; 3251 BitWidth -= CharWidth; 3252 } 3253 3254 if (BitWidth > 0) 3255 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; 3256 } 3257 3258 // Set the bits corresponding to a field having width `BitWidth` and located at 3259 // offset `BitOffset` (from the least significant bit) within a storage unit of 3260 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of 3261 // `Bits` corresponds to one target byte. Use target endian layout. 3262 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, 3263 int StorageSize, int BitOffset, int BitWidth, 3264 int CharWidth, bool BigEndian) { 3265 3266 SmallVector<uint64_t, 8> TmpBits(StorageSize); 3267 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); 3268 3269 if (BigEndian) 3270 std::reverse(TmpBits.begin(), TmpBits.end()); 3271 3272 for (uint64_t V : TmpBits) 3273 Bits[StorageOffset++] |= V; 3274 } 3275 3276 static void setUsedBits(CodeGenModule &, QualType, int, 3277 SmallVectorImpl<uint64_t> &); 3278 3279 // Set the bits in `Bits`, which correspond to the value representations of 3280 // the actual members of the record type `RTy`. Note that this function does 3281 // not handle base classes, virtual tables, etc, since they cannot happen in 3282 // CMSE function arguments or return. The bit mask corresponds to the target 3283 // memory layout, i.e. it's endian dependent. 3284 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, 3285 SmallVectorImpl<uint64_t> &Bits) { 3286 ASTContext &Context = CGM.getContext(); 3287 int CharWidth = Context.getCharWidth(); 3288 const RecordDecl *RD = RTy->getDecl()->getDefinition(); 3289 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD); 3290 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); 3291 3292 int Idx = 0; 3293 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { 3294 const FieldDecl *F = *I; 3295 3296 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || 3297 F->getType()->isIncompleteArrayType()) 3298 continue; 3299 3300 if (F->isBitField()) { 3301 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F); 3302 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(), 3303 BFI.StorageSize / CharWidth, BFI.Offset, 3304 BFI.Size, CharWidth, 3305 CGM.getDataLayout().isBigEndian()); 3306 continue; 3307 } 3308 3309 setUsedBits(CGM, F->getType(), 3310 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits); 3311 } 3312 } 3313 3314 // Set the bits in `Bits`, which correspond to the value representations of 3315 // the elements of an array type `ATy`. 
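// The bit mask for a single element is computed once into a temporary and
// then OR'd into `Bits` at each element's offset in turn.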
3316 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, 3317 int Offset, SmallVectorImpl<uint64_t> &Bits) { 3318 const ASTContext &Context = CGM.getContext(); 3319 3320 QualType ETy = Context.getBaseElementType(ATy); 3321 int Size = Context.getTypeSizeInChars(ETy).getQuantity(); 3322 SmallVector<uint64_t, 4> TmpBits(Size); 3323 setUsedBits(CGM, ETy, 0, TmpBits); 3324 3325 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) { 3326 auto Src = TmpBits.begin(); 3327 auto Dst = Bits.begin() + Offset + I * Size; 3328 for (int J = 0; J < Size; ++J) 3329 *Dst++ |= *Src++; 3330 } 3331 } 3332 3333 // Set the bits in `Bits`, which correspond to the value representations of 3334 // the type `QTy`. 3335 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, 3336 SmallVectorImpl<uint64_t> &Bits) { 3337 if (const auto *RTy = QTy->getAs<RecordType>()) 3338 return setUsedBits(CGM, RTy, Offset, Bits); 3339 3340 ASTContext &Context = CGM.getContext(); 3341 if (const auto *ATy = Context.getAsConstantArrayType(QTy)) 3342 return setUsedBits(CGM, ATy, Offset, Bits); 3343 3344 int Size = Context.getTypeSizeInChars(QTy).getQuantity(); 3345 if (Size <= 0) 3346 return; 3347 3348 std::fill_n(Bits.begin() + Offset, Size, 3349 (uint64_t(1) << Context.getCharWidth()) - 1); 3350 } 3351 3352 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits, 3353 int Pos, int Size, int CharWidth, 3354 bool BigEndian) { 3355 assert(Size > 0); 3356 uint64_t Mask = 0; 3357 if (BigEndian) { 3358 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; 3359 ++P) 3360 Mask = (Mask << CharWidth) | *P; 3361 } else { 3362 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; 3363 do 3364 Mask = (Mask << CharWidth) | *--P; 3365 while (P != End); 3366 } 3367 return Mask; 3368 } 3369 3370 // Emit code to clear the bits in a record, which aren't a part of any user 3371 // declared member, when the record is a function return. 3372 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, 3373 llvm::IntegerType *ITy, 3374 QualType QTy) { 3375 assert(Src->getType() == ITy); 3376 assert(ITy->getScalarSizeInBits() <= 64); 3377 3378 const llvm::DataLayout &DataLayout = CGM.getDataLayout(); 3379 int Size = DataLayout.getTypeStoreSize(ITy); 3380 SmallVector<uint64_t, 4> Bits(Size); 3381 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); 3382 3383 int CharWidth = CGM.getContext().getCharWidth(); 3384 uint64_t Mask = 3385 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian()); 3386 3387 return Builder.CreateAnd(Src, Mask, "cmse.clear"); 3388 } 3389 3390 // Emit code to clear the bits in a record, which aren't a part of any user 3391 // declared member, when the record is a function argument. 3392 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, 3393 llvm::ArrayType *ATy, 3394 QualType QTy) { 3395 const llvm::DataLayout &DataLayout = CGM.getDataLayout(); 3396 int Size = DataLayout.getTypeStoreSize(ATy); 3397 SmallVector<uint64_t, 16> Bits(Size); 3398 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); 3399 3400 // Clear each element of the LLVM array. 
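// Each element covers CharsPerElt bytes of the mask: extract the element,
// AND it with its slice of the mask, and insert the result back.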
3401 int CharWidth = CGM.getContext().getCharWidth();
3402 int CharsPerElt =
3403 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3404 int MaskIndex = 0;
3405 llvm::Value *R = llvm::UndefValue::get(ATy);
3406 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3407 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3408 DataLayout.isBigEndian());
3409 MaskIndex += CharsPerElt;
3410 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3411 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3412 R = Builder.CreateInsertValue(R, T1, I);
3413 }
3414
3415 return R;
3416 }
3417
3418 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3419 bool EmitRetDbgLoc,
3420 SourceLocation EndLoc) {
3421 if (FI.isNoReturn()) {
3422 // Noreturn functions don't return.
3423 EmitUnreachable(EndLoc);
3424 return;
3425 }
3426
3427 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3428 // Naked functions don't have epilogues.
3429 Builder.CreateUnreachable();
3430 return;
3431 }
3432
3433 // Functions with no result always return void.
3434 if (!ReturnValue.isValid()) {
3435 Builder.CreateRetVoid();
3436 return;
3437 }
3438
3439 llvm::DebugLoc RetDbgLoc;
3440 llvm::Value *RV = nullptr;
3441 QualType RetTy = FI.getReturnType();
3442 const ABIArgInfo &RetAI = FI.getReturnInfo();
3443
3444 switch (RetAI.getKind()) {
3445 case ABIArgInfo::InAlloca:
3446 // Aggregates get evaluated directly into the destination. Sometimes we
3447 // need to return the sret value in a register, though.
3448 assert(hasAggregateEvaluationKind(RetTy));
3449 if (RetAI.getInAllocaSRet()) {
3450 llvm::Function::arg_iterator EI = CurFn->arg_end();
3451 --EI;
3452 llvm::Value *ArgStruct = &*EI;
3453 llvm::Value *SRet = Builder.CreateStructGEP(
3454 EI->getType()->getPointerElementType(), ArgStruct,
3455 RetAI.getInAllocaFieldIndex());
3456 llvm::Type *Ty =
3457 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3458 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3459 }
3460 break;
3461
3462 case ABIArgInfo::Indirect: {
3463 auto AI = CurFn->arg_begin();
3464 if (RetAI.isSRetAfterThis())
3465 ++AI;
3466 switch (getEvaluationKind(RetTy)) {
3467 case TEK_Complex: {
3468 ComplexPairTy RT =
3469 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3470 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3471 /*isInit*/ true);
3472 break;
3473 }
3474 case TEK_Aggregate:
3475 // Do nothing; aggregates get evaluated directly into the destination.
3476 break;
3477 case TEK_Scalar:
3478 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3479 MakeNaturalAlignAddrLValue(&*AI, RetTy),
3480 /*isInit*/ true);
3481 break;
3482 }
3483 break;
3484 }
3485
3486 case ABIArgInfo::Extend:
3487 case ABIArgInfo::Direct:
3488 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3489 RetAI.getDirectOffset() == 0) {
3490 // The internal return value temp will always have pointer-to-return-type
3491 // type, so just do a load.
3492
3493 // If there is a dominating store to ReturnValue, we can elide
3494 // the load, zap the store, and usually zap the alloca.
3495 if (llvm::StoreInst *SI =
3496 findDominatingStoreToReturnValue(*this)) {
3497 // Reuse the debug location from the store unless there is
3498 // cleanup code to be emitted between the store and return
3499 // instruction.
3500 if (EmitRetDbgLoc && !AutoreleaseResult)
3501 RetDbgLoc = SI->getDebugLoc();
3502 // Get the stored value and nuke the now-dead store.
3503 RV = SI->getValueOperand();
3504 SI->eraseFromParent();
3505
3506 // Otherwise, we have to do a simple load.
3507 } else {
3508 RV = Builder.CreateLoad(ReturnValue);
3509 }
3510 } else {
3511 // If the value is offset in memory, apply the offset now.
3512 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3513
3514 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3515 }
3516
3517 // In ARC, end functions that return a retainable type with a call
3518 // to objc_autoreleaseReturnValue.
3519 if (AutoreleaseResult) {
3520 #ifndef NDEBUG
3521 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3522 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3523 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
3524 // CurCodeDecl or BlockInfo.
3525 QualType RT;
3526
3527 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3528 RT = FD->getReturnType();
3529 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3530 RT = MD->getReturnType();
3531 else if (isa<BlockDecl>(CurCodeDecl))
3532 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3533 else
3534 llvm_unreachable("Unexpected function/method type");
3535
3536 assert(getLangOpts().ObjCAutoRefCount &&
3537 !FI.isReturnsRetained() &&
3538 RT->isObjCRetainableType());
3539 #endif
3540 RV = emitAutoreleaseOfResult(*this, RV);
3541 }
3542
3543 break;
3544
3545 case ABIArgInfo::Ignore:
3546 break;
3547
3548 case ABIArgInfo::CoerceAndExpand: {
3549 auto coercionType = RetAI.getCoerceAndExpandType();
3550
3551 // Load all of the coerced elements out into results.
3552 llvm::SmallVector<llvm::Value*, 4> results;
3553 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3554 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3555 auto coercedEltType = coercionType->getElementType(i);
3556 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3557 continue;
3558
3559 auto eltAddr = Builder.CreateStructGEP(addr, i);
3560 auto elt = Builder.CreateLoad(eltAddr);
3561 results.push_back(elt);
3562 }
3563
3564 // If we have one result, it's the single direct result type.
3565 if (results.size() == 1) {
3566 RV = results[0];
3567
3568 // Otherwise, we need to make a first-class aggregate.
3569 } else {
3570 // Construct a return type that lacks padding elements.
3571 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3572
3573 RV = llvm::UndefValue::get(returnType);
3574 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3575 RV = Builder.CreateInsertValue(RV, results[i], i);
3576 }
3577 }
3578 break;
3579 }
3580 case ABIArgInfo::Expand:
3581 case ABIArgInfo::IndirectAliased:
3582 llvm_unreachable("Invalid ABI kind for return argument");
3583 }
3584
3585 llvm::Instruction *Ret;
3586 if (RV) {
3587 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3588 // For certain return types, clear padding bits, as they may reveal
3589 // sensitive information.
3590 // Small struct/union types are passed as integers.
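// For example (illustrative), struct { char c; } returned from a
// cmse_nonsecure_entry function on ARMv8-M is widened to an i32 in r0;
// the 24 padding bits are cleared below so they cannot leak secure state.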
3591 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); 3592 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType())) 3593 RV = EmitCMSEClearRecord(RV, ITy, RetTy); 3594 } 3595 EmitReturnValueCheck(RV); 3596 Ret = Builder.CreateRet(RV); 3597 } else { 3598 Ret = Builder.CreateRetVoid(); 3599 } 3600 3601 if (RetDbgLoc) 3602 Ret->setDebugLoc(std::move(RetDbgLoc)); 3603 } 3604 3605 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 3606 // A current decl may not be available when emitting vtable thunks. 3607 if (!CurCodeDecl) 3608 return; 3609 3610 // If the return block isn't reachable, neither is this check, so don't emit 3611 // it. 3612 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) 3613 return; 3614 3615 ReturnsNonNullAttr *RetNNAttr = nullptr; 3616 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3617 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3618 3619 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3620 return; 3621 3622 // Prefer the returns_nonnull attribute if it's present. 3623 SourceLocation AttrLoc; 3624 SanitizerMask CheckKind; 3625 SanitizerHandler Handler; 3626 if (RetNNAttr) { 3627 assert(!requiresReturnValueNullabilityCheck() && 3628 "Cannot check nullability and the nonnull attribute"); 3629 AttrLoc = RetNNAttr->getLocation(); 3630 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3631 Handler = SanitizerHandler::NonnullReturn; 3632 } else { 3633 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3634 if (auto *TSI = DD->getTypeSourceInfo()) 3635 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3636 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3637 CheckKind = SanitizerKind::NullabilityReturn; 3638 Handler = SanitizerHandler::NullabilityReturn; 3639 } 3640 3641 SanitizerScope SanScope(this); 3642 3643 // Make sure the "return" source location is valid. If we're checking a 3644 // nullability annotation, make sure the preconditions for the check are met. 3645 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3646 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3647 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3648 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3649 if (requiresReturnValueNullabilityCheck()) 3650 CanNullCheck = 3651 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3652 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3653 EmitBlock(Check); 3654 3655 // Now do the null check. 3656 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3657 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3658 llvm::Value *DynamicData[] = {SLocPtr}; 3659 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3660 3661 EmitBlock(NoCheck); 3662 3663 #ifndef NDEBUG 3664 // The return location should not be used after the check has been emitted. 3665 ReturnLocation = Address::invalid(); 3666 #endif 3667 } 3668 3669 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3670 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3671 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3672 } 3673 3674 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3675 QualType Ty) { 3676 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3677 // placeholders. 
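// For now the placeholder is a load of an undef pointer-to-pointer, which is
// later replaced with the real inalloca slot address (see
// deferPlaceholderReplacement and DeferredReplacements).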
3678 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3679 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3680 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3681 3682 // FIXME: When we generate this IR in one pass, we shouldn't need 3683 // this win32-specific alignment hack. 3684 CharUnits Align = CharUnits::fromQuantity(4); 3685 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3686 3687 return AggValueSlot::forAddr(Address(Placeholder, Align), 3688 Ty.getQualifiers(), 3689 AggValueSlot::IsNotDestructed, 3690 AggValueSlot::DoesNotNeedGCBarriers, 3691 AggValueSlot::IsNotAliased, 3692 AggValueSlot::DoesNotOverlap); 3693 } 3694 3695 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3696 const VarDecl *param, 3697 SourceLocation loc) { 3698 // StartFunction converted the ABI-lowered parameter(s) into a 3699 // local alloca. We need to turn that into an r-value suitable 3700 // for EmitCall. 3701 Address local = GetAddrOfLocalVar(param); 3702 3703 QualType type = param->getType(); 3704 3705 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3706 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3707 } 3708 3709 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3710 // but the argument needs to be the original pointer. 3711 if (type->isReferenceType()) { 3712 args.add(RValue::get(Builder.CreateLoad(local)), type); 3713 3714 // In ARC, move out of consumed arguments so that the release cleanup 3715 // entered by StartFunction doesn't cause an over-release. This isn't 3716 // optimal -O0 code generation, but it should get cleaned up when 3717 // optimization is enabled. This also assumes that delegate calls are 3718 // performed exactly once for a set of arguments, but that should be safe. 3719 } else if (getLangOpts().ObjCAutoRefCount && 3720 param->hasAttr<NSConsumedAttr>() && 3721 type->isObjCRetainableType()) { 3722 llvm::Value *ptr = Builder.CreateLoad(local); 3723 auto null = 3724 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3725 Builder.CreateStore(null, local); 3726 args.add(RValue::get(ptr), type); 3727 3728 // For the most part, we just need to load the alloca, except that 3729 // aggregate r-values are actually pointers to temporaries. 3730 } else { 3731 args.add(convertTempToRValue(local, type, loc), type); 3732 } 3733 3734 // Deactivate the cleanup for the callee-destructed param that was pushed. 3735 if (type->isRecordType() && !CurFuncIsThunk && 3736 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3737 param->needsDestruction(getContext())) { 3738 EHScopeStack::stable_iterator cleanup = 3739 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3740 assert(cleanup.isValid() && 3741 "cleanup for callee-destructed param not recorded"); 3742 // This unreachable is a temporary marker which will be removed later. 3743 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3744 args.addArgCleanupDeactivation(cleanup, isActive); 3745 } 3746 } 3747 3748 static bool isProvablyNull(llvm::Value *addr) { 3749 return isa<llvm::ConstantPointerNull>(addr); 3750 } 3751 3752 /// Emit the actual writing-back of a writeback. 
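/// Roughly (illustrative), when the source address is not provably non-null,
/// this emits:
///   %icr.isnull = icmp eq i8** %src, null
///   br i1 %icr.isnull, label %icr.done, label %icr.writeback
/// icr.writeback:
///   ; load the temporary and store it back through %src
/// icr.done: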
3753 static void emitWriteback(CodeGenFunction &CGF, 3754 const CallArgList::Writeback &writeback) { 3755 const LValue &srcLV = writeback.Source; 3756 Address srcAddr = srcLV.getAddress(CGF); 3757 assert(!isProvablyNull(srcAddr.getPointer()) && 3758 "shouldn't have writeback for provably null argument"); 3759 3760 llvm::BasicBlock *contBB = nullptr; 3761 3762 // If the argument wasn't provably non-null, we need to null check 3763 // before doing the store. 3764 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3765 CGF.CGM.getDataLayout()); 3766 if (!provablyNonNull) { 3767 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3768 contBB = CGF.createBasicBlock("icr.done"); 3769 3770 llvm::Value *isNull = 3771 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3772 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3773 CGF.EmitBlock(writebackBB); 3774 } 3775 3776 // Load the value to writeback. 3777 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3778 3779 // Cast it back, in case we're writing an id to a Foo* or something. 3780 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3781 "icr.writeback-cast"); 3782 3783 // Perform the writeback. 3784 3785 // If we have a "to use" value, it's something we need to emit a use 3786 // of. This has to be carefully threaded in: if it's done after the 3787 // release it's potentially undefined behavior (and the optimizer 3788 // will ignore it), and if it happens before the retain then the 3789 // optimizer could move the release there. 3790 if (writeback.ToUse) { 3791 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3792 3793 // Retain the new value. No need to block-copy here: the block's 3794 // being passed up the stack. 3795 value = CGF.EmitARCRetainNonBlock(value); 3796 3797 // Emit the intrinsic use here. 3798 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3799 3800 // Load the old value (primitively). 3801 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3802 3803 // Put the new value in place (primitively). 3804 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3805 3806 // Release the old value. 3807 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3808 3809 // Otherwise, we can just do a normal lvalue store. 3810 } else { 3811 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3812 } 3813 3814 // Jump to the continuation block. 3815 if (!provablyNonNull) 3816 CGF.EmitBlock(contBB); 3817 } 3818 3819 static void emitWritebacks(CodeGenFunction &CGF, 3820 const CallArgList &args) { 3821 for (const auto &I : args.writebacks()) 3822 emitWriteback(CGF, I); 3823 } 3824 3825 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3826 const CallArgList &CallArgs) { 3827 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3828 CallArgs.getCleanupsToDeactivate(); 3829 // Iterate in reverse to increase the likelihood of popping the cleanup. 3830 for (const auto &I : llvm::reverse(Cleanups)) { 3831 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3832 I.IsActiveIP->eraseFromParent(); 3833 } 3834 } 3835 3836 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3837 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3838 if (uop->getOpcode() == UO_AddrOf) 3839 return uop->getSubExpr(); 3840 return nullptr; 3841 } 3842 3843 /// Emit an argument that's being passed call-by-writeback. 
That is, 3844 /// we are passing the address of an __autoreleased temporary; it 3845 /// might be copy-initialized with the current value of the given 3846 /// address, but it will definitely be copied out of after the call. 3847 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3848 const ObjCIndirectCopyRestoreExpr *CRE) { 3849 LValue srcLV; 3850 3851 // Make an optimistic effort to emit the address as an l-value. 3852 // This can fail if the argument expression is more complicated. 3853 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3854 srcLV = CGF.EmitLValue(lvExpr); 3855 3856 // Otherwise, just emit it as a scalar. 3857 } else { 3858 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3859 3860 QualType srcAddrType = 3861 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3862 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3863 } 3864 Address srcAddr = srcLV.getAddress(CGF); 3865 3866 // The dest and src types don't necessarily match in LLVM terms 3867 // because of the crazy ObjC compatibility rules. 3868 3869 llvm::PointerType *destType = 3870 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3871 3872 // If the address is a constant null, just pass the appropriate null. 3873 if (isProvablyNull(srcAddr.getPointer())) { 3874 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3875 CRE->getType()); 3876 return; 3877 } 3878 3879 // Create the temporary. 3880 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3881 CGF.getPointerAlign(), 3882 "icr.temp"); 3883 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3884 // and that cleanup will be conditional if we can't prove that the l-value 3885 // isn't null, so we need to register a dominating point so that the cleanups 3886 // system will make valid IR. 3887 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3888 3889 // Zero-initialize it if we're not doing a copy-initialization. 3890 bool shouldCopy = CRE->shouldCopy(); 3891 if (!shouldCopy) { 3892 llvm::Value *null = 3893 llvm::ConstantPointerNull::get( 3894 cast<llvm::PointerType>(destType->getElementType())); 3895 CGF.Builder.CreateStore(null, temp); 3896 } 3897 3898 llvm::BasicBlock *contBB = nullptr; 3899 llvm::BasicBlock *originBB = nullptr; 3900 3901 // If the address is *not* known to be non-null, we need to switch. 3902 llvm::Value *finalArgument; 3903 3904 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3905 CGF.CGM.getDataLayout()); 3906 if (provablyNonNull) { 3907 finalArgument = temp.getPointer(); 3908 } else { 3909 llvm::Value *isNull = 3910 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3911 3912 finalArgument = CGF.Builder.CreateSelect(isNull, 3913 llvm::ConstantPointerNull::get(destType), 3914 temp.getPointer(), "icr.argument"); 3915 3916 // If we need to copy, then the load has to be conditional, which 3917 // means we need control flow. 3918 if (shouldCopy) { 3919 originBB = CGF.Builder.GetInsertBlock(); 3920 contBB = CGF.createBasicBlock("icr.cont"); 3921 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3922 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3923 CGF.EmitBlock(copyBB); 3924 condEval.begin(CGF); 3925 } 3926 } 3927 3928 llvm::Value *valueToUse = nullptr; 3929 3930 // Perform a copy if necessary. 
3931 if (shouldCopy) { 3932 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3933 assert(srcRV.isScalar()); 3934 3935 llvm::Value *src = srcRV.getScalarVal(); 3936 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3937 "icr.cast"); 3938 3939 // Use an ordinary store, not a store-to-lvalue. 3940 CGF.Builder.CreateStore(src, temp); 3941 3942 // If optimization is enabled, and the value was held in a 3943 // __strong variable, we need to tell the optimizer that this 3944 // value has to stay alive until we're doing the store back. 3945 // This is because the temporary is effectively unretained, 3946 // and so otherwise we can violate the high-level semantics. 3947 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3948 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3949 valueToUse = src; 3950 } 3951 } 3952 3953 // Finish the control flow if we needed it. 3954 if (shouldCopy && !provablyNonNull) { 3955 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3956 CGF.EmitBlock(contBB); 3957 3958 // Make a phi for the value to intrinsically use. 3959 if (valueToUse) { 3960 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3961 "icr.to-use"); 3962 phiToUse->addIncoming(valueToUse, copyBB); 3963 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3964 originBB); 3965 valueToUse = phiToUse; 3966 } 3967 3968 condEval.end(CGF); 3969 } 3970 3971 args.addWriteback(srcLV, temp, valueToUse); 3972 args.add(RValue::get(finalArgument), CRE->getType()); 3973 } 3974 3975 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3976 assert(!StackBase); 3977 3978 // Save the stack. 3979 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3980 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3981 } 3982 3983 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3984 if (StackBase) { 3985 // Restore the stack after the call. 3986 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3987 CGF.Builder.CreateCall(F, StackBase); 3988 } 3989 } 3990 3991 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3992 SourceLocation ArgLoc, 3993 AbstractCallee AC, 3994 unsigned ParmNum) { 3995 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3996 SanOpts.has(SanitizerKind::NullabilityArg))) 3997 return; 3998 3999 // The param decl may be missing in a variadic function. 4000 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 4001 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 4002 4003 // Prefer the nonnull attribute if it's present. 
4004 const NonNullAttr *NNAttr = nullptr; 4005 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 4006 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 4007 4008 bool CanCheckNullability = false; 4009 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 4010 auto Nullability = PVD->getType()->getNullability(getContext()); 4011 CanCheckNullability = Nullability && 4012 *Nullability == NullabilityKind::NonNull && 4013 PVD->getTypeSourceInfo(); 4014 } 4015 4016 if (!NNAttr && !CanCheckNullability) 4017 return; 4018 4019 SourceLocation AttrLoc; 4020 SanitizerMask CheckKind; 4021 SanitizerHandler Handler; 4022 if (NNAttr) { 4023 AttrLoc = NNAttr->getLocation(); 4024 CheckKind = SanitizerKind::NonnullAttribute; 4025 Handler = SanitizerHandler::NonnullArg; 4026 } else { 4027 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 4028 CheckKind = SanitizerKind::NullabilityArg; 4029 Handler = SanitizerHandler::NullabilityArg; 4030 } 4031 4032 SanitizerScope SanScope(this); 4033 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType); 4034 llvm::Constant *StaticData[] = { 4035 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 4036 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 4037 }; 4038 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 4039 } 4040 4041 // Check if the call is going to use the inalloca convention. This needs to 4042 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged 4043 // later, so we can't check it directly. 4044 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, 4045 ArrayRef<QualType> ArgTypes) { 4046 // The Swift calling conventions don't go through the target-specific 4047 // argument classification, they never use inalloca. 4048 // TODO: Consider limiting inalloca use to only calling conventions supported 4049 // by MSVC. 4050 if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync) 4051 return false; 4052 if (!CGM.getTarget().getCXXABI().isMicrosoft()) 4053 return false; 4054 return llvm::any_of(ArgTypes, [&](QualType Ty) { 4055 return isInAllocaArgument(CGM.getCXXABI(), Ty); 4056 }); 4057 } 4058 4059 #ifndef NDEBUG 4060 // Determine whether the given argument is an Objective-C method 4061 // that may have type parameters in its signature. 4062 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { 4063 const DeclContext *dc = method->getDeclContext(); 4064 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) { 4065 return classDecl->getTypeParamListAsWritten(); 4066 } 4067 4068 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) { 4069 return catDecl->getTypeParamList(); 4070 } 4071 4072 return false; 4073 } 4074 #endif 4075 4076 /// EmitCallArgs - Emit call arguments for a function. 4077 void CodeGenFunction::EmitCallArgs( 4078 CallArgList &Args, PrototypeWrapper Prototype, 4079 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 4080 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 4081 SmallVector<QualType, 16> ArgTypes; 4082 4083 assert((ParamsToSkip == 0 || Prototype.P) && 4084 "Can't skip parameters if type info is not provided"); 4085 4086 // This variable only captures *explicitly* written conventions, not those 4087 // applied by default via command line flags or target defaults, such as 4088 // thiscall, aapcs, stdcall via -mrtd, etc. 
Computing that correctly would
4089 // require knowing if this is a C++ instance method or being able to see
4090 // unprototyped FunctionTypes.
4091 CallingConv ExplicitCC = CC_C;
4092
4093 // First, if a prototype was provided, use those argument types.
4094 bool IsVariadic = false;
4095 if (Prototype.P) {
4096 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4097 if (MD) {
4098 IsVariadic = MD->isVariadic();
4099 ExplicitCC = getCallingConventionForDecl(
4100 MD, CGM.getTarget().getTriple().isOSWindows());
4101 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4102 MD->param_type_end());
4103 } else {
4104 const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4105 IsVariadic = FPT->isVariadic();
4106 ExplicitCC = FPT->getExtInfo().getCC();
4107 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4108 FPT->param_type_end());
4109 }
4110
4111 #ifndef NDEBUG
4112 // Check that the prototyped types match the argument expression types.
4113 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4114 CallExpr::const_arg_iterator Arg = ArgRange.begin();
4115 for (QualType Ty : ArgTypes) {
4116 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4117 assert(
4118 (isGenericMethod || Ty->isVariablyModifiedType() ||
4119 Ty.getNonReferenceType()->isObjCRetainableType() ||
4120 getContext()
4121 .getCanonicalType(Ty.getNonReferenceType())
4122 .getTypePtr() ==
4123 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4124 "type mismatch in call argument!");
4125 ++Arg;
4126 }
4127
4128 // Either we've emitted all the call args, or we have a call to a variadic
4129 // function.
4130 assert((Arg == ArgRange.end() || IsVariadic) &&
4131 "Extra arguments in non-variadic function!");
4132 #endif
4133 }
4134
4135 // If we still have any arguments, emit them using the type of the argument.
4136 for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
4137 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4138 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4139
4140 // We must evaluate arguments from right to left in the MS C++ ABI,
4141 // because arguments are destroyed left to right in the callee. As a special
4142 // case, there are certain language constructs that require left-to-right
4143 // evaluation, and in those cases we consider the evaluation order requirement
4144 // to trump the "destruction order is reverse construction order" guarantee.
4145 bool LeftToRight =
4146 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4147 ? Order == EvaluationOrder::ForceLeftToRight
4148 : Order != EvaluationOrder::ForceRightToLeft;
4149
4150 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4151 RValue EmittedArg) {
4152 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4153 return;
4154 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4155 if (PS == nullptr)
4156 return;
4157
4158 const auto &Context = getContext();
4159 auto SizeTy = Context.getSizeType();
4160 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4161 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4162 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4163 EmittedArg.getScalarVal(),
4164 PS->isDynamic());
4165 Args.add(RValue::get(V), SizeTy);
4166 // If we're emitting args in reverse, be sure to do so with
4167 // pass_object_size, as well.
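// That is, swap the size argument with the pointer argument it describes,
// so that after the final std::reverse below the size still immediately
// follows its argument.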
4168 if (!LeftToRight)
4169 std::swap(Args.back(), *(&Args.back() - 1));
4170 };
4171
4172 // Insert a stack save if we're going to need any inalloca args.
4173 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4174 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4175 "inalloca only supported on x86");
4176 Args.allocateArgumentMemory(*this);
4177 }
4178
4179 // Evaluate each argument in the appropriate order.
4180 size_t CallArgsStart = Args.size();
4181 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4182 unsigned Idx = LeftToRight ? I : E - I - 1;
4183 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4184 unsigned InitialArgSize = Args.size();
4185 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4186 // the argument and parameter match or the objc method is parameterized.
4187 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4188 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4189 ArgTypes[Idx]) ||
4190 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4191 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4192 "Argument and parameter types don't match");
4193 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4194 // In particular, we depend on it being the last arg in Args, and the
4195 // objectsize bits depend on there only being one arg if !LeftToRight.
4196 assert(InitialArgSize + 1 == Args.size() &&
4197 "The code below depends on only adding one arg per EmitCallArg");
4198 (void)InitialArgSize;
4199 // Since pointer arguments are never emitted as LValues, it is safe to emit
4200 // the non-null argument check for r-values only.
4201 if (!Args.back().hasLValue()) {
4202 RValue RVArg = Args.back().getKnownRValue();
4203 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4204 ParamsToSkip + Idx);
4205 // @llvm.objectsize should never have side-effects and shouldn't need
4206 // destruction/cleanups, so we can safely "emit" it after its arg,
4207 // regardless of right-to-leftness.
4208 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4209 }
4210 }
4211
4212 if (!LeftToRight) {
4213 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4214 // IR function.
4215 std::reverse(Args.begin() + CallArgsStart, Args.end()); 4216 } 4217 } 4218 4219 namespace { 4220 4221 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 4222 DestroyUnpassedArg(Address Addr, QualType Ty) 4223 : Addr(Addr), Ty(Ty) {} 4224 4225 Address Addr; 4226 QualType Ty; 4227 4228 void Emit(CodeGenFunction &CGF, Flags flags) override { 4229 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 4230 if (DtorKind == QualType::DK_cxx_destructor) { 4231 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 4232 assert(!Dtor->isTrivial()); 4233 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 4234 /*Delegating=*/false, Addr, Ty); 4235 } else { 4236 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 4237 } 4238 } 4239 }; 4240 4241 struct DisableDebugLocationUpdates { 4242 CodeGenFunction &CGF; 4243 bool disabledDebugInfo; 4244 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 4245 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 4246 CGF.disableDebugInfo(); 4247 } 4248 ~DisableDebugLocationUpdates() { 4249 if (disabledDebugInfo) 4250 CGF.enableDebugInfo(); 4251 } 4252 }; 4253 4254 } // end anonymous namespace 4255 4256 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 4257 if (!HasLV) 4258 return RV; 4259 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 4260 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 4261 LV.isVolatile()); 4262 IsUsed = true; 4263 return RValue::getAggregate(Copy.getAddress(CGF)); 4264 } 4265 4266 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 4267 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 4268 if (!HasLV && RV.isScalar()) 4269 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); 4270 else if (!HasLV && RV.isComplex()) 4271 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 4272 else { 4273 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); 4274 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 4275 // We assume that call args are never copied into subobjects. 4276 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 4277 HasLV ? LV.isVolatileQualified() 4278 : RV.isVolatileQualified()); 4279 } 4280 IsUsed = true; 4281 } 4282 4283 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 4284 QualType type) { 4285 DisableDebugLocationUpdates Dis(*this, E); 4286 if (const ObjCIndirectCopyRestoreExpr *CRE 4287 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 4288 assert(getLangOpts().ObjCAutoRefCount); 4289 return emitWritebackArg(*this, args, CRE); 4290 } 4291 4292 assert(type->isReferenceType() == E->isGLValue() && 4293 "reference binding to unmaterialized r-value!"); 4294 4295 if (E->isGLValue()) { 4296 assert(E->getObjectKind() == OK_Ordinary); 4297 return args.add(EmitReferenceBindingToExpr(E), type); 4298 } 4299 4300 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 4301 4302 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 4303 // However, we still have to push an EH-only cleanup in case we unwind before 4304 // we make it to the call. 4305 if (type->isRecordType() && 4306 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 4307 // If we're using inalloca, use the argument memory. Otherwise, use a 4308 // temporary. 4309 AggValueSlot Slot = args.isUsingInAlloca() 4310 ? 
createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp"); 4311 4312 bool DestroyedInCallee = true, NeedsEHCleanup = true; 4313 if (const auto *RD = type->getAsCXXRecordDecl()) 4314 DestroyedInCallee = RD->hasNonTrivialDestructor(); 4315 else 4316 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 4317 4318 if (DestroyedInCallee) 4319 Slot.setExternallyDestructed(); 4320 4321 EmitAggExpr(E, Slot); 4322 RValue RV = Slot.asRValue(); 4323 args.add(RV, type); 4324 4325 if (DestroyedInCallee && NeedsEHCleanup) { 4326 // Create a no-op GEP between the placeholder and the cleanup so we can 4327 // RAUW it successfully. It also serves as a marker of the first 4328 // instruction where the cleanup is active. 4329 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 4330 type); 4331 // This unreachable is a temporary marker which will be removed later. 4332 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 4333 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 4334 } 4335 return; 4336 } 4337 4338 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 4339 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 4340 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 4341 assert(L.isSimple()); 4342 args.addUncopiedAggregate(L, type); 4343 return; 4344 } 4345 4346 args.add(EmitAnyExprToTemp(E), type); 4347 } 4348 4349 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 4350 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 4351 // implicitly widens null pointer constants that are arguments to varargs 4352 // functions to pointer-sized ints. 4353 if (!getTarget().getTriple().isOSWindows()) 4354 return Arg->getType(); 4355 4356 if (Arg->getType()->isIntegerType() && 4357 getContext().getTypeSize(Arg->getType()) < 4358 getContext().getTargetInfo().getPointerWidth(0) && 4359 Arg->isNullPointerConstant(getContext(), 4360 Expr::NPC_ValueDependentIsNotNull)) { 4361 return getContext().getIntPtrType(); 4362 } 4363 4364 return Arg->getType(); 4365 } 4366 4367 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4368 // optimizer it can aggressively ignore unwind edges. 4369 void 4370 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 4371 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 4372 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 4373 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 4374 CGM.getNoObjCARCExceptionsMetadata()); 4375 } 4376 4377 /// Emits a call to the given no-arguments nounwind runtime function. 4378 llvm::CallInst * 4379 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4380 const llvm::Twine &name) { 4381 return EmitNounwindRuntimeCall(callee, None, name); 4382 } 4383 4384 /// Emits a call to the given nounwind runtime function. 4385 llvm::CallInst * 4386 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4387 ArrayRef<llvm::Value *> args, 4388 const llvm::Twine &name) { 4389 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 4390 call->setDoesNotThrow(); 4391 return call; 4392 } 4393 4394 /// Emits a simple call (never an invoke) to the given no-arguments 4395 /// runtime function. 4396 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4397 const llvm::Twine &name) { 4398 return EmitRuntimeCall(callee, None, name); 4399 } 4400 4401 // Calls which may throw must have operand bundles indicating which funclet 4402 // they are nested within. 
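// For example (illustrative), a call emitted inside a catchpad looks like:
//   %r = call i32 @f() [ "funclet"(token %catchpad) ]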
4403 SmallVector<llvm::OperandBundleDef, 1> 4404 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 4405 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4406 // There is no need for a funclet operand bundle if we aren't inside a 4407 // funclet. 4408 if (!CurrentFuncletPad) 4409 return BundleList; 4410 4411 // Skip intrinsics which cannot throw. 4412 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 4413 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 4414 return BundleList; 4415 4416 BundleList.emplace_back("funclet", CurrentFuncletPad); 4417 return BundleList; 4418 } 4419 4420 /// Emits a simple call (never an invoke) to the given runtime function. 4421 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4422 ArrayRef<llvm::Value *> args, 4423 const llvm::Twine &name) { 4424 llvm::CallInst *call = Builder.CreateCall( 4425 callee, args, getBundlesForFunclet(callee.getCallee()), name); 4426 call->setCallingConv(getRuntimeCC()); 4427 return call; 4428 } 4429 4430 /// Emits a call or invoke to the given noreturn runtime function. 4431 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 4432 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 4433 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4434 getBundlesForFunclet(callee.getCallee()); 4435 4436 if (getInvokeDest()) { 4437 llvm::InvokeInst *invoke = 4438 Builder.CreateInvoke(callee, 4439 getUnreachableBlock(), 4440 getInvokeDest(), 4441 args, 4442 BundleList); 4443 invoke->setDoesNotReturn(); 4444 invoke->setCallingConv(getRuntimeCC()); 4445 } else { 4446 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 4447 call->setDoesNotReturn(); 4448 call->setCallingConv(getRuntimeCC()); 4449 Builder.CreateUnreachable(); 4450 } 4451 } 4452 4453 /// Emits a call or invoke instruction to the given nullary runtime function. 4454 llvm::CallBase * 4455 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4456 const Twine &name) { 4457 return EmitRuntimeCallOrInvoke(callee, None, name); 4458 } 4459 4460 /// Emits a call or invoke instruction to the given runtime function. 4461 llvm::CallBase * 4462 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4463 ArrayRef<llvm::Value *> args, 4464 const Twine &name) { 4465 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 4466 call->setCallingConv(getRuntimeCC()); 4467 return call; 4468 } 4469 4470 /// Emits a call or invoke instruction to the given function, depending 4471 /// on the current state of the EH stack. 4472 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, 4473 ArrayRef<llvm::Value *> Args, 4474 const Twine &Name) { 4475 llvm::BasicBlock *InvokeDest = getInvokeDest(); 4476 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4477 getBundlesForFunclet(Callee.getCallee()); 4478 4479 llvm::CallBase *Inst; 4480 if (!InvokeDest) 4481 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 4482 else { 4483 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 4484 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 4485 Name); 4486 EmitBlock(ContBB); 4487 } 4488 4489 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4490 // optimizer it can aggressively ignore unwind edges. 
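// (AddObjCARCExceptionMetadata above only attaches the metadata when
// optimizing without -fobjc-arc-exceptions.)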
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(
      std::make_pair(llvm::WeakTrackingVH(Old), New));
}

namespace {

/// Specify the given \p NewAlign as the alignment of the return-value
/// attribute. If such an attribute already exists, reset it to the larger
/// of the two alignments.
LLVM_NODISCARD llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
  llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
  if (CurAlign >= NewAlign)
    return Attrs;
  llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
  return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
      .addRetAttribute(Ctx, AlignAttr);
}

template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
protected:
  CodeGenFunction &CGF;

  /// We do nothing if this is, or becomes, nullptr.
  const AlignedAttrTy *AA = nullptr;

  llvm::Value *Alignment = nullptr;      // May or may not be a constant.
  llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.

  AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : CGF(CGF_) {
    if (!FuncDecl)
      return;
    AA = FuncDecl->getAttr<AlignedAttrTy>();
  }

public:
  /// If we can, materialize the alignment as an attribute on the return value.
  LLVM_NODISCARD llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
      return Attrs;
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI)
      return Attrs;
    // We may legitimately have non-power-of-2 alignment here.
    // If so, this is UB land; emit it via `@llvm.assume` instead.
    if (!AlignmentCI->getValue().isPowerOf2())
      return Attrs;
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        CGF.getLLVMContext(), Attrs,
        llvm::Align(
            AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA = nullptr; // We're done. Disallow doing anything else.
    return NewAttrs;
  }

  /// Emit alignment assumption.
  /// This is a general fallback that we take if either there is an offset,
  /// or the alignment is variable, or we are sanitizing for alignment.
  void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
    if (!AA)
      return;
    CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
                                AA->getLocation(), Alignment, OffsetCI);
    AA = nullptr; // We're done. Disallow doing anything else.
  }
};

/// Helper data structure to emit `AssumeAlignedAttr`.
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
public:
  AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // It is guaranteed that the alignment/offset are constants.
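    // (Sema is expected to have rejected non-constant arguments to
    // assume_aligned, which is what makes the unchecked casts below safe.)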
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
      if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
        OffsetCI = nullptr;
    }
  }
};

/// Helper data structure to emit `AllocAlignAttr`.
class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
public:
  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
                        const CallArgList &CallArgs)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // Alignment may or may not be a constant, and that is okay.
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
                    .getRValue(CGF)
                    .getScalarVal();
  }
};

} // namespace

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 const CGCallee &Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 llvm::CallBase **callOrInvoke, bool IsMustTail,
                                 SourceLocation Loc) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  assert(Callee.isOrdinary() || Callee.isVirtual());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes, so only
    // check in the case where we have both always_inline and target, since
    // otherwise we could be making a conditional call after a check for the
    // proper CPU features (and it won't cause code-generation issues due to
    // function-based code generation).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(Loc, FD);

    // Some architectures (such as x86-64) have the ABI changed based on
    // attribute-target/features. Give them a chance to diagnose.
    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
        CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
  }

#ifndef NDEBUG
  if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
    // For an inalloca varargs function, we don't expect CallInfo to match the
    // function pointer's type, because the inalloca struct will have extra
    // fields in it for the varargs parameters. Code later in this function
    // bitcasts the function pointer to the type derived from CallInfo.
    //
    // In other cases, we assert that the types match up (until pointers stop
    // having pointee types).
    if (Callee.isVirtual())
      assert(IRFuncTy == Callee.getVirtualFunctionType());
    else {
      llvm::PointerType *PtrTy =
          llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType());
      assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy));
    }
  }
#endif

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
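  // A rough sketch of the shape this produces for a 32-bit x86 call such as
  // f(NonTrivial()) -- names illustrative, not exact compiler output:
  //
  //   %argmem = alloca inalloca <{ %struct.NonTrivial }>
  //   ; ...construct the argument in place...
  //   call void @f(<{ %struct.NonTrivial }>* inalloca %argmem)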
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        llvm::TypeSize size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the
        // argument struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in the default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL, even if RV is located in the default or alloca address
          // space, we don't want to perform an address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          IRCallArgs[FirstIRArg] = AI.getPointer();

          // Emit lifetime markers for the temporary alloca.
          llvm::TypeSize ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = llvm::PointerType::getWithSamePointeeType(
              cast<llvm::PointerType>(V->getType()),
              CGM.getDataLayout().getAllocaAddrSpace());
          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
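        // The resulting IR has roughly this shape (names illustrative):
        //
        //   %swifterror.temp = alloca swifterror %swift.error*
        //   %e = load %swift.error*, %swift.error** %arg
        //   store %swift.error* %e, %swift.error** %swifterror.temp
        //   call ... (%swift.error** swifterror %swifterror.temp)
        //   ; the writeback after the call copies the temp back to %arg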
        if (CallInfo.getExtParameterInfo(ArgNo).getABI() ==
            ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
              Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
                                                Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateElementBitCast(Src, STy);
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may
          // reveal sensitive information.
          // Small struct/union types are passed as integer arrays.
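          // E.g. (hypothetical) struct S { char c; short s; } may arrive here
          // as [1 x i32]; EmitCMSEClearRecord zeroes the padding byte between
          // the fields so stale stack bits don't cross the security boundary.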
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        // Materialize to a temporary.
        addr =
            CreateTempAlloca(RV.getScalarVal()->getType(),
                             CharUnits::fromQuantity(std::max(
                                 layout->getAlignment().value(), scalarAlign)),
                             "tmp",
                             /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                DE = DeclaredTy->element_end(),
                                                FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
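  // (LargestVectorWidth is later used when setting the function's
  // "min-legal-vector-width" attribute.)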
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  }

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true,
                             /*IsThunk=*/false);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if it exists.
  if (InNoMergeAttributedStmt)
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw".
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);

    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
        CannotThrow = true;
  }

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to manually
  // pop this cleanup later on. Being eager about this is OK, since this
  // temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest =
      CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp
      Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate that
  // Control Flow Guard checks should not be added, even if the call is inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
          !CI->getCalledFunction())
        Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Set tail call kind if necessary.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
    else if (IsMustTail)
      Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // Add metadata if calling an __attribute__((error(""))) or warning fn.
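  // The !srcloc metadata attached below is what lets the backend point its
  // diagnostic at this call site if the call survives optimization.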
  if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
    llvm::ConstantInt *Line =
        llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
    llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
    llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
    CI->setMetadata("srcloc", MDT);
  }

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove it from the function since CallBase::hasFnAttr
      // additionally checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeFnAttr(llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // If this is a musttail call, return immediately. We do not branch to the
  // epilogue in this case.
  if (IsMustTail) {
    for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
         ++it) {
      EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
      if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
        CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
    }
    if (CI->getType()->isVoidTy())
      Builder.CreateRetVoid();
    else
      Builder.CreateRet(CI);
    Builder.ClearInsertionPoint();
    EnsureInsertPoint();
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
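  // (An immediately-invoked lambda, so each ABIArgInfo kind below can simply
  // return its RValue.)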
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      // FALLTHROUGH
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          EmitAggregateStore(CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
    case ABIArgInfo::IndirectAliased:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
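  // (If TryEmitAsCallSiteAttribute was able to encode the alignment as a
  // plain `align` return attribute earlier, these calls are no-ops; otherwise
  // they fall back to an @llvm.assume-based assumption.)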
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
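// For example, for `va_arg(ap, int)` this evaluates `ap` (via the Microsoft
// or Itanium-style va_list reference, as appropriate) and then asks the
// target's ABIInfo, which knows the va_list layout, for the address the
// `int` should be loaded from.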