//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
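/// For example, for a method 'void S::f() const', the type derived here is
/// 'S *' rather than 'const S *': the CVR qualifiers are dropped, though any
/// address space qualifier on the method is preserved.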
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto,
    unsigned prefixArgs,
    unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
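  // For example, 'void f(void *p __attribute__((pass_object_size(0))))' is
  // lowered as two parameters: the pointer itself followed by an implicit
  // size_t argument carrying the object size.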
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
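  // (With a null RD, DeriveThisType produces 'void *', which is the generic
  // pointer fallback mentioned above.)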
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: the added 'Prefix' args are inserted after the first param,
    // i.e. after 'this'.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
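/// Instance methods are routed to arrangeCXXMethodDeclaration; unprototyped
/// functions are deliberately arranged as non-variadic, nullary functions.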
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
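  // For instance, a call with two ABI prefix args is laid out as
  // [this, prefix0, prefix1, params...], so the prototype's parameter info
  // starts at index 3.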
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
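  // A null coerce-to-type means "use the natural IR type for the Clang type";
  // materialize that default so later code can rely on getCoerceToType().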
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way a QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
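      // e.g. 'union { int a; unsigned b; }' expands to a single i32; on a
      // size tie like this one, the first field encountered wins.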
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
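  // Dispatch on the expansion kind, consuming IR arguments from AI in the
  // same order getExpandedTypes emitted the corresponding parameter types.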
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
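    // (The scalar's IR type can legitimately differ from the parameter type
    // in the IR signature, e.g. for pointers emitted with a different
    // pointee type; the bitcast reconciles the two.)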
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
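    // (CGF.IntPtrTy is the pointer-sized integer type, so this ptrtoint is
    // lossless; any width change happens in the integer cast below.)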
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}



/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedSize(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.experimental.vector.insert intrinsic to
  // perform the conversion.
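  // This shows up when fixed-length vectors are passed in functions whose
  // ABI uses scalable types, e.g. AArch64 SVE (hence the "castScalableSve"
  // name below).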
  if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDst);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
                                              "castScalableSve");
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
                                             SrcSize.getFixedSize(), CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (isa<llvm::ScalableVectorType>(SrcTy) ||
      isa<llvm::ScalableVectorType>(DstTy) ||
      SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(
        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
        Tmp.getAlignment().getAsAlign(),
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
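    // (sret-after-this puts 'this' at IR arg 0 and the sret pointer at IR
    // arg 1, as in the Microsoft C++ ABI for instance methods.)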
1522 if (IRArgNo == 1 && SwapThisWithSRet) 1523 IRArgNo++; 1524 } 1525 assert(ArgNo == ArgInfo.size()); 1526 1527 if (FI.usesInAlloca()) 1528 InallocaArgNo = IRArgNo++; 1529 1530 TotalIRArgs = IRArgNo; 1531 } 1532 } // namespace 1533 1534 /***/ 1535 1536 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1537 const auto &RI = FI.getReturnInfo(); 1538 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); 1539 } 1540 1541 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1542 return ReturnTypeUsesSRet(FI) && 1543 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1544 } 1545 1546 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1547 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1548 switch (BT->getKind()) { 1549 default: 1550 return false; 1551 case BuiltinType::Float: 1552 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 1553 case BuiltinType::Double: 1554 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 1555 case BuiltinType::LongDouble: 1556 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 1557 } 1558 } 1559 1560 return false; 1561 } 1562 1563 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1564 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1565 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1566 if (BT->getKind() == BuiltinType::LongDouble) 1567 return getTarget().useObjCFP2RetForComplexLongDouble(); 1568 } 1569 } 1570 1571 return false; 1572 } 1573 1574 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1575 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1576 return GetFunctionType(FI); 1577 } 1578 1579 llvm::FunctionType * 1580 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1581 1582 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1583 (void)Inserted; 1584 assert(Inserted && "Recursively being processed?"); 1585 1586 llvm::Type *resultType = nullptr; 1587 const ABIArgInfo &retAI = FI.getReturnInfo(); 1588 switch (retAI.getKind()) { 1589 case ABIArgInfo::Expand: 1590 case ABIArgInfo::IndirectAliased: 1591 llvm_unreachable("Invalid ABI kind for return argument"); 1592 1593 case ABIArgInfo::Extend: 1594 case ABIArgInfo::Direct: 1595 resultType = retAI.getCoerceToType(); 1596 break; 1597 1598 case ABIArgInfo::InAlloca: 1599 if (retAI.getInAllocaSRet()) { 1600 // sret things on win32 aren't void, they return the sret pointer. 1601 QualType ret = FI.getReturnType(); 1602 llvm::Type *ty = ConvertType(ret); 1603 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1604 resultType = llvm::PointerType::get(ty, addressSpace); 1605 } else { 1606 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1607 } 1608 break; 1609 1610 case ABIArgInfo::Indirect: 1611 case ABIArgInfo::Ignore: 1612 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1613 break; 1614 1615 case ABIArgInfo::CoerceAndExpand: 1616 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1617 break; 1618 } 1619 1620 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1621 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1622 1623 // Add type for sret argument. 
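// Illustrative sketch (schematic and target-dependent; not taken from this
// file): a function that returns a large aggregate by value, e.g.
//   struct S { int a[8]; };
//   struct S f(void);
// is typically lowered to an IR signature along the lines of
//   define void @f(%struct.S* sret(%struct.S) align 4 %agg.result)
// i.e. the result comes back through a hidden pointer parameter, which is
// the sret argument whose type is computed below.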
1624 if (IRFunctionArgs.hasSRetArg()) { 1625 QualType Ret = FI.getReturnType(); 1626 llvm::Type *Ty = ConvertType(Ret); 1627 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1628 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1629 llvm::PointerType::get(Ty, AddressSpace); 1630 } 1631 1632 // Add type for inalloca argument. 1633 if (IRFunctionArgs.hasInallocaArg()) { 1634 auto ArgStruct = FI.getArgStruct(); 1635 assert(ArgStruct); 1636 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1637 } 1638 1639 // Add in all of the required arguments. 1640 unsigned ArgNo = 0; 1641 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1642 ie = it + FI.getNumRequiredArgs(); 1643 for (; it != ie; ++it, ++ArgNo) { 1644 const ABIArgInfo &ArgInfo = it->info; 1645 1646 // Insert a padding type to ensure proper alignment. 1647 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1648 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1649 ArgInfo.getPaddingType(); 1650 1651 unsigned FirstIRArg, NumIRArgs; 1652 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1653 1654 switch (ArgInfo.getKind()) { 1655 case ABIArgInfo::Ignore: 1656 case ABIArgInfo::InAlloca: 1657 assert(NumIRArgs == 0); 1658 break; 1659 1660 case ABIArgInfo::Indirect: { 1661 assert(NumIRArgs == 1); 1662 // indirect arguments are always on the stack, which is alloca addr space. 1663 llvm::Type *LTy = ConvertTypeForMem(it->type); 1664 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1665 CGM.getDataLayout().getAllocaAddrSpace()); 1666 break; 1667 } 1668 case ABIArgInfo::IndirectAliased: { 1669 assert(NumIRArgs == 1); 1670 llvm::Type *LTy = ConvertTypeForMem(it->type); 1671 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); 1672 break; 1673 } 1674 case ABIArgInfo::Extend: 1675 case ABIArgInfo::Direct: { 1676 // Fast-isel and the optimizer generally like scalar values better than 1677 // FCAs, so we flatten them if this is safe to do for this argument. 
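// For example (schematic; the coerce-to type is chosen by the target's
// ABIInfo, so the exact shape varies): a parameter whose coerce-to type is
//   { i32, float }
// and which can be flattened occupies two scalar IR argument slots,
//   i32, float
// rather than one first-class-aggregate slot.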
1678 llvm::Type *argType = ArgInfo.getCoerceToType(); 1679 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1680 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1681 assert(NumIRArgs == st->getNumElements()); 1682 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1683 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1684 } else { 1685 assert(NumIRArgs == 1); 1686 ArgTypes[FirstIRArg] = argType; 1687 } 1688 break; 1689 } 1690 1691 case ABIArgInfo::CoerceAndExpand: { 1692 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1693 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1694 *ArgTypesIter++ = EltTy; 1695 } 1696 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1697 break; 1698 } 1699 1700 case ABIArgInfo::Expand: 1701 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1702 getExpandedTypes(it->type, ArgTypesIter); 1703 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1704 break; 1705 } 1706 } 1707 1708 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1709 assert(Erased && "Not in set?"); 1710 1711 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1712 } 1713 1714 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1715 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1716 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1717 1718 if (!isFuncTypeConvertible(FPT)) 1719 return llvm::StructType::get(getLLVMContext()); 1720 1721 return GetFunctionType(GD); 1722 } 1723 1724 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1725 llvm::AttrBuilder &FuncAttrs, 1726 const FunctionProtoType *FPT) { 1727 if (!FPT) 1728 return; 1729 1730 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1731 FPT->isNothrow()) 1732 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1733 } 1734 1735 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, 1736 QualType ReturnType) { 1737 // We can't just discard the return value for a record type with a 1738 // complex destructor or a non-trivially copyable type. 1739 if (const RecordType *RT = 1740 ReturnType.getCanonicalType()->getAs<RecordType>()) { 1741 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) 1742 return ClassDecl->hasTrivialDestructor(); 1743 } 1744 return ReturnType.isTriviallyCopyableType(Context); 1745 } 1746 1747 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, 1748 bool HasOptnone, 1749 bool AttrOnCallSite, 1750 llvm::AttrBuilder &FuncAttrs) { 1751 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1752 if (!HasOptnone) { 1753 if (CodeGenOpts.OptimizeSize) 1754 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1755 if (CodeGenOpts.OptimizeSize == 2) 1756 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1757 } 1758 1759 if (CodeGenOpts.DisableRedZone) 1760 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1761 if (CodeGenOpts.IndirectTlsSegRefs) 1762 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1763 if (CodeGenOpts.NoImplicitFloat) 1764 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1765 1766 if (AttrOnCallSite) { 1767 // Attributes that should go on the call site only. 
1768     if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1769       FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1770     if (!CodeGenOpts.TrapFuncName.empty())
1771       FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1772   } else {
1773     StringRef FpKind;
1774     switch (CodeGenOpts.getFramePointer()) {
1775     case CodeGenOptions::FramePointerKind::None:
1776       FpKind = "none";
1777       break;
1778     case CodeGenOptions::FramePointerKind::NonLeaf:
1779       FpKind = "non-leaf";
1780       break;
1781     case CodeGenOptions::FramePointerKind::All:
1782       FpKind = "all";
1783       break;
1784     }
1785     FuncAttrs.addAttribute("frame-pointer", FpKind);
1786
1787     if (CodeGenOpts.LessPreciseFPMAD)
1788       FuncAttrs.addAttribute("less-precise-fpmad", "true");
1789
1790     if (CodeGenOpts.NullPointerIsValid)
1791       FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1792
1793     if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1794       FuncAttrs.addAttribute("denormal-fp-math",
1795                              CodeGenOpts.FPDenormalMode.str());
1796     if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1797       FuncAttrs.addAttribute(
1798           "denormal-fp-math-f32",
1799           CodeGenOpts.FP32DenormalMode.str());
1800     }
1801
1802     if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
1803       FuncAttrs.addAttribute("no-trapping-math", "true");
1804
1805     // Strict (compliant) code is the default, so only add this attribute to
1806     // indicate that we are trying to work around a problem case.
1807     if (!CodeGenOpts.StrictFloatCastOverflow)
1808       FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1809
1810     // TODO: Are these all needed?
1811     // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1812     if (LangOpts.NoHonorInfs)
1813       FuncAttrs.addAttribute("no-infs-fp-math", "true");
1814     if (LangOpts.NoHonorNaNs)
1815       FuncAttrs.addAttribute("no-nans-fp-math", "true");
1816     if (LangOpts.UnsafeFPMath)
1817       FuncAttrs.addAttribute("unsafe-fp-math", "true");
1818     if (CodeGenOpts.SoftFloat)
1819       FuncAttrs.addAttribute("use-soft-float", "true");
1820     FuncAttrs.addAttribute("stack-protector-buffer-size",
1821                            llvm::utostr(CodeGenOpts.SSPBufferSize));
1822     if (LangOpts.NoSignedZero)
1823       FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1824
1825     // TODO: Reciprocal estimate codegen options should apply to instructions?
1826     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1827     if (!Recips.empty())
1828       FuncAttrs.addAttribute("reciprocal-estimates",
1829                              llvm::join(Recips, ","));
1830
1831     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1832         CodeGenOpts.PreferVectorWidth != "none")
1833       FuncAttrs.addAttribute("prefer-vector-width",
1834                              CodeGenOpts.PreferVectorWidth);
1835
1836     if (CodeGenOpts.StackRealignment)
1837       FuncAttrs.addAttribute("stackrealign");
1838     if (CodeGenOpts.Backchain)
1839       FuncAttrs.addAttribute("backchain");
1840     if (CodeGenOpts.EnableSegmentedStacks)
1841       FuncAttrs.addAttribute("split-stack");
1842
1843     if (CodeGenOpts.SpeculativeLoadHardening)
1844       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1845   }
1846
1847   if (getLangOpts().assumeFunctionsAreConvergent()) {
1848     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1849     // convergent (meaning, they may call an intrinsically convergent op, such
1850     // as __syncthreads() / barrier(), and so can't have certain optimizations
1851     // applied around them). LLVM will remove this attribute where it safely
1852     // can.
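// (As an illustration of why this matters: it would be unsound to sink a
// call that may reach barrier() into one arm of a divergent branch, since
// then only some work-items in the group would execute it.)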
1853     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1854   }
1855
1856   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1857     // Exceptions aren't supported in CUDA device code.
1858     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1859   }
1860
1861   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1862     StringRef Var, Value;
1863     std::tie(Var, Value) = Attr.split('=');
1864     FuncAttrs.addAttribute(Var, Value);
1865   }
1866 }
1867
1868 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1869   llvm::AttrBuilder FuncAttrs;
1870   getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1871                                /* AttrOnCallSite = */ false, FuncAttrs);
1872   // TODO: call GetCPUAndFeaturesAttributes?
1873   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1874 }
1875
1876 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1877     llvm::AttrBuilder &attrs) {
1878   getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1879                                /*for call*/ false, attrs);
1880   GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1881 }
1882
1883 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1884                                    const LangOptions &LangOpts,
1885                                    const NoBuiltinAttr *NBA = nullptr) {
1886   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1887     SmallString<32> AttributeName;
1888     AttributeName += "no-builtin-";
1889     AttributeName += BuiltinName;
1890     FuncAttrs.addAttribute(AttributeName);
1891   };
1892
1893   // First, handle the language options passed through -fno-builtin.
1894   if (LangOpts.NoBuiltin) {
1895     // -fno-builtin disables them all.
1896     FuncAttrs.addAttribute("no-builtins");
1897     return;
1898   }
1899
1900   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1901   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1902
1903   // Now, let's check the __attribute__((no_builtin("..."))) attribute added
1904   // to the source.
1905   if (!NBA)
1906     return;
1907
1908   // If there is a wildcard in the builtin names specified through the
1909   // attribute, disable them all.
1910   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1911     FuncAttrs.addAttribute("no-builtins");
1912     return;
1913   }
1914
1915   // And last, add the rest of the builtin names.
1916   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1917 }
1918
1919 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
1920                              const llvm::DataLayout &DL, const ABIArgInfo &AI,
1921                              bool CheckCoerce = true) {
1922   llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
1923   if (AI.getKind() == ABIArgInfo::Indirect)
1924     return true;
1925   if (AI.getKind() == ABIArgInfo::Extend)
1926     return true;
1927   if (!DL.typeSizeEqualsStoreSize(Ty))
1928     // TODO: This will result in a modest number of values not marked noundef
1929     // when they could be. We care about values that *invisibly* contain undef
1930     // bits from the perspective of LLVM IR.
1931     return false;
1932   if (CheckCoerce && AI.canHaveCoerceToType()) {
1933     llvm::Type *CoerceTy = AI.getCoerceToType();
1934     if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
1935                                   DL.getTypeSizeInBits(Ty)))
1936       // If we're coercing to a type with a greater size than the canonical
1937       // one, we're introducing new undef bits.
1938       // Coercing to a type of smaller or equal size is ok, as we know that
1939       // there's no internal padding (typeSizeEqualsStoreSize).
1940       return false;
1941   }
1942   if (QTy->isExtIntType())
1943     return true;
1944   if (QTy->isReferenceType())
1945     return true;
1946   if (QTy->isNullPtrType())
1947     return false;
1948   if (QTy->isMemberPointerType())
1949     // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
1950     // now, never mark them.
1951     return false;
1952   if (QTy->isScalarType()) {
1953     if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
1954       return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
1955     return true;
1956   }
1957   if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
1958     return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
1959   if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
1960     return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
1961   if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
1962     return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
1963
1964   // TODO: Some structs may be `noundef`, in specific situations.
1965   return false;
1966 }
1967
1968 /// Construct the IR attribute list of a function or call.
1969 ///
1970 /// When adding an attribute, please consider where it should be handled:
1971 ///
1972 /// - getDefaultFunctionAttributes is for attributes that are essentially
1973 ///   part of the global target configuration (but perhaps can be
1974 ///   overridden on a per-function basis).  Adding attributes there
1975 ///   will cause them to also be set in frontends that build on Clang's
1976 ///   target-configuration logic, as well as for code defined in library
1977 ///   modules such as CUDA's libdevice.
1978 ///
1979 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1980 ///   and adds declaration-specific, convention-specific, and
1981 ///   frontend-specific logic.  The last is of particular importance:
1982 ///   attributes that restrict how the frontend generates code must be
1983 ///   added here rather than getDefaultFunctionAttributes.
1984 ///
1985 void CodeGenModule::ConstructAttributeList(StringRef Name,
1986                                            const CGFunctionInfo &FI,
1987                                            CGCalleeInfo CalleeInfo,
1988                                            llvm::AttributeList &AttrList,
1989                                            unsigned &CallingConv,
1990                                            bool AttrOnCallSite, bool IsThunk) {
1991   llvm::AttrBuilder FuncAttrs;
1992   llvm::AttrBuilder RetAttrs;
1993
1994   // Collect function IR attributes from the CC lowering.
1995   // We'll collect the parameter and result attributes later.
1996   CallingConv = FI.getEffectiveCallingConvention();
1997   if (FI.isNoReturn())
1998     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1999   if (FI.isCmseNSCall())
2000     FuncAttrs.addAttribute("cmse_nonsecure_call");
2001
2002   // Collect function IR attributes from the callee prototype if we have one.
2003   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2004                                      CalleeInfo.getCalleeFunctionProtoType());
2005
2006   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2007
2008   bool HasOptnone = false;
2009   // The NoBuiltinAttr attached to the target FunctionDecl.
2010   const NoBuiltinAttr *NBA = nullptr;
2011
2012   // Collect function IR attributes based on declaration-specific
2013   // information.
2014   // FIXME: handle sseregparm someday...
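// For example (a small sample of the mapping below, not an exhaustive
// list): __attribute__((cold)) on the declaration becomes the `cold` IR
// attribute, __attribute__((noreturn)) becomes `noreturn`, and
// __attribute__((returns_twice)) becomes `returns_twice`; the translation
// is mostly one-to-one.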
2015   if (TargetDecl) {
2016     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2017       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2018     if (TargetDecl->hasAttr<NoThrowAttr>())
2019       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2020     if (TargetDecl->hasAttr<NoReturnAttr>())
2021       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2022     if (TargetDecl->hasAttr<ColdAttr>())
2023       FuncAttrs.addAttribute(llvm::Attribute::Cold);
2024     if (TargetDecl->hasAttr<HotAttr>())
2025       FuncAttrs.addAttribute(llvm::Attribute::Hot);
2026     if (TargetDecl->hasAttr<NoDuplicateAttr>())
2027       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2028     if (TargetDecl->hasAttr<ConvergentAttr>())
2029       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2030
2031     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2032       AddAttributesFromFunctionProtoType(
2033           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2034       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2035         // A sane operator new returns a non-aliasing pointer.
2036         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2037         if (getCodeGenOpts().AssumeSaneOperatorNew &&
2038             (Kind == OO_New || Kind == OO_Array_New))
2039           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2040       }
2041       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2042       const bool IsVirtualCall = MD && MD->isVirtual();
2043       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2044       // virtual function. These attributes are not inherited by overriders.
2045       if (!(AttrOnCallSite && IsVirtualCall)) {
2046         if (Fn->isNoReturn())
2047           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2048         NBA = Fn->getAttr<NoBuiltinAttr>();
2049       }
2050       // Only place nomerge attribute on call sites, never functions. This
2051       // allows it to work on indirect virtual function calls.
2052       if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2053         FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2054
2055       // Add known guaranteed alignment for allocation functions.
2056       if (unsigned BuiltinID = Fn->getBuiltinID()) {
2057         switch (BuiltinID) {
2058         case Builtin::BIaligned_alloc:
2059         case Builtin::BIcalloc:
2060         case Builtin::BImalloc:
2061         case Builtin::BImemalign:
2062         case Builtin::BIrealloc:
2063         case Builtin::BIstrdup:
2064         case Builtin::BIstrndup:
2065           RetAttrs.addAlignmentAttr(Context.getTargetInfo().getNewAlign() /
2066                                     Context.getTargetInfo().getCharWidth());
2067           break;
2068         default:
2069           break;
2070         }
2071       }
2072     }
2073
2074     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2075     if (TargetDecl->hasAttr<ConstAttr>()) {
2076       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
2077       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2078       // gcc specifies that 'const' functions have greater restrictions than
2079       // 'pure' functions, so they also cannot have infinite loops.
2080       FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2081     } else if (TargetDecl->hasAttr<PureAttr>()) {
2082       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2083       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2084       // gcc specifies that 'pure' functions cannot have infinite loops.
2085       FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2086     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2087       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2088       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2089     }
2090     if (TargetDecl->hasAttr<RestrictAttr>())
2091       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2092     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2093         !CodeGenOpts.NullPointerIsValid)
2094       RetAttrs.addAttribute(llvm::Attribute::NonNull);
2095     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2096       FuncAttrs.addAttribute("no_caller_saved_registers");
2097     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2098       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2099     if (TargetDecl->hasAttr<LeafAttr>())
2100       FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2101
2102     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2103     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2104       Optional<unsigned> NumElemsParam;
2105       if (AllocSize->getNumElemsParam().isValid())
2106         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2107       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2108                                  NumElemsParam);
2109     }
2110
2111     if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2112       if (getLangOpts().OpenCLVersion <= 120) {
2113         // OpenCL v1.2: work groups are always uniform.
2114         FuncAttrs.addAttribute("uniform-work-group-size", "true");
2115       } else {
2116         // OpenCL v2.0: work groups may or may not be uniform. The
2117         // '-cl-uniform-work-group-size' compile option gives a hint
2118         // to the compiler that the global work-size is a multiple of
2119         // the work-group size specified to clEnqueueNDRangeKernel
2120         // (i.e. work groups are uniform).
2121         FuncAttrs.addAttribute("uniform-work-group-size",
2122                                llvm::toStringRef(CodeGenOpts.UniformWGSize));
2123       }
2124     }
2125
2126     std::string AssumptionValueStr;
2127     for (AssumptionAttr *AssumptionA :
2128          TargetDecl->specific_attrs<AssumptionAttr>()) {
2129       std::string AS = AssumptionA->getAssumption().str();
2130       if (!AS.empty() && !AssumptionValueStr.empty())
2131         AssumptionValueStr += ",";
2132       AssumptionValueStr += AS;
2133     }
2134
2135     if (!AssumptionValueStr.empty())
2136       FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
2137   }
2138
2139   // Attach "no-builtins" attributes to:
2140   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2141   // * definitions: "no-builtins" or "no-builtin-<name>" only.
2142   // The attributes can come from:
2143   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2144   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2145   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2146
2147   // Collect function IR attributes based on global settings.
2148   getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2149
2150   // Override some default IR attributes based on declaration-specific
2151   // information.
2152   if (TargetDecl) {
2153     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2154       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2155     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2156       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2157     if (TargetDecl->hasAttr<NoSplitStackAttr>())
2158       FuncAttrs.removeAttribute("split-stack");
2159
2160     // Add NonLazyBind attribute to function declarations when -fno-plt
2161     // is used.
2162     // FIXME: what if we just haven't processed the function definition
2163     // yet, or if it's an external definition like C99 inline?
2164     if (CodeGenOpts.NoPLT) {
2165       if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2166         if (!Fn->isDefined() && !AttrOnCallSite) {
2167           FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2168         }
2169       }
2170     }
2171   }
2172
2173   // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2174   // functions with -funique-internal-linkage-names.
2175   if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2176     if (isa<FunctionDecl>(TargetDecl)) {
2177       if (this->getFunctionLinkage(CalleeInfo.getCalleeDecl()) ==
2178           llvm::GlobalValue::InternalLinkage)
2179         FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2180                                "selected");
2181     }
2182   }
2183
2184   // Collect non-call-site function IR attributes from declaration-specific
2185   // information.
2186   if (!AttrOnCallSite) {
2187     if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2188       FuncAttrs.addAttribute("cmse_nonsecure_entry");
2189
2190     // Whether tail calls should be disabled for this function.
2191     auto shouldDisableTailCalls = [&] {
2192       // Should this be honored in getDefaultFunctionAttributes?
2193       if (CodeGenOpts.DisableTailCalls)
2194         return true;
2195
2196       if (!TargetDecl)
2197         return false;
2198
2199       if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2200           TargetDecl->hasAttr<AnyX86InterruptAttr>())
2201         return true;
2202
2203       if (CodeGenOpts.NoEscapingBlockTailCalls) {
2204         if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2205           if (!BD->doesNotEscape())
2206             return true;
2207       }
2208
2209       return false;
2210     };
2211     if (shouldDisableTailCalls())
2212       FuncAttrs.addAttribute("disable-tail-calls", "true");
2213
2214     // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2215     // handles these separately to set them based on the global defaults.
2216     GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2217   }
2218
2219   // Collect attributes from arguments and return values.
2220   ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2221
2222   QualType RetTy = FI.getReturnType();
2223   const ABIArgInfo &RetAI = FI.getReturnInfo();
2224   const llvm::DataLayout &DL = getDataLayout();
2225
2226   // C++ explicitly makes returning undefined values UB. C's rule only applies
2227   // to used values, so we never mark them noundef for now.
2228   bool HasStrictReturn = getLangOpts().CPlusPlus;
2229   if (TargetDecl) {
2230     if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
2231       HasStrictReturn &= !FDecl->isExternC();
2232     else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
2233       // Function pointer.
2234       HasStrictReturn &= !VDecl->isExternC();
2235   }
2236
2237   // We don't want to be too aggressive with the return checking, unless
2238   // it's explicit in the code opts or we're using an appropriate sanitizer.
2239   // Try to respect what the programmer intended.
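// (Concretely: -fsanitize=memory or -fsanitize=return keeps the stricter
// behavior even when the return value could otherwise legally be dropped,
// so the noundef marking below still applies under those sanitizers.)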
2240   HasStrictReturn &= getCodeGenOpts().StrictReturn ||
2241                      !MayDropFunctionReturn(getContext(), RetTy) ||
2242                      getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
2243                      getLangOpts().Sanitize.has(SanitizerKind::Return);
2244
2245   // Determine if the return type could be partially undef.
2246   if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
2247     if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2248         DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2249       RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2250   }
2251
2252   switch (RetAI.getKind()) {
2253   case ABIArgInfo::Extend:
2254     if (RetAI.isSignExt())
2255       RetAttrs.addAttribute(llvm::Attribute::SExt);
2256     else
2257       RetAttrs.addAttribute(llvm::Attribute::ZExt);
2258     LLVM_FALLTHROUGH;
2259   case ABIArgInfo::Direct:
2260     if (RetAI.getInReg())
2261       RetAttrs.addAttribute(llvm::Attribute::InReg);
2262     break;
2263   case ABIArgInfo::Ignore:
2264     break;
2265
2266   case ABIArgInfo::InAlloca:
2267   case ABIArgInfo::Indirect: {
2268     // inalloca and sret disable readnone and readonly.
2269     FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2270         .removeAttribute(llvm::Attribute::ReadNone);
2271     break;
2272   }
2273
2274   case ABIArgInfo::CoerceAndExpand:
2275     break;
2276
2277   case ABIArgInfo::Expand:
2278   case ABIArgInfo::IndirectAliased:
2279     llvm_unreachable("Invalid ABI kind for return argument");
2280   }
2281
2282   if (!IsThunk) {
2283     // FIXME: fix this properly, https://reviews.llvm.org/D100388
2284     if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2285       QualType PTy = RefTy->getPointeeType();
2286       if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2287         RetAttrs.addDereferenceableAttr(
2288             getMinimumObjectSize(PTy).getQuantity());
2289       if (getContext().getTargetAddressSpace(PTy) == 0 &&
2290           !CodeGenOpts.NullPointerIsValid)
2291         RetAttrs.addAttribute(llvm::Attribute::NonNull);
2292       if (PTy->isObjectType()) {
2293         llvm::Align Alignment =
2294             getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2295         RetAttrs.addAlignmentAttr(Alignment);
2296       }
2297     }
2298   }
2299
2300   bool hasUsedSRet = false;
2301   SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2302
2303   // Attach attributes to sret.
2304   if (IRFunctionArgs.hasSRetArg()) {
2305     llvm::AttrBuilder SRETAttrs;
2306     SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2307     hasUsedSRet = true;
2308     if (RetAI.getInReg())
2309       SRETAttrs.addAttribute(llvm::Attribute::InReg);
2310     SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2311     ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2312         llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2313   }
2314
2315   // Attach attributes to inalloca argument.
2316   if (IRFunctionArgs.hasInallocaArg()) {
2317     llvm::AttrBuilder Attrs;
2318     Attrs.addInAllocaAttr(FI.getArgStruct());
2319     ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2320         llvm::AttributeSet::get(getLLVMContext(), Attrs);
2321   }
2322
2323   // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this`
2324   // argument, unless this is a thunk function.
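// Illustrative outcome (assumes a 64-bit target and a complete class C of
// size 8): the `this` parameter of `void C::m()` typically ends up as
//   %class.C* nonnull align 8 dereferenceable(8) %this
// in the emitted IR.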
2325 // FIXME: fix this properly, https://reviews.llvm.org/D100388 2326 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && 2327 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { 2328 auto IRArgs = IRFunctionArgs.getIRArgs(0); 2329 2330 assert(IRArgs.second == 1 && "Expected only a single `this` pointer."); 2331 2332 llvm::AttrBuilder Attrs; 2333 2334 QualType ThisTy = 2335 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType(); 2336 2337 if (!CodeGenOpts.NullPointerIsValid && 2338 getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) { 2339 Attrs.addAttribute(llvm::Attribute::NonNull); 2340 Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity()); 2341 } else { 2342 // FIXME dereferenceable should be correct here, regardless of 2343 // NullPointerIsValid. However, dereferenceable currently does not always 2344 // respect NullPointerIsValid and may imply nonnull and break the program. 2345 // See https://reviews.llvm.org/D66618 for discussions. 2346 Attrs.addDereferenceableOrNullAttr( 2347 getMinimumObjectSize( 2348 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) 2349 .getQuantity()); 2350 } 2351 2352 llvm::Align Alignment = 2353 getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr, 2354 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true) 2355 .getAsAlign(); 2356 Attrs.addAlignmentAttr(Alignment); 2357 2358 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs); 2359 } 2360 2361 unsigned ArgNo = 0; 2362 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2363 E = FI.arg_end(); 2364 I != E; ++I, ++ArgNo) { 2365 QualType ParamType = I->type; 2366 const ABIArgInfo &AI = I->info; 2367 llvm::AttrBuilder Attrs; 2368 2369 // Add attribute for padding argument, if necessary. 2370 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2371 if (AI.getPaddingInReg()) { 2372 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2373 llvm::AttributeSet::get( 2374 getLLVMContext(), 2375 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 2376 } 2377 } 2378 2379 // Decide whether the argument we're handling could be partially undef 2380 bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI); 2381 if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef) 2382 Attrs.addAttribute(llvm::Attribute::NoUndef); 2383 2384 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2385 // have the corresponding parameter variable. It doesn't make 2386 // sense to do it here because parameters are so messed up. 2387 switch (AI.getKind()) { 2388 case ABIArgInfo::Extend: 2389 if (AI.isSignExt()) 2390 Attrs.addAttribute(llvm::Attribute::SExt); 2391 else 2392 Attrs.addAttribute(llvm::Attribute::ZExt); 2393 LLVM_FALLTHROUGH; 2394 case ABIArgInfo::Direct: 2395 if (ArgNo == 0 && FI.isChainCall()) 2396 Attrs.addAttribute(llvm::Attribute::Nest); 2397 else if (AI.getInReg()) 2398 Attrs.addAttribute(llvm::Attribute::InReg); 2399 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); 2400 break; 2401 2402 case ABIArgInfo::Indirect: { 2403 if (AI.getInReg()) 2404 Attrs.addAttribute(llvm::Attribute::InReg); 2405 2406 if (AI.getIndirectByVal()) 2407 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); 2408 2409 auto *Decl = ParamType->getAsRecordDecl(); 2410 if (CodeGenOpts.PassByValueIsNoAlias && Decl && 2411 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs) 2412 // When calling the function, the pointer passed in will be the only 2413 // reference to the underlying object. Mark it accordingly. 
2414 Attrs.addAttribute(llvm::Attribute::NoAlias); 2415 2416 // TODO: We could add the byref attribute if not byval, but it would 2417 // require updating many testcases. 2418 2419 CharUnits Align = AI.getIndirectAlign(); 2420 2421 // In a byval argument, it is important that the required 2422 // alignment of the type is honored, as LLVM might be creating a 2423 // *new* stack object, and needs to know what alignment to give 2424 // it. (Sometimes it can deduce a sensible alignment on its own, 2425 // but not if clang decides it must emit a packed struct, or the 2426 // user specifies increased alignment requirements.) 2427 // 2428 // This is different from indirect *not* byval, where the object 2429 // exists already, and the align attribute is purely 2430 // informative. 2431 assert(!Align.isZero()); 2432 2433 // For now, only add this when we have a byval argument. 2434 // TODO: be less lazy about updating test cases. 2435 if (AI.getIndirectByVal()) 2436 Attrs.addAlignmentAttr(Align.getQuantity()); 2437 2438 // byval disables readnone and readonly. 2439 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2440 .removeAttribute(llvm::Attribute::ReadNone); 2441 2442 break; 2443 } 2444 case ABIArgInfo::IndirectAliased: { 2445 CharUnits Align = AI.getIndirectAlign(); 2446 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); 2447 Attrs.addAlignmentAttr(Align.getQuantity()); 2448 break; 2449 } 2450 case ABIArgInfo::Ignore: 2451 case ABIArgInfo::Expand: 2452 case ABIArgInfo::CoerceAndExpand: 2453 break; 2454 2455 case ABIArgInfo::InAlloca: 2456 // inalloca disables readnone and readonly. 2457 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2458 .removeAttribute(llvm::Attribute::ReadNone); 2459 continue; 2460 } 2461 2462 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2463 QualType PTy = RefTy->getPointeeType(); 2464 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2465 Attrs.addDereferenceableAttr( 2466 getMinimumObjectSize(PTy).getQuantity()); 2467 if (getContext().getTargetAddressSpace(PTy) == 0 && 2468 !CodeGenOpts.NullPointerIsValid) 2469 Attrs.addAttribute(llvm::Attribute::NonNull); 2470 if (PTy->isObjectType()) { 2471 llvm::Align Alignment = 2472 getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); 2473 Attrs.addAlignmentAttr(Alignment); 2474 } 2475 } 2476 2477 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2478 case ParameterABI::Ordinary: 2479 break; 2480 2481 case ParameterABI::SwiftIndirectResult: { 2482 // Add 'sret' if we haven't already used it for something, but 2483 // only if the result is void. 2484 if (!hasUsedSRet && RetTy->isVoidType()) { 2485 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); 2486 hasUsedSRet = true; 2487 } 2488 2489 // Add 'noalias' in either case. 2490 Attrs.addAttribute(llvm::Attribute::NoAlias); 2491 2492 // Add 'dereferenceable' and 'alignment'. 
2493 auto PTy = ParamType->getPointeeType(); 2494 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2495 auto info = getContext().getTypeInfoInChars(PTy); 2496 Attrs.addDereferenceableAttr(info.Width.getQuantity()); 2497 Attrs.addAlignmentAttr(info.Align.getAsAlign()); 2498 } 2499 break; 2500 } 2501 2502 case ParameterABI::SwiftErrorResult: 2503 Attrs.addAttribute(llvm::Attribute::SwiftError); 2504 break; 2505 2506 case ParameterABI::SwiftContext: 2507 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2508 break; 2509 2510 case ParameterABI::SwiftAsyncContext: 2511 Attrs.addAttribute(llvm::Attribute::SwiftAsync); 2512 break; 2513 } 2514 2515 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2516 Attrs.addAttribute(llvm::Attribute::NoCapture); 2517 2518 if (Attrs.hasAttributes()) { 2519 unsigned FirstIRArg, NumIRArgs; 2520 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2521 for (unsigned i = 0; i < NumIRArgs; i++) 2522 ArgAttrs[FirstIRArg + i] = 2523 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2524 } 2525 } 2526 assert(ArgNo == FI.arg_size()); 2527 2528 AttrList = llvm::AttributeList::get( 2529 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2530 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2531 } 2532 2533 /// An argument came in as a promoted argument; demote it back to its 2534 /// declared type. 2535 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2536 const VarDecl *var, 2537 llvm::Value *value) { 2538 llvm::Type *varType = CGF.ConvertType(var->getType()); 2539 2540 // This can happen with promotions that actually don't change the 2541 // underlying type, like the enum promotions. 2542 if (value->getType() == varType) return value; 2543 2544 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2545 && "unexpected promotion type"); 2546 2547 if (isa<llvm::IntegerType>(varType)) 2548 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2549 2550 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2551 } 2552 2553 /// Returns the attribute (either parameter attribute, or function 2554 /// attribute), which declares argument ArgNo to be non-null. 2555 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2556 QualType ArgType, unsigned ArgNo) { 2557 // FIXME: __attribute__((nonnull)) can also be applied to: 2558 // - references to pointers, where the pointee is known to be 2559 // nonnull (apparently a Clang extension) 2560 // - transparent unions containing pointers 2561 // In the former case, LLVM IR cannot represent the constraint. In 2562 // the latter case, we have no guarantee that the transparent union 2563 // is in fact passed as a pointer. 2564 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2565 return nullptr; 2566 // First, check attribute on parameter itself. 2567 if (PVD) { 2568 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2569 return ParmNNAttr; 2570 } 2571 // Check function attributes. 
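// A function-level nonnull attribute names the arguments it covers with
// 1-based indices, e.g.
//   void f(void *p, void *q) __attribute__((nonnull(1, 2)));
// so each attribute must be asked whether it applies to ArgNo.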
2572   if (!FD)
2573     return nullptr;
2574   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2575     if (NNAttr->isNonNull(ArgNo))
2576       return NNAttr;
2577   }
2578   return nullptr;
2579 }
2580
2581 namespace {
2582 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2583   Address Temp;
2584   Address Arg;
2585   CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2586   void Emit(CodeGenFunction &CGF, Flags flags) override {
2587     llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2588     CGF.Builder.CreateStore(errorValue, Arg);
2589   }
2590 };
2591 }
2592
2593 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2594                                          llvm::Function *Fn,
2595                                          const FunctionArgList &Args) {
2596   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2597     // Naked functions don't have prologues.
2598     return;
2599
2600   // If this is an implicit-return-zero function, go ahead and
2601   // initialize the return value. TODO: it might be nice to have
2602   // a more general mechanism for this that didn't require synthesized
2603   // return statements.
2604   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2605     if (FD->hasImplicitReturnZero()) {
2606       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2607       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2608       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2609       Builder.CreateStore(Zero, ReturnValue);
2610     }
2611   }
2612
2613   // FIXME: We no longer need the types from FunctionArgList; lift up and
2614   // simplify.
2615
2616   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2617   assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2618
2619   // If we're using inalloca, all the memory arguments are GEPs off of the last
2620   // parameter, which is a pointer to the complete memory area.
2621   Address ArgStruct = Address::invalid();
2622   if (IRFunctionArgs.hasInallocaArg()) {
2623     ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2624                         FI.getArgStructAlignment());
2625
2626     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2627   }
2628
2629   // Name the struct return parameter.
2630   if (IRFunctionArgs.hasSRetArg()) {
2631     auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2632     AI->setName("agg.result");
2633     AI->addAttr(llvm::Attribute::NoAlias);
2634   }
2635
2636   // Track if we received the parameter as a pointer (indirect, byval, or
2637   // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2638   // copy it into a local alloca for us.
2639   SmallVector<ParamValue, 16> ArgVals;
2640   ArgVals.reserve(Args.size());
2641
2642   // Create a pointer value for every parameter declaration. This usually
2643   // entails copying one or more LLVM IR arguments into an alloca. Don't push
2644   // any cleanups or do anything that might unwind. We do that separately, so
2645   // we can push the cleanups in the correct order for the ABI.
2646   assert(FI.arg_size() == Args.size() &&
2647          "Mismatch between function signature & arguments.");
2648   unsigned ArgNo = 0;
2649   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2650   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2651        i != e; ++i, ++info_it, ++ArgNo) {
2652     const VarDecl *Arg = *i;
2653     const ABIArgInfo &ArgI = info_it->info;
2654
2655     bool isPromoted =
2656         isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2657     // We are converting from ABIArgInfo type to VarDecl type directly, unless
2658     // the parameter is promoted. In this case we convert to
2659     // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2660     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2661     assert(hasScalarEvaluationKind(Ty) ==
2662            hasScalarEvaluationKind(Arg->getType()));
2663
2664     unsigned FirstIRArg, NumIRArgs;
2665     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2666
2667     switch (ArgI.getKind()) {
2668     case ABIArgInfo::InAlloca: {
2669       assert(NumIRArgs == 0);
2670       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2671       Address V =
2672           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2673       if (ArgI.getInAllocaIndirect())
2674         V = Address(Builder.CreateLoad(V),
2675                     getContext().getTypeAlignInChars(Ty));
2676       ArgVals.push_back(ParamValue::forIndirect(V));
2677       break;
2678     }
2679
2680     case ABIArgInfo::Indirect:
2681     case ABIArgInfo::IndirectAliased: {
2682       assert(NumIRArgs == 1);
2683       Address ParamAddr =
2684           Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2685
2686       if (!hasScalarEvaluationKind(Ty)) {
2687         // Aggregates and complex variables are accessed by reference. All we
2688         // need to do is realign the value, if requested. Also, if the address
2689         // may be aliased, copy it to ensure that the parameter variable is
2690         // mutable and has a unique address, as C requires.
2691         Address V = ParamAddr;
2692         if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2693           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2694
2695           // Copy from the incoming argument pointer to the temporary with the
2696           // appropriate alignment.
2697           //
2698           // FIXME: We should have a common utility for generating an aggregate
2699           // copy.
2700           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2701           Builder.CreateMemCpy(
2702               AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2703               ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2704               llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2705           V = AlignedTemp;
2706         }
2707         ArgVals.push_back(ParamValue::forIndirect(V));
2708       } else {
2709         // Load scalar value from indirect argument.
2710         llvm::Value *V =
2711             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2712
2713         if (isPromoted)
2714           V = emitArgumentDemotion(*this, Arg, V);
2715         ArgVals.push_back(ParamValue::forDirect(V));
2716       }
2717       break;
2718     }
2719
2720     case ABIArgInfo::Extend:
2721     case ABIArgInfo::Direct: {
2722       auto AI = Fn->getArg(FirstIRArg);
2723       llvm::Type *LTy = ConvertType(Arg->getType());
2724
2725       // Prepare parameter attributes. So far, only attributes for pointer
2726       // parameters are prepared. See
2727       // http://llvm.org/docs/LangRef.html#paramattrs.
2728       if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2729           ArgI.getCoerceToType()->isPointerTy()) {
2730         assert(NumIRArgs == 1);
2731
2732         if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2733           // Set `nonnull` attribute if any.
2734           if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2735                              PVD->getFunctionScopeIndex()) &&
2736               !CGM.getCodeGenOpts().NullPointerIsValid)
2737             AI->addAttr(llvm::Attribute::NonNull);
2738
2739           QualType OTy = PVD->getOriginalType();
2740           if (const auto *ArrTy =
2741               getContext().getAsConstantArrayType(OTy)) {
2742             // A C99 array parameter declaration with the static keyword also
2743             // indicates dereferenceability, and if the size is constant we can
2744             // use the dereferenceable attribute (which requires the size in
2745             // bytes).
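// For instance (illustrative, assuming a 4-byte int): for
//   void f(int a[static 4]);
// the parameter below can be marked `align 4 dereferenceable(16)`, since
// the caller must pass a pointer to at least four valid ints.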
2746 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2747 QualType ETy = ArrTy->getElementType(); 2748 llvm::Align Alignment = 2749 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2750 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2751 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2752 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2753 ArrSize) { 2754 llvm::AttrBuilder Attrs; 2755 Attrs.addDereferenceableAttr( 2756 getContext().getTypeSizeInChars(ETy).getQuantity() * 2757 ArrSize); 2758 AI->addAttrs(Attrs); 2759 } else if (getContext().getTargetInfo().getNullPointerValue( 2760 ETy.getAddressSpace()) == 0 && 2761 !CGM.getCodeGenOpts().NullPointerIsValid) { 2762 AI->addAttr(llvm::Attribute::NonNull); 2763 } 2764 } 2765 } else if (const auto *ArrTy = 2766 getContext().getAsVariableArrayType(OTy)) { 2767 // For C99 VLAs with the static keyword, we don't know the size so 2768 // we can't use the dereferenceable attribute, but in addrspace(0) 2769 // we know that it must be nonnull. 2770 if (ArrTy->getSizeModifier() == VariableArrayType::Static) { 2771 QualType ETy = ArrTy->getElementType(); 2772 llvm::Align Alignment = 2773 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2774 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2775 if (!getContext().getTargetAddressSpace(ETy) && 2776 !CGM.getCodeGenOpts().NullPointerIsValid) 2777 AI->addAttr(llvm::Attribute::NonNull); 2778 } 2779 } 2780 2781 // Set `align` attribute if any. 2782 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2783 if (!AVAttr) 2784 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2785 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2786 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2787 // If alignment-assumption sanitizer is enabled, we do *not* add 2788 // alignment attribute here, but emit normal alignment assumption, 2789 // so the UBSAN check could function. 2790 llvm::ConstantInt *AlignmentCI = 2791 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); 2792 unsigned AlignmentInt = 2793 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); 2794 if (AI->getParamAlign().valueOrOne() < AlignmentInt) { 2795 AI->removeAttr(llvm::Attribute::AttrKind::Alignment); 2796 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr( 2797 llvm::Align(AlignmentInt))); 2798 } 2799 } 2800 } 2801 2802 // Set 'noalias' if an argument type has the `restrict` qualifier. 2803 if (Arg->getType().isRestrictQualified()) 2804 AI->addAttr(llvm::Attribute::NoAlias); 2805 } 2806 2807 // Prepare the argument value. If we have the trivial case, handle it 2808 // with no muss and fuss. 2809 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2810 ArgI.getCoerceToType() == ConvertType(Ty) && 2811 ArgI.getDirectOffset() == 0) { 2812 assert(NumIRArgs == 1); 2813 2814 // LLVM expects swifterror parameters to be used in very restricted 2815 // ways. Copy the value into a less-restricted temporary. 
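// (LLVM only allows a swifterror value to be loaded, stored, or passed on
// as a swifterror argument; routing it through this shadow copy keeps the
// rest of IRGen free to treat it like an ordinary local.)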
2816 llvm::Value *V = AI; 2817 if (FI.getExtParameterInfo(ArgNo).getABI() 2818 == ParameterABI::SwiftErrorResult) { 2819 QualType pointeeTy = Ty->getPointeeType(); 2820 assert(pointeeTy->isPointerType()); 2821 Address temp = 2822 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2823 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2824 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2825 Builder.CreateStore(incomingErrorValue, temp); 2826 V = temp.getPointer(); 2827 2828 // Push a cleanup to copy the value back at the end of the function. 2829 // The convention does not guarantee that the value will be written 2830 // back if the function exits with an unwind exception. 2831 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2832 } 2833 2834 // Ensure the argument is the correct type. 2835 if (V->getType() != ArgI.getCoerceToType()) 2836 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2837 2838 if (isPromoted) 2839 V = emitArgumentDemotion(*this, Arg, V); 2840 2841 // Because of merging of function types from multiple decls it is 2842 // possible for the type of an argument to not match the corresponding 2843 // type in the function type. Since we are codegening the callee 2844 // in here, add a cast to the argument type. 2845 llvm::Type *LTy = ConvertType(Arg->getType()); 2846 if (V->getType() != LTy) 2847 V = Builder.CreateBitCast(V, LTy); 2848 2849 ArgVals.push_back(ParamValue::forDirect(V)); 2850 break; 2851 } 2852 2853 // VLST arguments are coerced to VLATs at the function boundary for 2854 // ABI consistency. If this is a VLST that was coerced to 2855 // a VLAT at the function boundary and the types match up, use 2856 // llvm.experimental.vector.extract to convert back to the original 2857 // VLST. 2858 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { 2859 auto *Coerced = Fn->getArg(FirstIRArg); 2860 if (auto *VecTyFrom = 2861 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { 2862 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { 2863 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); 2864 2865 assert(NumIRArgs == 1); 2866 Coerced->setName(Arg->getName() + ".coerce"); 2867 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector( 2868 VecTyTo, Coerced, Zero, "castFixedSve"))); 2869 break; 2870 } 2871 } 2872 } 2873 2874 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2875 Arg->getName()); 2876 2877 // Pointer to store into. 2878 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2879 2880 // Fast-isel and the optimizer generally like scalar values better than 2881 // FCAs, so we flatten them if this is safe to do for this argument. 
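// Sketch of the reassembly below (shapes are illustrative): for a coerce
// type { i32, float }, the two IR arguments %x.coerce0 and %x.coerce1 are
// stored into the corresponding fields of the parameter's alloca.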
2882 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2883 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2884 STy->getNumElements() > 1) { 2885 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2886 llvm::Type *DstTy = Ptr.getElementType(); 2887 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2888 2889 Address AddrToStoreInto = Address::invalid(); 2890 if (SrcSize <= DstSize) { 2891 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2892 } else { 2893 AddrToStoreInto = 2894 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2895 } 2896 2897 assert(STy->getNumElements() == NumIRArgs); 2898 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2899 auto AI = Fn->getArg(FirstIRArg + i); 2900 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2901 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); 2902 Builder.CreateStore(AI, EltPtr); 2903 } 2904 2905 if (SrcSize > DstSize) { 2906 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2907 } 2908 2909 } else { 2910 // Simple case, just do a coerced store of the argument into the alloca. 2911 assert(NumIRArgs == 1); 2912 auto AI = Fn->getArg(FirstIRArg); 2913 AI->setName(Arg->getName() + ".coerce"); 2914 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); 2915 } 2916 2917 // Match to what EmitParmDecl is expecting for this type. 2918 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2919 llvm::Value *V = 2920 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2921 if (isPromoted) 2922 V = emitArgumentDemotion(*this, Arg, V); 2923 ArgVals.push_back(ParamValue::forDirect(V)); 2924 } else { 2925 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2926 } 2927 break; 2928 } 2929 2930 case ABIArgInfo::CoerceAndExpand: { 2931 // Reconstruct into a temporary. 2932 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2933 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2934 2935 auto coercionType = ArgI.getCoerceAndExpandType(); 2936 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2937 2938 unsigned argIndex = FirstIRArg; 2939 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2940 llvm::Type *eltType = coercionType->getElementType(i); 2941 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2942 continue; 2943 2944 auto eltAddr = Builder.CreateStructGEP(alloca, i); 2945 auto elt = Fn->getArg(argIndex++); 2946 Builder.CreateStore(elt, eltAddr); 2947 } 2948 assert(argIndex == FirstIRArg + NumIRArgs); 2949 break; 2950 } 2951 2952 case ABIArgInfo::Expand: { 2953 // If this structure was expanded into multiple arguments then 2954 // we need to create a temporary and reconstruct it from the 2955 // arguments. 2956 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2957 LValue LV = MakeAddrLValue(Alloca, Ty); 2958 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2959 2960 auto FnArgIter = Fn->arg_begin() + FirstIRArg; 2961 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2962 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); 2963 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2964 auto AI = Fn->getArg(FirstIRArg + i); 2965 AI->setName(Arg->getName() + "." + Twine(i)); 2966 } 2967 break; 2968 } 2969 2970 case ABIArgInfo::Ignore: 2971 assert(NumIRArgs == 0); 2972 // Initialize the local variable appropriately. 
2973       if (!hasScalarEvaluationKind(Ty)) {
2974         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2975       } else {
2976         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2977         ArgVals.push_back(ParamValue::forDirect(U));
2978       }
2979       break;
2980     }
2981   }
2982
2983   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2984     for (int I = Args.size() - 1; I >= 0; --I)
2985       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2986   } else {
2987     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2988       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2989   }
2990 }
2991
2992 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2993   while (insn->use_empty()) {
2994     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2995     if (!bitcast) return;
2996
2997     // This is "safe" because we would have used a ConstantExpr otherwise.
2998     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2999     bitcast->eraseFromParent();
3000   }
3001 }
3002
3003 /// Try to emit a fused autorelease of a return result.
3004 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3005                                                     llvm::Value *result) {
3006   // We must be immediately following the cast.
3007   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3008   if (BB->empty()) return nullptr;
3009   if (&BB->back() != result) return nullptr;
3010
3011   llvm::Type *resultType = result->getType();
3012
3013   // result is in a BasicBlock and is therefore an Instruction.
3014   llvm::Instruction *generator = cast<llvm::Instruction>(result);
3015
3016   SmallVector<llvm::Instruction *, 4> InstsToKill;
3017
3018   // Look for:
3019   //   %generator = bitcast %type1* %generator2 to %type2*
3020   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3021     // We would have emitted this as a constant if the operand weren't
3022     // an Instruction.
3023     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3024
3025     // Require the generator to be immediately followed by the cast.
3026     if (generator->getNextNode() != bitcast)
3027       return nullptr;
3028
3029     InstsToKill.push_back(bitcast);
3030   }
3031
3032   // Look for:
3033   //   %generator = call i8* @objc_retain(i8* %originalResult)
3034   // or
3035   //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3036   llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3037   if (!call) return nullptr;
3038
3039   bool doRetainAutorelease;
3040
3041   if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3042     doRetainAutorelease = true;
3043   } else if (call->getCalledOperand() ==
3044              CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3045     doRetainAutorelease = false;
3046
3047     // If we emitted an assembly marker for this call (and the
3048     // ARCEntrypoints field should have been set if so), go looking
3049     // for that call. If we can't find it, we can't do this
3050     // optimization. But it should always be the immediately previous
3051     // instruction, unless we needed bitcasts around the call.
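// Schematically, the sequence being matched here looks like (the marker
// string is target-specific; shown for illustration only):
//   %call = call i8* @foo()
//   call void asm sideeffect "...", ""()              ; the marker
//   %v = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)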
3052 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 3053 llvm::Instruction *prev = call->getPrevNode(); 3054 assert(prev); 3055 if (isa<llvm::BitCastInst>(prev)) { 3056 prev = prev->getPrevNode(); 3057 assert(prev); 3058 } 3059 assert(isa<llvm::CallInst>(prev)); 3060 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == 3061 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 3062 InstsToKill.push_back(prev); 3063 } 3064 } else { 3065 return nullptr; 3066 } 3067 3068 result = call->getArgOperand(0); 3069 InstsToKill.push_back(call); 3070 3071 // Keep killing bitcasts, for sanity. Note that we no longer care 3072 // about precise ordering as long as there's exactly one use. 3073 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 3074 if (!bitcast->hasOneUse()) break; 3075 InstsToKill.push_back(bitcast); 3076 result = bitcast->getOperand(0); 3077 } 3078 3079 // Delete all the unnecessary instructions, from latest to earliest. 3080 for (auto *I : InstsToKill) 3081 I->eraseFromParent(); 3082 3083 // Do the fused retain/autorelease if we were asked to. 3084 if (doRetainAutorelease) 3085 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 3086 3087 // Cast back to the result type. 3088 return CGF.Builder.CreateBitCast(result, resultType); 3089 } 3090 3091 /// If this is a +1 of the value of an immutable 'self', remove it. 3092 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 3093 llvm::Value *result) { 3094 // This is only applicable to a method with an immutable 'self'. 3095 const ObjCMethodDecl *method = 3096 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 3097 if (!method) return nullptr; 3098 const VarDecl *self = method->getSelfDecl(); 3099 if (!self->getType().isConstQualified()) return nullptr; 3100 3101 // Look for a retain call. 3102 llvm::CallInst *retainCall = 3103 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 3104 if (!retainCall || retainCall->getCalledOperand() != 3105 CGF.CGM.getObjCEntrypoints().objc_retain) 3106 return nullptr; 3107 3108 // Look for an ordinary load of 'self'. 3109 llvm::Value *retainedValue = retainCall->getArgOperand(0); 3110 llvm::LoadInst *load = 3111 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 3112 if (!load || load->isAtomic() || load->isVolatile() || 3113 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 3114 return nullptr; 3115 3116 // Okay! Burn it all down. This relies for correctness on the 3117 // assumption that the retain is emitted as part of the return and 3118 // that thereafter everything is used "linearly". 3119 llvm::Type *resultType = result->getType(); 3120 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 3121 assert(retainCall->use_empty()); 3122 retainCall->eraseFromParent(); 3123 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 3124 3125 return CGF.Builder.CreateBitCast(load, resultType); 3126 } 3127 3128 /// Emit an ARC autorelease of the result of a function. 3129 /// 3130 /// \return the value to actually return from the function 3131 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 3132 llvm::Value *result) { 3133 // If we're returning 'self', kill the initial retain. This is a 3134 // heuristic attempt to "encourage correctness" in the really unfortunate 3135 // case where we have a return of self during a dealloc and we desperately 3136 // need to avoid the possible autorelease. 
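// A sketch of the problematic pattern (hypothetical Objective-C): a method
// such as '- (id)foo { return self; }' reached while 'self' is being
// deallocated must not retain/autorelease self, or the dying object would
// be resurrected.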
3137 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3138 return self;
3139 
3140 // At -O0, try to emit a fused retain/autorelease.
3141 if (CGF.shouldUseFusedARCCalls())
3142 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3143 return fused;
3144 
3145 return CGF.EmitARCAutoreleaseReturnValue(result);
3146 }
3147 
3148 /// Heuristically search for a dominating store to the return-value slot.
3149 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3150 // Check whether a User is a store whose pointer operand is the ReturnValue.
3151 // We are looking for stores to the ReturnValue, not for stores of the
3152 // ReturnValue to some other location.
3153 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3154 auto *SI = dyn_cast<llvm::StoreInst>(U);
3155 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3156 return nullptr;
3157 // These aren't actually possible for non-coerced returns, and we
3158 // only care about non-coerced returns on this code path.
3159 assert(!SI->isAtomic() && !SI->isVolatile());
3160 return SI;
3161 };
3162 // If there are multiple uses of the return-value slot, just check
3163 // for something immediately preceding the IP. Sometimes this can
3164 // happen with how we generate implicit-returns; it can also happen
3165 // with noreturn cleanups.
3166 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3167 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3168 if (IP->empty()) return nullptr;
3169 llvm::Instruction *I = &IP->back();
3170 
3171 // Skip lifetime markers.
3172 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3173 IE = IP->rend();
3174 II != IE; ++II) {
3175 if (llvm::IntrinsicInst *Intrinsic =
3176 dyn_cast<llvm::IntrinsicInst>(&*II)) {
3177 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3178 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3179 ++II;
3180 if (II == IE)
3181 break;
3182 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3183 continue;
3184 }
3185 }
3186 I = &*II;
3187 break;
3188 }
3189 
3190 return GetStoreIfValid(I);
3191 }
3192 
3193 llvm::StoreInst *store =
3194 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3195 if (!store) return nullptr;
3196 
3197 // Now do a quick-and-dirty dominance check: just walk up the
3198 // single-predecessor chain from the current insertion point.
3199 llvm::BasicBlock *StoreBB = store->getParent();
3200 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3201 while (IP != StoreBB) {
3202 if (!(IP = IP->getSinglePredecessor()))
3203 return nullptr;
3204 }
3205 
3206 // Okay, the store's basic block dominates the insertion point; we
3207 // can do our thing.
3208 return store;
3209 }
3210 
3211 // Helper functions for EmitCMSEClearRecord
3212 
3213 // Set the bits corresponding to a field having width `BitWidth` and located at
3214 // offset `BitOffset` (from the least significant bit) within a storage unit of
3215 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3216 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
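// For example (illustrative), with CharWidth == 8, a field with
// BitOffset == 4 and BitWidth == 8 spans two bytes and yields
//   Bits[0] |= 0xF0;   // high nibble of the first byte
//   Bits[1] |= 0x0F;   // low nibble of the second byte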
3217 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3218 int BitWidth, int CharWidth) {
3219 assert(CharWidth <= 64);
3220 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3221 
3222 int Pos = 0;
3223 if (BitOffset >= CharWidth) {
3224 Pos += BitOffset / CharWidth;
3225 BitOffset = BitOffset % CharWidth;
3226 }
3227 
3228 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3229 if (BitOffset + BitWidth >= CharWidth) {
3230 Bits[Pos++] |= (Used << BitOffset) & Used;
3231 BitWidth -= CharWidth - BitOffset;
3232 BitOffset = 0;
3233 }
3234 
3235 while (BitWidth >= CharWidth) {
3236 Bits[Pos++] = Used;
3237 BitWidth -= CharWidth;
3238 }
3239 
3240 if (BitWidth > 0)
3241 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3242 }
3243 
3244 // Set the bits corresponding to a field having width `BitWidth` and located at
3245 // offset `BitOffset` (from the least significant bit) within a storage unit of
3246 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3247 // `Bits` corresponds to one target byte. Use target endian layout.
3248 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3249 int StorageSize, int BitOffset, int BitWidth,
3250 int CharWidth, bool BigEndian) {
3251 
3252 SmallVector<uint64_t, 8> TmpBits(StorageSize);
3253 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3254 
3255 if (BigEndian)
3256 std::reverse(TmpBits.begin(), TmpBits.end());
3257 
3258 for (uint64_t V : TmpBits)
3259 Bits[StorageOffset++] |= V;
3260 }
3261 
3262 static void setUsedBits(CodeGenModule &, QualType, int,
3263 SmallVectorImpl<uint64_t> &);
3264 
3265 // Set the bits in `Bits`, which correspond to the value representations of
3266 // the actual members of the record type `RTy`. Note that this function does
3267 // not handle base classes, virtual tables, etc., since they cannot happen in
3268 // CMSE function arguments or return values. The bit mask corresponds to the
3269 // target memory layout, i.e. it's endian-dependent.
3270 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3271 SmallVectorImpl<uint64_t> &Bits) {
3272 ASTContext &Context = CGM.getContext();
3273 int CharWidth = Context.getCharWidth();
3274 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3275 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3276 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3277 
3278 int Idx = 0;
3279 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3280 const FieldDecl *F = *I;
3281 
3282 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3283 F->getType()->isIncompleteArrayType())
3284 continue;
3285 
3286 if (F->isBitField()) {
3287 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3288 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3289 BFI.StorageSize / CharWidth, BFI.Offset,
3290 BFI.Size, CharWidth,
3291 CGM.getDataLayout().isBigEndian());
3292 continue;
3293 }
3294 
3295 setUsedBits(CGM, F->getType(),
3296 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3297 }
3298 }
3299 
3300 // Set the bits in `Bits`, which correspond to the value representations of
3301 // the elements of an array type `ATy`.
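// For example (illustrative, typical ABI): for 'struct { char c; short s; }
// a[2]' the element mask (byte 0 and bytes 2-3 used, byte 1 is padding) is
// computed once and OR'ed into `Bits` at offsets 0 and 4.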
3302 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3303 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3304 const ASTContext &Context = CGM.getContext();
3305 
3306 QualType ETy = Context.getBaseElementType(ATy);
3307 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3308 SmallVector<uint64_t, 4> TmpBits(Size);
3309 setUsedBits(CGM, ETy, 0, TmpBits);
3310 
3311 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3312 auto Src = TmpBits.begin();
3313 auto Dst = Bits.begin() + Offset + I * Size;
3314 for (int J = 0; J < Size; ++J)
3315 *Dst++ |= *Src++;
3316 }
3317 }
3318 
3319 // Set the bits in `Bits`, which correspond to the value representations of
3320 // the type `QTy`.
3321 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3322 SmallVectorImpl<uint64_t> &Bits) {
3323 if (const auto *RTy = QTy->getAs<RecordType>())
3324 return setUsedBits(CGM, RTy, Offset, Bits);
3325 
3326 ASTContext &Context = CGM.getContext();
3327 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3328 return setUsedBits(CGM, ATy, Offset, Bits);
3329 
3330 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3331 if (Size <= 0)
3332 return;
3333 
3334 std::fill_n(Bits.begin() + Offset, Size,
3335 (uint64_t(1) << Context.getCharWidth()) - 1);
3336 }
3337 
3338 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3339 int Pos, int Size, int CharWidth,
3340 bool BigEndian) {
3341 assert(Size > 0);
3342 uint64_t Mask = 0;
3343 if (BigEndian) {
3344 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3345 ++P)
3346 Mask = (Mask << CharWidth) | *P;
3347 } else {
3348 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3349 do
3350 Mask = (Mask << CharWidth) | *--P;
3351 while (P != End);
3352 }
3353 return Mask;
3354 }
3355 
3356 // Emit code to clear the bits in a record that aren't part of any
3357 // user-declared member, when the record is used as a function return value.
3358 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3359 llvm::IntegerType *ITy,
3360 QualType QTy) {
3361 assert(Src->getType() == ITy);
3362 assert(ITy->getScalarSizeInBits() <= 64);
3363 
3364 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3365 int Size = DataLayout.getTypeStoreSize(ITy);
3366 SmallVector<uint64_t, 4> Bits(Size);
3367 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3368 
3369 int CharWidth = CGM.getContext().getCharWidth();
3370 uint64_t Mask =
3371 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3372 
3373 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3374 }
3375 
3376 // Emit code to clear the bits in a record that aren't part of any
3377 // user-declared member, when the record is passed as a function argument.
3378 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3379 llvm::ArrayType *ATy,
3380 QualType QTy) {
3381 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3382 int Size = DataLayout.getTypeStoreSize(ATy);
3383 SmallVector<uint64_t, 16> Bits(Size);
3384 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3385 
3386 // Clear each element of the LLVM array.
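// For illustration (assumed lowering, little-endian): a record passed as
// [2 x i32] whose declared members cover only its first six bytes would be
// cleared roughly as
//   %e0 = extractvalue [2 x i32] %src, 0
//   %c0 = and i32 %e0, -1              ; all four bytes used
//   %e1 = extractvalue [2 x i32] %src, 1
//   %c1 = and i32 %e1, 65535           ; only the low two bytes used
// followed by insertvalues rebuilding the array.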
3387 int CharWidth = CGM.getContext().getCharWidth();
3388 int CharsPerElt =
3389 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3390 int MaskIndex = 0;
3391 llvm::Value *R = llvm::UndefValue::get(ATy);
3392 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3393 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3394 DataLayout.isBigEndian());
3395 MaskIndex += CharsPerElt;
3396 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3397 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3398 R = Builder.CreateInsertValue(R, T1, I);
3399 }
3400 
3401 return R;
3402 }
3403 
3404 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3405 bool EmitRetDbgLoc,
3406 SourceLocation EndLoc) {
3407 if (FI.isNoReturn()) {
3408 // Noreturn functions don't return.
3409 EmitUnreachable(EndLoc);
3410 return;
3411 }
3412 
3413 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3414 // Naked functions don't have epilogues.
3415 Builder.CreateUnreachable();
3416 return;
3417 }
3418 
3419 // Functions with no result always return void.
3420 if (!ReturnValue.isValid()) {
3421 Builder.CreateRetVoid();
3422 return;
3423 }
3424 
3425 llvm::DebugLoc RetDbgLoc;
3426 llvm::Value *RV = nullptr;
3427 QualType RetTy = FI.getReturnType();
3428 const ABIArgInfo &RetAI = FI.getReturnInfo();
3429 
3430 switch (RetAI.getKind()) {
3431 case ABIArgInfo::InAlloca:
3432 // Aggregates get evaluated directly into the destination. Sometimes we
3433 // need to return the sret value in a register, though.
3434 assert(hasAggregateEvaluationKind(RetTy));
3435 if (RetAI.getInAllocaSRet()) {
3436 llvm::Function::arg_iterator EI = CurFn->arg_end();
3437 --EI;
3438 llvm::Value *ArgStruct = &*EI;
3439 llvm::Value *SRet = Builder.CreateStructGEP(
3440 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3441 llvm::Type *Ty =
3442 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3443 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3444 }
3445 break;
3446 
3447 case ABIArgInfo::Indirect: {
3448 auto AI = CurFn->arg_begin();
3449 if (RetAI.isSRetAfterThis())
3450 ++AI;
3451 switch (getEvaluationKind(RetTy)) {
3452 case TEK_Complex: {
3453 ComplexPairTy RT =
3454 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3455 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3456 /*isInit*/ true);
3457 break;
3458 }
3459 case TEK_Aggregate:
3460 // Do nothing; aggregates get evaluated directly into the destination.
3461 break;
3462 case TEK_Scalar:
3463 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3464 MakeNaturalAlignAddrLValue(&*AI, RetTy),
3465 /*isInit*/ true);
3466 break;
3467 }
3468 break;
3469 }
3470 
3471 case ABIArgInfo::Extend:
3472 case ABIArgInfo::Direct:
3473 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3474 RetAI.getDirectOffset() == 0) {
3475 // The internal return value temp will always have pointer-to-return-type
3476 // type; just do a load.
3477 
3478 // If there is a dominating store to ReturnValue, we can elide
3479 // the load, zap the store, and usually zap the alloca.
3480 if (llvm::StoreInst *SI =
3481 findDominatingStoreToReturnValue(*this)) {
3482 // Reuse the debug location from the store unless there is
3483 // cleanup code to be emitted between the store and return
3484 // instruction.
3485 if (EmitRetDbgLoc && !AutoreleaseResult)
3486 RetDbgLoc = SI->getDebugLoc();
3487 // Get the stored value and nuke the now-dead store.
3488 RV = SI->getValueOperand();
3489 SI->eraseFromParent();
3490 
3491 // Otherwise, we have to do a simple load.
3492 } else {
3493 RV = Builder.CreateLoad(ReturnValue);
3494 }
3495 } else {
3496 // If the value is offset in memory, apply the offset now.
3497 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3498 
3499 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3500 }
3501 
3502 // In ARC, end functions that return a retainable type with a call
3503 // to objc_autoreleaseReturnValue.
3504 if (AutoreleaseResult) {
3505 #ifndef NDEBUG
3506 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3507 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3508 // original return type from the FunctionDecl, ObjCMethodDecl, or
3509 // BlockDecl, via CurCodeDecl or BlockInfo.
3510 QualType RT;
3511 
3512 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3513 RT = FD->getReturnType();
3514 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3515 RT = MD->getReturnType();
3516 else if (isa<BlockDecl>(CurCodeDecl))
3517 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3518 else
3519 llvm_unreachable("Unexpected function/method type");
3520 
3521 assert(getLangOpts().ObjCAutoRefCount &&
3522 !FI.isReturnsRetained() &&
3523 RT->isObjCRetainableType());
3524 #endif
3525 RV = emitAutoreleaseOfResult(*this, RV);
3526 }
3527 
3528 break;
3529 
3530 case ABIArgInfo::Ignore:
3531 break;
3532 
3533 case ABIArgInfo::CoerceAndExpand: {
3534 auto coercionType = RetAI.getCoerceAndExpandType();
3535 
3536 // Load all of the coerced elements out into results.
3537 llvm::SmallVector<llvm::Value*, 4> results;
3538 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3539 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3540 auto coercedEltType = coercionType->getElementType(i);
3541 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3542 continue;
3543 
3544 auto eltAddr = Builder.CreateStructGEP(addr, i);
3545 auto elt = Builder.CreateLoad(eltAddr);
3546 results.push_back(elt);
3547 }
3548 
3549 // If we have one result, it's the single direct result type.
3550 if (results.size() == 1) {
3551 RV = results[0];
3552 
3553 // Otherwise, we need to make a first-class aggregate.
3554 } else {
3555 // Construct a return type that lacks padding elements.
3556 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3557 
3558 RV = llvm::UndefValue::get(returnType);
3559 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3560 RV = Builder.CreateInsertValue(RV, results[i], i);
3561 }
3562 }
3563 break;
3564 }
3565 case ABIArgInfo::Expand:
3566 case ABIArgInfo::IndirectAliased:
3567 llvm_unreachable("Invalid ABI kind for return argument");
3568 }
3569 
3570 llvm::Instruction *Ret;
3571 if (RV) {
3572 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3573 // For certain return types, clear padding bits, as they may reveal
3574 // sensitive information.
3575 // Small struct/union types are passed as integers.
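// For example (illustrative, little-endian): returning
//   struct S { int a : 3; int b : 5; };
// from a CMSE entry function comes back as an i32 and is masked as
//   %cmse.clear = and i32 %r, 255
// since only the low eight bits belong to user-declared members.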
3576 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); 3577 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType())) 3578 RV = EmitCMSEClearRecord(RV, ITy, RetTy); 3579 } 3580 EmitReturnValueCheck(RV); 3581 Ret = Builder.CreateRet(RV); 3582 } else { 3583 Ret = Builder.CreateRetVoid(); 3584 } 3585 3586 if (RetDbgLoc) 3587 Ret->setDebugLoc(std::move(RetDbgLoc)); 3588 } 3589 3590 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 3591 // A current decl may not be available when emitting vtable thunks. 3592 if (!CurCodeDecl) 3593 return; 3594 3595 // If the return block isn't reachable, neither is this check, so don't emit 3596 // it. 3597 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) 3598 return; 3599 3600 ReturnsNonNullAttr *RetNNAttr = nullptr; 3601 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3602 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3603 3604 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3605 return; 3606 3607 // Prefer the returns_nonnull attribute if it's present. 3608 SourceLocation AttrLoc; 3609 SanitizerMask CheckKind; 3610 SanitizerHandler Handler; 3611 if (RetNNAttr) { 3612 assert(!requiresReturnValueNullabilityCheck() && 3613 "Cannot check nullability and the nonnull attribute"); 3614 AttrLoc = RetNNAttr->getLocation(); 3615 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3616 Handler = SanitizerHandler::NonnullReturn; 3617 } else { 3618 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3619 if (auto *TSI = DD->getTypeSourceInfo()) 3620 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3621 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3622 CheckKind = SanitizerKind::NullabilityReturn; 3623 Handler = SanitizerHandler::NullabilityReturn; 3624 } 3625 3626 SanitizerScope SanScope(this); 3627 3628 // Make sure the "return" source location is valid. If we're checking a 3629 // nullability annotation, make sure the preconditions for the check are met. 3630 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3631 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3632 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3633 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3634 if (requiresReturnValueNullabilityCheck()) 3635 CanNullCheck = 3636 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3637 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3638 EmitBlock(Check); 3639 3640 // Now do the null check. 3641 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3642 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3643 llvm::Value *DynamicData[] = {SLocPtr}; 3644 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3645 3646 EmitBlock(NoCheck); 3647 3648 #ifndef NDEBUG 3649 // The return location should not be used after the check has been emitted. 3650 ReturnLocation = Address::invalid(); 3651 #endif 3652 } 3653 3654 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3655 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3656 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3657 } 3658 3659 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3660 QualType Ty) { 3661 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3662 // placeholders. 
3663 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3664 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3665 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3666 3667 // FIXME: When we generate this IR in one pass, we shouldn't need 3668 // this win32-specific alignment hack. 3669 CharUnits Align = CharUnits::fromQuantity(4); 3670 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3671 3672 return AggValueSlot::forAddr(Address(Placeholder, Align), 3673 Ty.getQualifiers(), 3674 AggValueSlot::IsNotDestructed, 3675 AggValueSlot::DoesNotNeedGCBarriers, 3676 AggValueSlot::IsNotAliased, 3677 AggValueSlot::DoesNotOverlap); 3678 } 3679 3680 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3681 const VarDecl *param, 3682 SourceLocation loc) { 3683 // StartFunction converted the ABI-lowered parameter(s) into a 3684 // local alloca. We need to turn that into an r-value suitable 3685 // for EmitCall. 3686 Address local = GetAddrOfLocalVar(param); 3687 3688 QualType type = param->getType(); 3689 3690 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3691 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3692 } 3693 3694 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3695 // but the argument needs to be the original pointer. 3696 if (type->isReferenceType()) { 3697 args.add(RValue::get(Builder.CreateLoad(local)), type); 3698 3699 // In ARC, move out of consumed arguments so that the release cleanup 3700 // entered by StartFunction doesn't cause an over-release. This isn't 3701 // optimal -O0 code generation, but it should get cleaned up when 3702 // optimization is enabled. This also assumes that delegate calls are 3703 // performed exactly once for a set of arguments, but that should be safe. 3704 } else if (getLangOpts().ObjCAutoRefCount && 3705 param->hasAttr<NSConsumedAttr>() && 3706 type->isObjCRetainableType()) { 3707 llvm::Value *ptr = Builder.CreateLoad(local); 3708 auto null = 3709 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3710 Builder.CreateStore(null, local); 3711 args.add(RValue::get(ptr), type); 3712 3713 // For the most part, we just need to load the alloca, except that 3714 // aggregate r-values are actually pointers to temporaries. 3715 } else { 3716 args.add(convertTempToRValue(local, type, loc), type); 3717 } 3718 3719 // Deactivate the cleanup for the callee-destructed param that was pushed. 3720 if (type->isRecordType() && !CurFuncIsThunk && 3721 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3722 param->needsDestruction(getContext())) { 3723 EHScopeStack::stable_iterator cleanup = 3724 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3725 assert(cleanup.isValid() && 3726 "cleanup for callee-destructed param not recorded"); 3727 // This unreachable is a temporary marker which will be removed later. 3728 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3729 args.addArgCleanupDeactivation(cleanup, isActive); 3730 } 3731 } 3732 3733 static bool isProvablyNull(llvm::Value *addr) { 3734 return isa<llvm::ConstantPointerNull>(addr); 3735 } 3736 3737 /// Emit the actual writing-back of a writeback. 
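/// For instance (hypothetical ObjC), for a call 'format(&err)' where the
/// parameter type is 'NSError * __autoreleasing *', the value the callee
/// left in the temporary is copied back into 'err' here after the call.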
3738 static void emitWriteback(CodeGenFunction &CGF,
3739 const CallArgList::Writeback &writeback) {
3740 const LValue &srcLV = writeback.Source;
3741 Address srcAddr = srcLV.getAddress(CGF);
3742 assert(!isProvablyNull(srcAddr.getPointer()) &&
3743 "shouldn't have writeback for provably null argument");
3744 
3745 llvm::BasicBlock *contBB = nullptr;
3746 
3747 // If the argument wasn't provably non-null, we need to null check
3748 // before doing the store.
3749 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3750 CGF.CGM.getDataLayout());
3751 if (!provablyNonNull) {
3752 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3753 contBB = CGF.createBasicBlock("icr.done");
3754 
3755 llvm::Value *isNull =
3756 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3757 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3758 CGF.EmitBlock(writebackBB);
3759 }
3760 
3761 // Load the value to write back.
3762 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3763 
3764 // Cast it back, in case we're writing an id to a Foo* or something.
3765 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3766 "icr.writeback-cast");
3767 
3768 // Perform the writeback.
3769 
3770 // If we have a "to use" value, it's something we need to emit a use
3771 // of. This has to be carefully threaded in: if it's done after the
3772 // release it's potentially undefined behavior (and the optimizer
3773 // will ignore it), and if it happens before the retain then the
3774 // optimizer could move the release there.
3775 if (writeback.ToUse) {
3776 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3777 
3778 // Retain the new value. No need to block-copy here: the block's
3779 // being passed up the stack.
3780 value = CGF.EmitARCRetainNonBlock(value);
3781 
3782 // Emit the intrinsic use here.
3783 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3784 
3785 // Load the old value (primitively).
3786 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3787 
3788 // Put the new value in place (primitively).
3789 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3790 
3791 // Release the old value.
3792 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3793 
3794 // Otherwise, we can just do a normal lvalue store.
3795 } else {
3796 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3797 }
3798 
3799 // Jump to the continuation block.
3800 if (!provablyNonNull)
3801 CGF.EmitBlock(contBB);
3802 }
3803 
3804 static void emitWritebacks(CodeGenFunction &CGF,
3805 const CallArgList &args) {
3806 for (const auto &I : args.writebacks())
3807 emitWriteback(CGF, I);
3808 }
3809 
3810 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3811 const CallArgList &CallArgs) {
3812 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3813 CallArgs.getCleanupsToDeactivate();
3814 // Iterate in reverse to increase the likelihood of popping the cleanup.
3815 for (const auto &I : llvm::reverse(Cleanups)) {
3816 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3817 I.IsActiveIP->eraseFromParent();
3818 }
3819 }
3820 
3821 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3822 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3823 if (uop->getOpcode() == UO_AddrOf)
3824 return uop->getSubExpr();
3825 return nullptr;
3826 }
3827 
3828 /// Emit an argument that's being passed call-by-writeback.
That is, 3829 /// we are passing the address of an __autoreleased temporary; it 3830 /// might be copy-initialized with the current value of the given 3831 /// address, but it will definitely be copied out of after the call. 3832 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3833 const ObjCIndirectCopyRestoreExpr *CRE) { 3834 LValue srcLV; 3835 3836 // Make an optimistic effort to emit the address as an l-value. 3837 // This can fail if the argument expression is more complicated. 3838 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3839 srcLV = CGF.EmitLValue(lvExpr); 3840 3841 // Otherwise, just emit it as a scalar. 3842 } else { 3843 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3844 3845 QualType srcAddrType = 3846 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3847 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3848 } 3849 Address srcAddr = srcLV.getAddress(CGF); 3850 3851 // The dest and src types don't necessarily match in LLVM terms 3852 // because of the crazy ObjC compatibility rules. 3853 3854 llvm::PointerType *destType = 3855 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3856 3857 // If the address is a constant null, just pass the appropriate null. 3858 if (isProvablyNull(srcAddr.getPointer())) { 3859 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3860 CRE->getType()); 3861 return; 3862 } 3863 3864 // Create the temporary. 3865 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3866 CGF.getPointerAlign(), 3867 "icr.temp"); 3868 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3869 // and that cleanup will be conditional if we can't prove that the l-value 3870 // isn't null, so we need to register a dominating point so that the cleanups 3871 // system will make valid IR. 3872 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3873 3874 // Zero-initialize it if we're not doing a copy-initialization. 3875 bool shouldCopy = CRE->shouldCopy(); 3876 if (!shouldCopy) { 3877 llvm::Value *null = 3878 llvm::ConstantPointerNull::get( 3879 cast<llvm::PointerType>(destType->getElementType())); 3880 CGF.Builder.CreateStore(null, temp); 3881 } 3882 3883 llvm::BasicBlock *contBB = nullptr; 3884 llvm::BasicBlock *originBB = nullptr; 3885 3886 // If the address is *not* known to be non-null, we need to switch. 3887 llvm::Value *finalArgument; 3888 3889 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3890 CGF.CGM.getDataLayout()); 3891 if (provablyNonNull) { 3892 finalArgument = temp.getPointer(); 3893 } else { 3894 llvm::Value *isNull = 3895 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3896 3897 finalArgument = CGF.Builder.CreateSelect(isNull, 3898 llvm::ConstantPointerNull::get(destType), 3899 temp.getPointer(), "icr.argument"); 3900 3901 // If we need to copy, then the load has to be conditional, which 3902 // means we need control flow. 3903 if (shouldCopy) { 3904 originBB = CGF.Builder.GetInsertBlock(); 3905 contBB = CGF.createBasicBlock("icr.cont"); 3906 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3907 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3908 CGF.EmitBlock(copyBB); 3909 condEval.begin(CGF); 3910 } 3911 } 3912 3913 llvm::Value *valueToUse = nullptr; 3914 3915 // Perform a copy if necessary. 
3916 if (shouldCopy) { 3917 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3918 assert(srcRV.isScalar()); 3919 3920 llvm::Value *src = srcRV.getScalarVal(); 3921 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3922 "icr.cast"); 3923 3924 // Use an ordinary store, not a store-to-lvalue. 3925 CGF.Builder.CreateStore(src, temp); 3926 3927 // If optimization is enabled, and the value was held in a 3928 // __strong variable, we need to tell the optimizer that this 3929 // value has to stay alive until we're doing the store back. 3930 // This is because the temporary is effectively unretained, 3931 // and so otherwise we can violate the high-level semantics. 3932 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3933 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3934 valueToUse = src; 3935 } 3936 } 3937 3938 // Finish the control flow if we needed it. 3939 if (shouldCopy && !provablyNonNull) { 3940 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3941 CGF.EmitBlock(contBB); 3942 3943 // Make a phi for the value to intrinsically use. 3944 if (valueToUse) { 3945 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3946 "icr.to-use"); 3947 phiToUse->addIncoming(valueToUse, copyBB); 3948 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3949 originBB); 3950 valueToUse = phiToUse; 3951 } 3952 3953 condEval.end(CGF); 3954 } 3955 3956 args.addWriteback(srcLV, temp, valueToUse); 3957 args.add(RValue::get(finalArgument), CRE->getType()); 3958 } 3959 3960 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3961 assert(!StackBase); 3962 3963 // Save the stack. 3964 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3965 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3966 } 3967 3968 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3969 if (StackBase) { 3970 // Restore the stack after the call. 3971 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3972 CGF.Builder.CreateCall(F, StackBase); 3973 } 3974 } 3975 3976 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3977 SourceLocation ArgLoc, 3978 AbstractCallee AC, 3979 unsigned ParmNum) { 3980 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3981 SanOpts.has(SanitizerKind::NullabilityArg))) 3982 return; 3983 3984 // The param decl may be missing in a variadic function. 3985 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3986 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3987 3988 // Prefer the nonnull attribute if it's present. 
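// Both spellings can trigger a check here (illustrative):
//   void f(int *p) __attribute__((nonnull(1)));  // -fsanitize=nonnull-attribute
//   void g(int *_Nonnull p);                     // -fsanitize=nullability-arg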
3989 const NonNullAttr *NNAttr = nullptr;
3990 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3991 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3992 
3993 bool CanCheckNullability = false;
3994 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3995 auto Nullability = PVD->getType()->getNullability(getContext());
3996 CanCheckNullability = Nullability &&
3997 *Nullability == NullabilityKind::NonNull &&
3998 PVD->getTypeSourceInfo();
3999 }
4000 
4001 if (!NNAttr && !CanCheckNullability)
4002 return;
4003 
4004 SourceLocation AttrLoc;
4005 SanitizerMask CheckKind;
4006 SanitizerHandler Handler;
4007 if (NNAttr) {
4008 AttrLoc = NNAttr->getLocation();
4009 CheckKind = SanitizerKind::NonnullAttribute;
4010 Handler = SanitizerHandler::NonnullArg;
4011 } else {
4012 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4013 CheckKind = SanitizerKind::NullabilityArg;
4014 Handler = SanitizerHandler::NullabilityArg;
4015 }
4016 
4017 SanitizerScope SanScope(this);
4018 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
4019 llvm::Constant *StaticData[] = {
4020 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
4021 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4022 };
4023 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
4024 }
4025 
4026 // Check if the call is going to use the inalloca convention. This needs to
4027 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
4028 // later, so we can't check it directly.
4029 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
4030 ArrayRef<QualType> ArgTypes) {
4031 // The Swift calling convention doesn't go through the target-specific
4032 // argument classification, so it never uses inalloca.
4033 // TODO: Consider limiting inalloca use to only calling conventions supported
4034 // by MSVC.
4035 if (ExplicitCC == CC_Swift)
4036 return false;
4037 if (!CGM.getTarget().getCXXABI().isMicrosoft())
4038 return false;
4039 return llvm::any_of(ArgTypes, [&](QualType Ty) {
4040 return isInAllocaArgument(CGM.getCXXABI(), Ty);
4041 });
4042 }
4043 
4044 #ifndef NDEBUG
4045 // Determine whether the given Objective-C method may have type parameters
4046 // in its signature.
4047 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4048 const DeclContext *dc = method->getDeclContext();
4049 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4050 return classDecl->getTypeParamListAsWritten();
4051 }
4052 
4053 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4054 return catDecl->getTypeParamList();
4055 }
4056 
4057 return false;
4058 }
4059 #endif
4060 
4061 /// EmitCallArgs - Emit call arguments for a function.
4062 void CodeGenFunction::EmitCallArgs(
4063 CallArgList &Args, PrototypeWrapper Prototype,
4064 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4065 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4066 SmallVector<QualType, 16> ArgTypes;
4067 
4068 assert((ParamsToSkip == 0 || Prototype.P) &&
4069 "Can't skip parameters if type info is not provided");
4070 
4071 // This variable only captures *explicitly* written conventions, not those
4072 // applied by default via command line flags or target defaults, such as
4073 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4074 // require knowing if this is a C++ instance method or being able to see
4075 // unprototyped FunctionTypes.
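// E.g. (illustrative) 'void __stdcall f(int);' is captured here as
// CC_X86StdCall, while plain 'void f(int);' compiled with -mrtd is not,
// even though both lower to stdcall.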
4076 CallingConv ExplicitCC = CC_C;
4077 
4078 // First, if a prototype was provided, use those argument types.
4079 bool IsVariadic = false;
4080 if (Prototype.P) {
4081 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4082 if (MD) {
4083 IsVariadic = MD->isVariadic();
4084 ExplicitCC = getCallingConventionForDecl(
4085 MD, CGM.getTarget().getTriple().isOSWindows());
4086 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4087 MD->param_type_end());
4088 } else {
4089 const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4090 IsVariadic = FPT->isVariadic();
4091 ExplicitCC = FPT->getExtInfo().getCC();
4092 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4093 FPT->param_type_end());
4094 }
4095 
4096 #ifndef NDEBUG
4097 // Check that the prototyped types match the argument expression types.
4098 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4099 CallExpr::const_arg_iterator Arg = ArgRange.begin();
4100 for (QualType Ty : ArgTypes) {
4101 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4102 assert(
4103 (isGenericMethod || Ty->isVariablyModifiedType() ||
4104 Ty.getNonReferenceType()->isObjCRetainableType() ||
4105 getContext()
4106 .getCanonicalType(Ty.getNonReferenceType())
4107 .getTypePtr() ==
4108 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4109 "type mismatch in call argument!");
4110 ++Arg;
4111 }
4112 
4113 // Either we've emitted all the call args, or we have a call to a variadic
4114 // function.
4115 assert((Arg == ArgRange.end() || IsVariadic) &&
4116 "Extra arguments in non-variadic function!");
4117 #endif
4118 }
4119 
4120 // If we still have any arguments, emit them using the type of the argument.
4121 for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
4122 ArgRange.end()))
4123 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4124 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4125 
4126 // We must evaluate arguments from right to left in the MS C++ ABI,
4127 // because arguments are destroyed left to right in the callee. As a special
4128 // case, there are certain language constructs that require left-to-right
4129 // evaluation, and in those cases we consider the evaluation order requirement
4130 // to trump the "destruction order is reverse construction order" guarantee.
4131 bool LeftToRight =
4132 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4133 ? Order == EvaluationOrder::ForceLeftToRight
4134 : Order != EvaluationOrder::ForceRightToLeft;
4135 
4136 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4137 RValue EmittedArg) {
4138 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4139 return;
4140 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4141 if (PS == nullptr)
4142 return;
4143 
4144 const auto &Context = getContext();
4145 auto SizeTy = Context.getSizeType();
4146 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4147 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4148 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4149 EmittedArg.getScalarVal(),
4150 PS->isDynamic());
4151 Args.add(RValue::get(V), SizeTy);
4152 // If we're emitting args in reverse, be sure to do so with
4153 // pass_object_size, as well.
4154 if (!LeftToRight)
4155 std::swap(Args.back(), *(&Args.back() - 1));
4156 };
4157 
4158 // Insert a stack save if we're going to need any inalloca args.
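// The save/restore pair brackets the argument area (illustrative IR):
//   %inalloca.save = call i8* @llvm.stacksave()
//   ...                                ; argument allocas and the call
//   call void @llvm.stackrestore(i8* %inalloca.save)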
4159 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4160 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4161 "inalloca only supported on x86");
4162 Args.allocateArgumentMemory(*this);
4163 }
4164 
4165 // Evaluate each argument in the appropriate order.
4166 size_t CallArgsStart = Args.size();
4167 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4168 unsigned Idx = LeftToRight ? I : E - I - 1;
4169 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4170 unsigned InitialArgSize = Args.size();
4171 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4172 // the argument and parameter match or the objc method is parameterized.
4173 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4174 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4175 ArgTypes[Idx]) ||
4176 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4177 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4178 "Argument and parameter types don't match");
4179 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4180 // In particular, we depend on it being the last arg in Args, and the
4181 // objectsize bits depend on there only being one arg if !LeftToRight.
4182 assert(InitialArgSize + 1 == Args.size() &&
4183 "The code below depends on only adding one arg per EmitCallArg");
4184 (void)InitialArgSize;
4185 // Since pointer arguments are never emitted as LValues, it is safe to emit
4186 // the non-null argument check for r-values only.
4187 if (!Args.back().hasLValue()) {
4188 RValue RVArg = Args.back().getKnownRValue();
4189 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4190 ParamsToSkip + Idx);
4191 // @llvm.objectsize should never have side-effects and shouldn't need
4192 // destruction/cleanups, so we can safely "emit" it after its arg,
4193 // regardless of right-to-leftness.
4194 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4195 }
4196 }
4197 
4198 if (!LeftToRight) {
4199 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4200 // IR function.
4201 std::reverse(Args.begin() + CallArgsStart, Args.end()); 4202 } 4203 } 4204 4205 namespace { 4206 4207 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 4208 DestroyUnpassedArg(Address Addr, QualType Ty) 4209 : Addr(Addr), Ty(Ty) {} 4210 4211 Address Addr; 4212 QualType Ty; 4213 4214 void Emit(CodeGenFunction &CGF, Flags flags) override { 4215 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 4216 if (DtorKind == QualType::DK_cxx_destructor) { 4217 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 4218 assert(!Dtor->isTrivial()); 4219 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 4220 /*Delegating=*/false, Addr, Ty); 4221 } else { 4222 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 4223 } 4224 } 4225 }; 4226 4227 struct DisableDebugLocationUpdates { 4228 CodeGenFunction &CGF; 4229 bool disabledDebugInfo; 4230 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 4231 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 4232 CGF.disableDebugInfo(); 4233 } 4234 ~DisableDebugLocationUpdates() { 4235 if (disabledDebugInfo) 4236 CGF.enableDebugInfo(); 4237 } 4238 }; 4239 4240 } // end anonymous namespace 4241 4242 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 4243 if (!HasLV) 4244 return RV; 4245 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 4246 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 4247 LV.isVolatile()); 4248 IsUsed = true; 4249 return RValue::getAggregate(Copy.getAddress(CGF)); 4250 } 4251 4252 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 4253 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 4254 if (!HasLV && RV.isScalar()) 4255 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); 4256 else if (!HasLV && RV.isComplex()) 4257 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 4258 else { 4259 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); 4260 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 4261 // We assume that call args are never copied into subobjects. 4262 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 4263 HasLV ? LV.isVolatileQualified() 4264 : RV.isVolatileQualified()); 4265 } 4266 IsUsed = true; 4267 } 4268 4269 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 4270 QualType type) { 4271 DisableDebugLocationUpdates Dis(*this, E); 4272 if (const ObjCIndirectCopyRestoreExpr *CRE 4273 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 4274 assert(getLangOpts().ObjCAutoRefCount); 4275 return emitWritebackArg(*this, args, CRE); 4276 } 4277 4278 assert(type->isReferenceType() == E->isGLValue() && 4279 "reference binding to unmaterialized r-value!"); 4280 4281 if (E->isGLValue()) { 4282 assert(E->getObjectKind() == OK_Ordinary); 4283 return args.add(EmitReferenceBindingToExpr(E), type); 4284 } 4285 4286 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 4287 4288 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 4289 // However, we still have to push an EH-only cleanup in case we unwind before 4290 // we make it to the call. 4291 if (type->isRecordType() && 4292 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 4293 // If we're using inalloca, use the argument memory. Otherwise, use a 4294 // temporary. 
4295 AggValueSlot Slot; 4296 if (args.isUsingInAlloca()) 4297 Slot = createPlaceholderSlot(*this, type); 4298 else 4299 Slot = CreateAggTemp(type, "agg.tmp"); 4300 4301 bool DestroyedInCallee = true, NeedsEHCleanup = true; 4302 if (const auto *RD = type->getAsCXXRecordDecl()) 4303 DestroyedInCallee = RD->hasNonTrivialDestructor(); 4304 else 4305 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 4306 4307 if (DestroyedInCallee) 4308 Slot.setExternallyDestructed(); 4309 4310 EmitAggExpr(E, Slot); 4311 RValue RV = Slot.asRValue(); 4312 args.add(RV, type); 4313 4314 if (DestroyedInCallee && NeedsEHCleanup) { 4315 // Create a no-op GEP between the placeholder and the cleanup so we can 4316 // RAUW it successfully. It also serves as a marker of the first 4317 // instruction where the cleanup is active. 4318 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 4319 type); 4320 // This unreachable is a temporary marker which will be removed later. 4321 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 4322 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 4323 } 4324 return; 4325 } 4326 4327 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 4328 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 4329 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 4330 assert(L.isSimple()); 4331 args.addUncopiedAggregate(L, type); 4332 return; 4333 } 4334 4335 args.add(EmitAnyExprToTemp(E), type); 4336 } 4337 4338 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 4339 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 4340 // implicitly widens null pointer constants that are arguments to varargs 4341 // functions to pointer-sized ints. 4342 if (!getTarget().getTriple().isOSWindows()) 4343 return Arg->getType(); 4344 4345 if (Arg->getType()->isIntegerType() && 4346 getContext().getTypeSize(Arg->getType()) < 4347 getContext().getTargetInfo().getPointerWidth(0) && 4348 Arg->isNullPointerConstant(getContext(), 4349 Expr::NPC_ValueDependentIsNotNull)) { 4350 return getContext().getIntPtrType(); 4351 } 4352 4353 return Arg->getType(); 4354 } 4355 4356 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4357 // optimizer it can aggressively ignore unwind edges. 4358 void 4359 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 4360 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 4361 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 4362 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 4363 CGM.getNoObjCARCExceptionsMetadata()); 4364 } 4365 4366 /// Emits a call to the given no-arguments nounwind runtime function. 4367 llvm::CallInst * 4368 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4369 const llvm::Twine &name) { 4370 return EmitNounwindRuntimeCall(callee, None, name); 4371 } 4372 4373 /// Emits a call to the given nounwind runtime function. 4374 llvm::CallInst * 4375 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4376 ArrayRef<llvm::Value *> args, 4377 const llvm::Twine &name) { 4378 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 4379 call->setDoesNotThrow(); 4380 return call; 4381 } 4382 4383 /// Emits a simple call (never an invoke) to the given no-arguments 4384 /// runtime function. 
4385 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4386 const llvm::Twine &name) { 4387 return EmitRuntimeCall(callee, None, name); 4388 } 4389 4390 // Calls which may throw must have operand bundles indicating which funclet 4391 // they are nested within. 4392 SmallVector<llvm::OperandBundleDef, 1> 4393 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 4394 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4395 // There is no need for a funclet operand bundle if we aren't inside a 4396 // funclet. 4397 if (!CurrentFuncletPad) 4398 return BundleList; 4399 4400 // Skip intrinsics which cannot throw. 4401 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 4402 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 4403 return BundleList; 4404 4405 BundleList.emplace_back("funclet", CurrentFuncletPad); 4406 return BundleList; 4407 } 4408 4409 /// Emits a simple call (never an invoke) to the given runtime function. 4410 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4411 ArrayRef<llvm::Value *> args, 4412 const llvm::Twine &name) { 4413 llvm::CallInst *call = Builder.CreateCall( 4414 callee, args, getBundlesForFunclet(callee.getCallee()), name); 4415 call->setCallingConv(getRuntimeCC()); 4416 return call; 4417 } 4418 4419 /// Emits a call or invoke to the given noreturn runtime function. 4420 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 4421 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 4422 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4423 getBundlesForFunclet(callee.getCallee()); 4424 4425 if (getInvokeDest()) { 4426 llvm::InvokeInst *invoke = 4427 Builder.CreateInvoke(callee, 4428 getUnreachableBlock(), 4429 getInvokeDest(), 4430 args, 4431 BundleList); 4432 invoke->setDoesNotReturn(); 4433 invoke->setCallingConv(getRuntimeCC()); 4434 } else { 4435 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 4436 call->setDoesNotReturn(); 4437 call->setCallingConv(getRuntimeCC()); 4438 Builder.CreateUnreachable(); 4439 } 4440 } 4441 4442 /// Emits a call or invoke instruction to the given nullary runtime function. 4443 llvm::CallBase * 4444 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4445 const Twine &name) { 4446 return EmitRuntimeCallOrInvoke(callee, None, name); 4447 } 4448 4449 /// Emits a call or invoke instruction to the given runtime function. 4450 llvm::CallBase * 4451 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4452 ArrayRef<llvm::Value *> args, 4453 const Twine &name) { 4454 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 4455 call->setCallingConv(getRuntimeCC()); 4456 return call; 4457 } 4458 4459 /// Emits a call or invoke instruction to the given function, depending 4460 /// on the current state of the EH stack. 
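/// That is, inside a scope with an active cleanup or landing pad, the call
/// is emitted as an invoke that unwinds to getInvokeDest().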
4461 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4462 ArrayRef<llvm::Value *> Args,
4463 const Twine &Name) {
4464 llvm::BasicBlock *InvokeDest = getInvokeDest();
4465 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4466 getBundlesForFunclet(Callee.getCallee());
4467 
4468 llvm::CallBase *Inst;
4469 if (!InvokeDest)
4470 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4471 else {
4472 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4473 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4474 Name);
4475 EmitBlock(ContBB);
4476 }
4477 
4478 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4479 // optimizer it can aggressively ignore unwind edges.
4480 if (CGM.getLangOpts().ObjCAutoRefCount)
4481 AddObjCARCExceptionMetadata(Inst);
4482 
4483 return Inst;
4484 }
4485 
4486 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4487 llvm::Value *New) {
4488 DeferredReplacements.push_back(
4489 std::make_pair(llvm::WeakTrackingVH(Old), New));
4490 }
4491 
4492 namespace {
4493 
4494 /// Set the given \p NewAlign as the alignment of the return value attribute.
4495 /// If such an attribute already exists, keep the larger of the two alignments.
4496 LLVM_NODISCARD llvm::AttributeList
4497 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4498 const llvm::AttributeList &Attrs,
4499 llvm::Align NewAlign) {
4500 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4501 if (CurAlign >= NewAlign)
4502 return Attrs;
4503 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4504 return Attrs
4505 .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4506 llvm::Attribute::AttrKind::Alignment)
4507 .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4508 }
4509 
4510 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4511 protected:
4512 CodeGenFunction &CGF;
4513 
4514 /// We do nothing if this is, or becomes, nullptr.
4515 const AlignedAttrTy *AA = nullptr;
4516 
4517 llvm::Value *Alignment = nullptr; // May or may not be a constant.
4518 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4519 
4520 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4521 : CGF(CGF_) {
4522 if (!FuncDecl)
4523 return;
4524 AA = FuncDecl->getAttr<AlignedAttrTy>();
4525 }
4526 
4527 public:
4528 /// If we can, materialize the alignment as an attribute on return value.
4529 LLVM_NODISCARD llvm::AttributeList
4530 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4531 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4532 return Attrs;
4533 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4534 if (!AlignmentCI)
4535 return Attrs;
4536 // We may legitimately have non-power-of-2 alignment here.
4537 // If so, this is UB land; emit it via `@llvm.assume` instead.
4538 if (!AlignmentCI->getValue().isPowerOf2())
4539 return Attrs;
4540 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4541 CGF.getLLVMContext(), Attrs,
4542 llvm::Align(
4543 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4544 AA = nullptr; // We're done. Disallow doing anything else.
4545 return NewAttrs;
4546 }
4547 
4548 /// Emit alignment assumption.
4549 /// This is a general fallback that we take if either there is an offset,
4550 /// or the alignment is variable or we are sanitizing for alignment.
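/// E.g. (illustrative) 'void *a(void) __attribute__((assume_aligned(32)))'
/// can become an 'align 32' attribute on the call-site return value, while
/// 'assume_aligned(32, 8)' carries an offset and must take this path.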
  void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
    if (!AA)
      return;
    CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
                                AA->getLocation(), Alignment, OffsetCI);
    AA = nullptr; // We're done. Disallow doing anything else.
  }
};

/// Helper data structure to emit `AssumeAlignedAttr`.
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
public:
  AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // It is guaranteed that the alignment/offset are constants.
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
      if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
        OffsetCI = nullptr;
    }
  }
};

/// Helper data structure to emit `AllocAlignAttr`.
class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
public:
  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
                        const CallArgList &CallArgs)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // Alignment may or may not be a constant, and that is okay.
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
                    .getRValue(CGF)
                    .getScalarVal();
  }
};

} // namespace

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 const CGCallee &Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 llvm::CallBase **callOrInvoke, bool IsMustTail,
                                 SourceLocation Loc) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  assert(Callee.isOrdinary() || Callee.isVirtual());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes, so only
    // check when the callee has both always_inline and target attributes.
    // Otherwise we could be making a conditional call after a check for the
    // proper cpu features (and it won't cause code generation issues due to
    // function-based code generation).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(Loc, FD);

    // Some architectures (such as x86-64) have the ABI changed based on
    // attribute-target/features. Give them a chance to diagnose.
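    // One illustrative case: on x86-64, a 256-bit vector argument such as
    // __m256 is passed in a YMM register only when AVX is available, so a
    // caller built without AVX disagrees with an AVX callee on how that
    // argument travels. A sketch; each target defines its own checks here.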
    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
        CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
  }

#ifndef NDEBUG
  if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
    // For an inalloca varargs function, we don't expect CallInfo to match the
    // function pointer's type, because the inalloca struct will have extra
    // fields in it for the varargs parameters. Code later in this function
    // bitcasts the function pointer to the type derived from CallInfo.
    //
    // In other cases, we assert that the types match up (until pointers stop
    // having pointee types).
    llvm::Type *TypeFromVal;
    if (Callee.isVirtual())
      TypeFromVal = Callee.getVirtualFunctionType();
    else
      TypeFromVal =
          Callee.getFunctionPointer()->getType()->getPointerElementType();
    assert(IRFuncTy == TypeFromVal);
  }
#endif

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        llvm::TypeSize size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
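  // As a rough map of what follows: one Clang-level argument may occupy zero
  // IR slots (Ignore, InAlloca), one slot (an Indirect pointer, or a Direct
  // value whose coerced type matches), or several slots (a Direct struct
  // flattened to e.g. { i64, i64 }, or an Expand/CoerceAndExpand sequence).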
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the argument
        // struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
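        // (Sketch: an int passed Indirect is spilled as
        //    %indirect-arg-temp = alloca i32
        //    store i32 %v, i32* %indirect-arg-temp
        //  and the alloca's address becomes the single IR argument; address
        //  spaces elided for brevity.)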
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL, even if RV is located in the default or alloca address
          // space, we don't want to perform an address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          IRCallArgs[FirstIRArg] = AI.getPointer();

          // Emit lifetime markers for the temporary alloca.
          llvm::TypeSize ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
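          // (The callee reads the caller's memory directly; at most we rewrite
          // the pointer below so it lands in the alloca address space the ABI
          // expects, with no temporary and no copy.)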
          auto *T = V->getType()->getPointerElementType()->getPointerTo(
              CGM.getDataLayout().getAllocaAddrSpace());
          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
              Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
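        // (Worked example, as a sketch: coercing a 12-byte struct to
        // { i64, i64 } memcpys the 12 valid bytes into a 16-byte temp and
        // then loads two i64 values; the top 4 bytes of the second i64 are
        // undef.)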
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src,
                                      STy->getPointerTo(Src.getAddressSpace()));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may reveal
          // sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(
                (unsigned)layout->getAlignment().value(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
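  // (Sketch of the resulting call on 32-bit x86, the only target using this:
  //    call void @f(<{ ... }>* inalloca %argmem)
  //  where %argmem is the alloca created above, just after the stack save,
  //  and the struct layout comes from CallInfo.getArgStruct().)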
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                DE = DeclaredTy->element_end(),
                                                FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get underlying value if it's a bitcast
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
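  // (A typical failure caught here: an i32 computed for a slot whose IR
  // parameter type is i64 because an Extend coercion was missed. This is one
  // illustrative example, not an exhaustive list.)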
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  }

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true,
                             /*IsThunk=*/false);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp
      Attrs =
          Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                             llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if it exists.
  if (InNoMergeAttributedStmt)
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoMerge);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);

    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
        CannotThrow = true;
  }

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to manually
  // pop this cleanup later on. Being eager about this is OK, since this
  // temporary is 'invisible' outside of the callee.
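  // (Concretely, the cleanup just emits the matching llvm.lifetime.end for
  // the unused sret alloca once the call has completed.)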
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp
      Attrs =
          Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                             llvm::Attribute::StrictFP);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate that
  // Control Flow Guard checks should not be added, even if the call is inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
        Attrs = Attrs.addAttribute(
            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Set tail call kind if necessary.
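  // (In IR terms: not_tail_called produces a "notail call", and a musttail
  // return produces a "musttail call".)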
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
    else if (IsMustTail)
      Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove from function since CallBase::hasFnAttr additionally checks
      // attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters generally
    // are not ready to handle emitting expressions at unreachable points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // If this is a musttail call, return immediately. We do not branch to the
  // epilogue in this case.
  if (IsMustTail) {
    for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
         ++it) {
      EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
      if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
        CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
    }
    if (CI->getType()->isVoidTy())
      Builder.CreateRetVoid();
    else
      Builder.CreateRet(CI);
    Builder.ClearInsertionPoint();
    EnsureInsertPoint();
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
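  // (Sketch of the main paths below: Indirect/InAlloca results are read back
  // out of the sret slot; a Direct result whose coerced type already matches
  // the IR return type is used as-is; anything else is stored through a
  // temporary at the ABI-specified offset and reloaded as an RValue.)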
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      // FALLTHROUGH
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          EmitAggregateStore(CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
    case ABIArgInfo::IndirectAliased:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
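  // (These are no-ops when the alignment was already encoded as a call-site
  // return attribute above; the assume form remains for variable alignments,
  // nonzero offsets, and the alignment sanitizer.)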
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
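// (Usage sketch: for `va_arg(ap, int)` the frontend reaches EmitVAArg with the
// VAArgExpr; the target's ABIInfo consumes the va_list at VAListAddr and
// returns the address of the next `int` slot, via EmitMSVAArg for the
// Microsoft ABI and EmitVAArg otherwise.)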