//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
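/// Illustrative example (hypothetical type, not from this file):
/// \code
///   struct S { void f() const; };
/// \endcode
/// The derived 'this' type for S::f is 'S *', not 'const S *': the method's
/// CVR qualifiers are dropped, though its address space qualifier is honored
/// below.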
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
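  // Illustrative example (hypothetical declaration): given
  //   void f(void *p __attribute__((pass_object_size(0))));
  // the lowered parameter list is {p, size_t}, i.e. one extra slot beyond
  // FPT->getNumParams().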
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }

  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
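/// Illustrative example: in C, a K&R-style declaration `int f();` takes the
/// FunctionNoProtoType path below and gets a non-variadic arrangement, in
/// contrast to arrangeFreeFunctionType above, which always uses a variadic
/// type for unprototyped function *values*.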
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
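  // Illustrative example: with numPrefixArgs == 1, `args` is laid out as
  // [this, abiPrefixArg, declaredArgs...], so the ext parameter infos of the
  // declared parameters begin at index numPrefixArgs + 1.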
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, bool instanceMethod, bool chainCall,
    ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI);
  (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
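      // Illustrative example: in `union { int a; unsigned b; }` both fields
      // flatten to the same 32-bit scalar, so expanding the single "largest"
      // field covers the whole union.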
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
  llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
        BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltTy, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;
    if (LV.isBitField()) {
      EmitStoreThroughLValue(RValue::get(Arg), LV);
    } else {
      // TODO: currently some places are inconsistent in what LLVM pointer
      // type they use (see D118744). Once clang uses opaque pointers all
      // LLVM pointer types will be the same and we can remove this check.
      if (Arg->getType()->isPointerTy()) {
        Address Addr = LV.getAddress(*this);
        Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
      }
      EmitStoreOfScalar(Arg, LV);
    }
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedSize(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateElementBitCast(Src, Ty);
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.experimental.vector.insert intrinsic to
  // perform the conversion.
  if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
      // vector, use a vector insert and bitcast the result.
      bool NeedsBitcast = false;
      auto PredType =
          llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
      llvm::Type *OrigType = Ty;
      if (ScalableDst == PredType &&
          FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
        ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
        NeedsBitcast = true;
      }
      if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDst);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        llvm::Value *Result = CGF.Builder.CreateInsertVector(
            ScalableDst, UndefVec, Load, Zero, "castScalableSve");
        if (NeedsBitcast)
          Result = CGF.Builder.CreateBitCast(Result, OrigType);
        return Result;
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
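///
/// Illustrative example: storing an i64 value to an i32 destination goes
/// through CoerceIntOrPtrToIntOrPtr, which truncates as if the value had made
/// a round trip through memory, so which 32 bits survive depends on the
/// target's endianness.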
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
                                             SrcSize.getFixedSize(), CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (isa<llvm::ScalableVectorType>(SrcTy) ||
      isa<llvm::ScalableVectorType>(DstTy) ||
      SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(
        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
        Tmp.getAlignment().getAsAlign(),
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(
        addr, CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
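  /// Illustrative example: a Clang argument flattened into two IR scalars
  /// behind one padding argument would have PaddingArgIndex set,
  /// FirstArgIndex naming the first scalar's position, and NumberOfArgs == 2.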
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// their quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // Ignore and InAlloca don't have matching LLVM parameters.
1530 IRArgs.NumberOfArgs = 0; 1531 break; 1532 case ABIArgInfo::CoerceAndExpand: 1533 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size(); 1534 break; 1535 case ABIArgInfo::Expand: 1536 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context); 1537 break; 1538 } 1539 1540 if (IRArgs.NumberOfArgs > 0) { 1541 IRArgs.FirstArgIndex = IRArgNo; 1542 IRArgNo += IRArgs.NumberOfArgs; 1543 } 1544 1545 // Skip over the sret parameter when it comes second. We already handled it 1546 // above. 1547 if (IRArgNo == 1 && SwapThisWithSRet) 1548 IRArgNo++; 1549 } 1550 assert(ArgNo == ArgInfo.size()); 1551 1552 if (FI.usesInAlloca()) 1553 InallocaArgNo = IRArgNo++; 1554 1555 TotalIRArgs = IRArgNo; 1556 } 1557 } // namespace 1558 1559 /***/ 1560 1561 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1562 const auto &RI = FI.getReturnInfo(); 1563 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); 1564 } 1565 1566 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1567 return ReturnTypeUsesSRet(FI) && 1568 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1569 } 1570 1571 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1572 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1573 switch (BT->getKind()) { 1574 default: 1575 return false; 1576 case BuiltinType::Float: 1577 return getTarget().useObjCFPRetForRealType(FloatModeKind::Float); 1578 case BuiltinType::Double: 1579 return getTarget().useObjCFPRetForRealType(FloatModeKind::Double); 1580 case BuiltinType::LongDouble: 1581 return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble); 1582 } 1583 } 1584 1585 return false; 1586 } 1587 1588 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1589 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1590 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1591 if (BT->getKind() == BuiltinType::LongDouble) 1592 return getTarget().useObjCFP2RetForComplexLongDouble(); 1593 } 1594 } 1595 1596 return false; 1597 } 1598 1599 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1600 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1601 return GetFunctionType(FI); 1602 } 1603 1604 llvm::FunctionType * 1605 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1606 1607 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1608 (void)Inserted; 1609 assert(Inserted && "Recursively being processed?"); 1610 1611 llvm::Type *resultType = nullptr; 1612 const ABIArgInfo &retAI = FI.getReturnInfo(); 1613 switch (retAI.getKind()) { 1614 case ABIArgInfo::Expand: 1615 case ABIArgInfo::IndirectAliased: 1616 llvm_unreachable("Invalid ABI kind for return argument"); 1617 1618 case ABIArgInfo::Extend: 1619 case ABIArgInfo::Direct: 1620 resultType = retAI.getCoerceToType(); 1621 break; 1622 1623 case ABIArgInfo::InAlloca: 1624 if (retAI.getInAllocaSRet()) { 1625 // sret things on win32 aren't void, they return the sret pointer. 
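// For example, a function returning a struct through inalloca is given an IR
// return type that is a pointer to the converted struct type rather than void.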
1626 QualType ret = FI.getReturnType(); 1627 llvm::Type *ty = ConvertType(ret); 1628 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1629 resultType = llvm::PointerType::get(ty, addressSpace); 1630 } else { 1631 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1632 } 1633 break; 1634 1635 case ABIArgInfo::Indirect: 1636 case ABIArgInfo::Ignore: 1637 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1638 break; 1639 1640 case ABIArgInfo::CoerceAndExpand: 1641 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1642 break; 1643 } 1644 1645 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1646 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1647 1648 // Add type for sret argument. 1649 if (IRFunctionArgs.hasSRetArg()) { 1650 QualType Ret = FI.getReturnType(); 1651 llvm::Type *Ty = ConvertType(Ret); 1652 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1653 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1654 llvm::PointerType::get(Ty, AddressSpace); 1655 } 1656 1657 // Add type for inalloca argument. 1658 if (IRFunctionArgs.hasInallocaArg()) { 1659 auto ArgStruct = FI.getArgStruct(); 1660 assert(ArgStruct); 1661 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1662 } 1663 1664 // Add in all of the required arguments. 1665 unsigned ArgNo = 0; 1666 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1667 ie = it + FI.getNumRequiredArgs(); 1668 for (; it != ie; ++it, ++ArgNo) { 1669 const ABIArgInfo &ArgInfo = it->info; 1670 1671 // Insert a padding type to ensure proper alignment. 1672 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1673 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1674 ArgInfo.getPaddingType(); 1675 1676 unsigned FirstIRArg, NumIRArgs; 1677 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1678 1679 switch (ArgInfo.getKind()) { 1680 case ABIArgInfo::Ignore: 1681 case ABIArgInfo::InAlloca: 1682 assert(NumIRArgs == 0); 1683 break; 1684 1685 case ABIArgInfo::Indirect: { 1686 assert(NumIRArgs == 1); 1687 // indirect arguments are always on the stack, which is alloca addr space. 1688 llvm::Type *LTy = ConvertTypeForMem(it->type); 1689 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1690 CGM.getDataLayout().getAllocaAddrSpace()); 1691 break; 1692 } 1693 case ABIArgInfo::IndirectAliased: { 1694 assert(NumIRArgs == 1); 1695 llvm::Type *LTy = ConvertTypeForMem(it->type); 1696 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); 1697 break; 1698 } 1699 case ABIArgInfo::Extend: 1700 case ABIArgInfo::Direct: { 1701 // Fast-isel and the optimizer generally like scalar values better than 1702 // FCAs, so we flatten them if this is safe to do for this argument. 
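// For example, a direct argument coerced to { i32, i32 } is emitted here as
// two scalar i32 parameters in the IR signature rather than a single
// aggregate value.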
1703 llvm::Type *argType = ArgInfo.getCoerceToType(); 1704 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1705 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1706 assert(NumIRArgs == st->getNumElements()); 1707 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1708 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1709 } else { 1710 assert(NumIRArgs == 1); 1711 ArgTypes[FirstIRArg] = argType; 1712 } 1713 break; 1714 } 1715 1716 case ABIArgInfo::CoerceAndExpand: { 1717 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1718 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1719 *ArgTypesIter++ = EltTy; 1720 } 1721 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1722 break; 1723 } 1724 1725 case ABIArgInfo::Expand: 1726 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1727 getExpandedTypes(it->type, ArgTypesIter); 1728 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1729 break; 1730 } 1731 } 1732 1733 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1734 assert(Erased && "Not in set?"); 1735 1736 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1737 } 1738 1739 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1740 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1741 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1742 1743 if (!isFuncTypeConvertible(FPT)) 1744 return llvm::StructType::get(getLLVMContext()); 1745 1746 return GetFunctionType(GD); 1747 } 1748 1749 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1750 llvm::AttrBuilder &FuncAttrs, 1751 const FunctionProtoType *FPT) { 1752 if (!FPT) 1753 return; 1754 1755 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1756 FPT->isNothrow()) 1757 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1758 } 1759 1760 static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs, 1761 const Decl *Callee) { 1762 if (!Callee) 1763 return; 1764 1765 SmallVector<StringRef, 4> Attrs; 1766 1767 for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>()) 1768 AA->getAssumption().split(Attrs, ","); 1769 1770 if (!Attrs.empty()) 1771 FuncAttrs.addAttribute(llvm::AssumptionAttrKey, 1772 llvm::join(Attrs.begin(), Attrs.end(), ",")); 1773 } 1774 1775 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, 1776 QualType ReturnType) { 1777 // We can't just discard the return value for a record type with a 1778 // complex destructor or a non-trivially copyable type. 1779 if (const RecordType *RT = 1780 ReturnType.getCanonicalType()->getAs<RecordType>()) { 1781 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) 1782 return ClassDecl->hasTrivialDestructor(); 1783 } 1784 return ReturnType.isTriviallyCopyableType(Context); 1785 } 1786 1787 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, 1788 bool HasOptnone, 1789 bool AttrOnCallSite, 1790 llvm::AttrBuilder &FuncAttrs) { 1791 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 
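// (-Os corresponds to OptimizeSize == 1 and -Oz to OptimizeSize == 2, so
// -Oz adds both optsize and minsize below.)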
1792 if (!HasOptnone) { 1793 if (CodeGenOpts.OptimizeSize) 1794 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1795 if (CodeGenOpts.OptimizeSize == 2) 1796 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1797 } 1798 1799 if (CodeGenOpts.DisableRedZone) 1800 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1801 if (CodeGenOpts.IndirectTlsSegRefs) 1802 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1803 if (CodeGenOpts.NoImplicitFloat) 1804 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1805 1806 if (AttrOnCallSite) { 1807 // Attributes that should go on the call site only. 1808 // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking 1809 // the -fno-builtin-foo list. 1810 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name)) 1811 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1812 if (!CodeGenOpts.TrapFuncName.empty()) 1813 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1814 } else { 1815 StringRef FpKind; 1816 switch (CodeGenOpts.getFramePointer()) { 1817 case CodeGenOptions::FramePointerKind::None: 1818 FpKind = "none"; 1819 break; 1820 case CodeGenOptions::FramePointerKind::NonLeaf: 1821 FpKind = "non-leaf"; 1822 break; 1823 case CodeGenOptions::FramePointerKind::All: 1824 FpKind = "all"; 1825 break; 1826 } 1827 FuncAttrs.addAttribute("frame-pointer", FpKind); 1828 1829 if (CodeGenOpts.LessPreciseFPMAD) 1830 FuncAttrs.addAttribute("less-precise-fpmad", "true"); 1831 1832 if (CodeGenOpts.NullPointerIsValid) 1833 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid); 1834 1835 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE()) 1836 FuncAttrs.addAttribute("denormal-fp-math", 1837 CodeGenOpts.FPDenormalMode.str()); 1838 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) { 1839 FuncAttrs.addAttribute( 1840 "denormal-fp-math-f32", 1841 CodeGenOpts.FP32DenormalMode.str()); 1842 } 1843 1844 if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore) 1845 FuncAttrs.addAttribute("no-trapping-math", "true"); 1846 1847 // TODO: Are these all needed? 1848 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. 1849 if (LangOpts.NoHonorInfs) 1850 FuncAttrs.addAttribute("no-infs-fp-math", "true"); 1851 if (LangOpts.NoHonorNaNs) 1852 FuncAttrs.addAttribute("no-nans-fp-math", "true"); 1853 if (LangOpts.ApproxFunc) 1854 FuncAttrs.addAttribute("approx-func-fp-math", "true"); 1855 if (LangOpts.UnsafeFPMath) 1856 FuncAttrs.addAttribute("unsafe-fp-math", "true"); 1857 if (CodeGenOpts.SoftFloat) 1858 FuncAttrs.addAttribute("use-soft-float", "true"); 1859 FuncAttrs.addAttribute("stack-protector-buffer-size", 1860 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1861 if (LangOpts.NoSignedZero) 1862 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true"); 1863 1864 // TODO: Reciprocal estimate codegen options should apply to instructions? 
1865 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals; 1866 if (!Recips.empty()) 1867 FuncAttrs.addAttribute("reciprocal-estimates", 1868 llvm::join(Recips, ",")); 1869 1870 if (!CodeGenOpts.PreferVectorWidth.empty() && 1871 CodeGenOpts.PreferVectorWidth != "none") 1872 FuncAttrs.addAttribute("prefer-vector-width", 1873 CodeGenOpts.PreferVectorWidth); 1874 1875 if (CodeGenOpts.StackRealignment) 1876 FuncAttrs.addAttribute("stackrealign"); 1877 if (CodeGenOpts.Backchain) 1878 FuncAttrs.addAttribute("backchain"); 1879 if (CodeGenOpts.EnableSegmentedStacks) 1880 FuncAttrs.addAttribute("split-stack"); 1881 1882 if (CodeGenOpts.SpeculativeLoadHardening) 1883 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening); 1884 1885 // Add zero-call-used-regs attribute. 1886 switch (CodeGenOpts.getZeroCallUsedRegs()) { 1887 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip: 1888 FuncAttrs.removeAttribute("zero-call-used-regs"); 1889 break; 1890 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg: 1891 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg"); 1892 break; 1893 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR: 1894 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr"); 1895 break; 1896 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg: 1897 FuncAttrs.addAttribute("zero-call-used-regs", "used-arg"); 1898 break; 1899 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used: 1900 FuncAttrs.addAttribute("zero-call-used-regs", "used"); 1901 break; 1902 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg: 1903 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg"); 1904 break; 1905 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR: 1906 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr"); 1907 break; 1908 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg: 1909 FuncAttrs.addAttribute("zero-call-used-regs", "all-arg"); 1910 break; 1911 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All: 1912 FuncAttrs.addAttribute("zero-call-used-regs", "all"); 1913 break; 1914 } 1915 } 1916 1917 if (getLangOpts().assumeFunctionsAreConvergent()) { 1918 // Conservatively, mark all functions and calls in CUDA and OpenCL as 1919 // convergent (meaning, they may call an intrinsically convergent op, such 1920 // as __syncthreads() / barrier(), and so can't have certain optimizations 1921 // applied around them). LLVM will remove this attribute where it safely 1922 // can. 1923 FuncAttrs.addAttribute(llvm::Attribute::Convergent); 1924 } 1925 1926 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) { 1927 // Exceptions aren't supported in CUDA device code. 1928 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1929 } 1930 1931 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) { 1932 StringRef Var, Value; 1933 std::tie(Var, Value) = Attr.split('='); 1934 FuncAttrs.addAttribute(Var, Value); 1935 } 1936 } 1937 1938 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) { 1939 llvm::AttrBuilder FuncAttrs(F.getContext()); 1940 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(), 1941 /* AttrOnCallSite = */ false, FuncAttrs); 1942 // TODO: call GetCPUAndFeaturesAttributes? 
1943 F.addFnAttrs(FuncAttrs);
1944 }
1945
1946 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1947 llvm::AttrBuilder &attrs) {
1948 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1949 /*for call*/ false, attrs);
1950 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1951 }
1952
1953 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1954 const LangOptions &LangOpts,
1955 const NoBuiltinAttr *NBA = nullptr) {
1956 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1957 SmallString<32> AttributeName;
1958 AttributeName += "no-builtin-";
1959 AttributeName += BuiltinName;
1960 FuncAttrs.addAttribute(AttributeName);
1961 };
1962
1963 // First, handle the language options passed through -fno-builtin.
1964 if (LangOpts.NoBuiltin) {
1965 // -fno-builtin disables them all.
1966 FuncAttrs.addAttribute("no-builtins");
1967 return;
1968 }
1969
1970 // Then, add attributes for builtins specified through -fno-builtin-<name>.
1971 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1972
1973 // Now, let's check the __attribute__((no_builtin("..."))) attribute added to
1974 // the source.
1975 if (!NBA)
1976 return;
1977
1978 // If there is a wildcard in the builtin names specified through the
1979 // attribute, disable them all.
1980 if (llvm::is_contained(NBA->builtinNames(), "*")) {
1981 FuncAttrs.addAttribute("no-builtins");
1982 return;
1983 }
1984
1985 // And last, add the rest of the builtin names.
1986 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1987 }
1988
1989 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
1990 const llvm::DataLayout &DL, const ABIArgInfo &AI,
1991 bool CheckCoerce = true) {
1992 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
1993 if (AI.getKind() == ABIArgInfo::Indirect)
1994 return true;
1995 if (AI.getKind() == ABIArgInfo::Extend)
1996 return true;
1997 if (!DL.typeSizeEqualsStoreSize(Ty))
1998 // TODO: This will result in a modest number of values not marked noundef
1999 // when they could be. We care about values that *invisibly* contain undef
2000 // bits from the perspective of LLVM IR.
2001 return false;
2002 if (CheckCoerce && AI.canHaveCoerceToType()) {
2003 llvm::Type *CoerceTy = AI.getCoerceToType();
2004 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2005 DL.getTypeSizeInBits(Ty)))
2006 // If we're coercing to a type with a greater size than the canonical one,
2007 // we're introducing new undef bits.
2008 // Coercing to a type of smaller or equal size is ok, as we know that
2009 // there's no internal padding (typeSizeEqualsStoreSize).
2010 return false;
2011 }
2012 if (QTy->isBitIntType())
2013 return true;
2014 if (QTy->isReferenceType())
2015 return true;
2016 if (QTy->isNullPtrType())
2017 return false;
2018 if (QTy->isMemberPointerType())
2019 // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
2020 // now, never mark them.
2021 return false;
2022 if (QTy->isScalarType()) {
2023 if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
2024 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
2025 return true;
2026 }
2027 if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
2028 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
2029 if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2030 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
2031 if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2032 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
2033
2034 // TODO: Some structs may be `noundef` in specific situations.
2035 return false;
2036 }
2037
2038 /// Construct the IR attribute list of a function or call.
2039 ///
2040 /// When adding an attribute, please consider where it should be handled:
2041 ///
2042 /// - getDefaultFunctionAttributes is for attributes that are essentially
2043 /// part of the global target configuration (but perhaps can be
2044 /// overridden on a per-function basis). Adding attributes there
2045 /// will cause them to also be set in frontends that build on Clang's
2046 /// target-configuration logic, as well as for code defined in library
2047 /// modules such as CUDA's libdevice.
2048 ///
2049 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
2050 /// and adds declaration-specific, convention-specific, and
2051 /// frontend-specific logic. The last is of particular importance:
2052 /// attributes that restrict how the frontend generates code must be
2053 /// added here rather than getDefaultFunctionAttributes.
2054 ///
2055 void CodeGenModule::ConstructAttributeList(StringRef Name,
2056 const CGFunctionInfo &FI,
2057 CGCalleeInfo CalleeInfo,
2058 llvm::AttributeList &AttrList,
2059 unsigned &CallingConv,
2060 bool AttrOnCallSite, bool IsThunk) {
2061 llvm::AttrBuilder FuncAttrs(getLLVMContext());
2062 llvm::AttrBuilder RetAttrs(getLLVMContext());
2063
2064 // Collect function IR attributes from the CC lowering.
2065 // We'll collect the parameter and result attributes later.
2066 CallingConv = FI.getEffectiveCallingConvention();
2067 if (FI.isNoReturn())
2068 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2069 if (FI.isCmseNSCall())
2070 FuncAttrs.addAttribute("cmse_nonsecure_call");
2071
2072 // Collect function IR attributes from the callee prototype if we have one.
2073 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2074 CalleeInfo.getCalleeFunctionProtoType());
2075
2076 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2077
2078 // Attach assumption attributes to the declaration. If this is a call
2079 // site, attach assumptions from the caller to the call as well.
2080 AddAttributesFromAssumes(FuncAttrs, TargetDecl);
2081
2082 bool HasOptnone = false;
2083 // The NoBuiltinAttr attached to the target FunctionDecl.
2084 const NoBuiltinAttr *NBA = nullptr;
2085
2086 // Collect function IR attributes based on declaration-specific
2087 // information.
2088 // FIXME: handle sseregparm someday...
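// For example, __attribute__((cold)) on the declaration becomes the IR
// 'cold' attribute, and __attribute__((const)) implies readnone below.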
2089 if (TargetDecl) {
2090 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2091 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2092 if (TargetDecl->hasAttr<NoThrowAttr>())
2093 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2094 if (TargetDecl->hasAttr<NoReturnAttr>())
2095 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2096 if (TargetDecl->hasAttr<ColdAttr>())
2097 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2098 if (TargetDecl->hasAttr<HotAttr>())
2099 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2100 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2101 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2102 if (TargetDecl->hasAttr<ConvergentAttr>())
2103 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2104
2105 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2106 AddAttributesFromFunctionProtoType(
2107 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2108 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2109 // A sane operator new returns a non-aliasing pointer.
2110 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2111 if (getCodeGenOpts().AssumeSaneOperatorNew &&
2112 (Kind == OO_New || Kind == OO_Array_New))
2113 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2114 }
2115 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2116 const bool IsVirtualCall = MD && MD->isVirtual();
2117 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2118 // virtual function. These attributes are not inherited by overrides.
2119 if (!(AttrOnCallSite && IsVirtualCall)) {
2120 if (Fn->isNoReturn())
2121 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2122 NBA = Fn->getAttr<NoBuiltinAttr>();
2123 }
2124 // Only place nomerge attribute on call sites, never functions. This
2125 // allows it to work on indirect virtual function calls.
2126 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2127 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2128 }
2129
2130 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2131 if (TargetDecl->hasAttr<ConstAttr>()) {
2132 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
2133 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2134 // gcc specifies that 'const' functions have greater restrictions than
2135 // 'pure' functions, so they also cannot have infinite loops.
2136 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2137 } else if (TargetDecl->hasAttr<PureAttr>()) {
2138 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2139 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2140 // gcc specifies that 'pure' functions cannot have infinite loops.
2141 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2142 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2143 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2144 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2145 }
2146 if (TargetDecl->hasAttr<RestrictAttr>())
2147 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2148 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2149 !CodeGenOpts.NullPointerIsValid)
2150 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2151 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2152 FuncAttrs.addAttribute("no_caller_saved_registers");
2153 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2154 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2155 if (TargetDecl->hasAttr<LeafAttr>())
2156 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2157
2158 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2159 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2160 Optional<unsigned> NumElemsParam;
2161 if (AllocSize->getNumElemsParam().isValid())
2162 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2163 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2164 NumElemsParam);
2165 }
2166
2167 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2168 if (getLangOpts().OpenCLVersion <= 120) {
2169 // OpenCL v1.2 work-groups are always uniform.
2170 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2171 } else {
2172 // In OpenCL v2.0, work-groups may be uniform or non-uniform. The
2173 // '-cl-uniform-work-group-size' compile option hints to the compiler
2174 // that the global work-size is a multiple of the work-group size
2175 // specified to clEnqueueNDRangeKernel
2176 // (i.e. work-groups are uniform).
2177 FuncAttrs.addAttribute("uniform-work-group-size",
2178 llvm::toStringRef(CodeGenOpts.UniformWGSize));
2179 }
2180 }
2181 }
2182
2183 // Attach "no-builtins" attributes to:
2184 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2185 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2186 // The attributes can come from:
2187 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2188 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2189 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2190
2191 // Collect function IR attributes based on global settings.
2192 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2193
2194 // Override some default IR attributes based on declaration-specific
2195 // information.
2196 if (TargetDecl) {
2197 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2198 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2199 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2200 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2201 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2202 FuncAttrs.removeAttribute("split-stack");
2203 if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
2204 // A function "__attribute__((...))" overrides the command-line flag.
2205 auto Kind =
2206 TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2207 FuncAttrs.removeAttribute("zero-call-used-regs");
2208 FuncAttrs.addAttribute(
2209 "zero-call-used-regs",
2210 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2211 }
2212
2213 // Add NonLazyBind attribute to function declarations when -fno-plt
2214 // is used.
2215 // FIXME: what if we just haven't processed the function definition
2216 // yet, or if it's an external definition like C99 inline?
2217 if (CodeGenOpts.NoPLT) {
2218 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2219 if (!Fn->isDefined() && !AttrOnCallSite) {
2220 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2221 }
2222 }
2223 }
2224 }
2225
2226 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2227 // functions with -funique-internal-linkage-names.
2228 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2229 if (isa<FunctionDecl>(TargetDecl)) {
2230 if (this->getFunctionLinkage(CalleeInfo.getCalleeDecl()) ==
2231 llvm::GlobalValue::InternalLinkage)
2232 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2233 "selected");
2234 }
2235 }
2236
2237 // Collect non-call-site function IR attributes from declaration-specific
2238 // information.
2239 if (!AttrOnCallSite) {
2240 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2241 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2242
2243 // Decide whether tail calls should be disabled.
2244 auto shouldDisableTailCalls = [&] {
2245 // Should this be honored in getDefaultFunctionAttributes?
2246 if (CodeGenOpts.DisableTailCalls)
2247 return true;
2248
2249 if (!TargetDecl)
2250 return false;
2251
2252 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2253 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2254 return true;
2255
2256 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2257 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2258 if (!BD->doesNotEscape())
2259 return true;
2260 }
2261
2262 return false;
2263 };
2264 if (shouldDisableTailCalls())
2265 FuncAttrs.addAttribute("disable-tail-calls", "true");
2266
2267 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2268 // handles these separately to set them based on the global defaults.
2269 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2270 }
2271
2272 // Collect attributes from arguments and return values.
2273 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2274
2275 QualType RetTy = FI.getReturnType();
2276 const ABIArgInfo &RetAI = FI.getReturnInfo();
2277 const llvm::DataLayout &DL = getDataLayout();
2278
2279 // C++ explicitly makes returning undefined values UB. C's rule only applies
2280 // to used values, so we never mark them noundef for now.
2281 bool HasStrictReturn = getLangOpts().CPlusPlus;
2282 if (TargetDecl && HasStrictReturn) {
2283 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
2284 HasStrictReturn &= !FDecl->isExternC();
2285 else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
2286 // Function pointer
2287 HasStrictReturn &= !VDecl->isExternC();
2288 }
2289
2290 // We don't want to be too aggressive with the return checking, unless
2291 // it's explicit in the code opts or we're using an appropriate sanitizer.
2292 // Try to respect what the programmer intended.
2293 HasStrictReturn &= getCodeGenOpts().StrictReturn ||
2294 !MayDropFunctionReturn(getContext(), RetTy) ||
2295 getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
2296 getLangOpts().Sanitize.has(SanitizerKind::Return);
2297
2298 // Determine if the return type could be partially undef.
2299 if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
2300 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2301 DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2302 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2303 }
2304
2305 switch (RetAI.getKind()) {
2306 case ABIArgInfo::Extend:
2307 if (RetAI.isSignExt())
2308 RetAttrs.addAttribute(llvm::Attribute::SExt);
2309 else
2310 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2311 LLVM_FALLTHROUGH;
2312 case ABIArgInfo::Direct:
2313 if (RetAI.getInReg())
2314 RetAttrs.addAttribute(llvm::Attribute::InReg);
2315 break;
2316 case ABIArgInfo::Ignore:
2317 break;
2318
2319 case ABIArgInfo::InAlloca:
2320 case ABIArgInfo::Indirect: {
2321 // inalloca and sret disable readnone and readonly
2322 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2323 .removeAttribute(llvm::Attribute::ReadNone);
2324 break;
2325 }
2326
2327 case ABIArgInfo::CoerceAndExpand:
2328 break;
2329
2330 case ABIArgInfo::Expand:
2331 case ABIArgInfo::IndirectAliased:
2332 llvm_unreachable("Invalid ABI kind for return argument");
2333 }
2334
2335 if (!IsThunk) {
2336 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2337 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2338 QualType PTy = RefTy->getPointeeType();
2339 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2340 RetAttrs.addDereferenceableAttr(
2341 getMinimumObjectSize(PTy).getQuantity());
2342 if (getContext().getTargetAddressSpace(PTy) == 0 &&
2343 !CodeGenOpts.NullPointerIsValid)
2344 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2345 if (PTy->isObjectType()) {
2346 llvm::Align Alignment =
2347 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2348 RetAttrs.addAlignmentAttr(Alignment);
2349 }
2350 }
2351 }
2352
2353 bool hasUsedSRet = false;
2354 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2355
2356 // Attach attributes to sret.
2357 if (IRFunctionArgs.hasSRetArg()) {
2358 llvm::AttrBuilder SRETAttrs(getLLVMContext());
2359 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2360 hasUsedSRet = true;
2361 if (RetAI.getInReg())
2362 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2363 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2364 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2365 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2366 }
2367
2368 // Attach attributes to inalloca argument.
2369 if (IRFunctionArgs.hasInallocaArg()) {
2370 llvm::AttrBuilder Attrs(getLLVMContext());
2371 Attrs.addInAllocaAttr(FI.getArgStruct());
2372 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2373 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2374 }
2375
2376 // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
2377 // unless this is a thunk function.
2378 // FIXME: fix this properly, https://reviews.llvm.org/D100388 2379 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && 2380 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) { 2381 auto IRArgs = IRFunctionArgs.getIRArgs(0); 2382 2383 assert(IRArgs.second == 1 && "Expected only a single `this` pointer."); 2384 2385 llvm::AttrBuilder Attrs(getLLVMContext()); 2386 2387 QualType ThisTy = 2388 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType(); 2389 2390 if (!CodeGenOpts.NullPointerIsValid && 2391 getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) { 2392 Attrs.addAttribute(llvm::Attribute::NonNull); 2393 Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity()); 2394 } else { 2395 // FIXME dereferenceable should be correct here, regardless of 2396 // NullPointerIsValid. However, dereferenceable currently does not always 2397 // respect NullPointerIsValid and may imply nonnull and break the program. 2398 // See https://reviews.llvm.org/D66618 for discussions. 2399 Attrs.addDereferenceableOrNullAttr( 2400 getMinimumObjectSize( 2401 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) 2402 .getQuantity()); 2403 } 2404 2405 llvm::Align Alignment = 2406 getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr, 2407 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true) 2408 .getAsAlign(); 2409 Attrs.addAlignmentAttr(Alignment); 2410 2411 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs); 2412 } 2413 2414 unsigned ArgNo = 0; 2415 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2416 E = FI.arg_end(); 2417 I != E; ++I, ++ArgNo) { 2418 QualType ParamType = I->type; 2419 const ABIArgInfo &AI = I->info; 2420 llvm::AttrBuilder Attrs(getLLVMContext()); 2421 2422 // Add attribute for padding argument, if necessary. 2423 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2424 if (AI.getPaddingInReg()) { 2425 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2426 llvm::AttributeSet::get( 2427 getLLVMContext(), 2428 llvm::AttrBuilder(getLLVMContext()).addAttribute(llvm::Attribute::InReg)); 2429 } 2430 } 2431 2432 // Decide whether the argument we're handling could be partially undef 2433 if (CodeGenOpts.EnableNoundefAttrs && 2434 DetermineNoUndef(ParamType, getTypes(), DL, AI)) { 2435 Attrs.addAttribute(llvm::Attribute::NoUndef); 2436 } 2437 2438 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2439 // have the corresponding parameter variable. It doesn't make 2440 // sense to do it here because parameters are so messed up. 2441 switch (AI.getKind()) { 2442 case ABIArgInfo::Extend: 2443 if (AI.isSignExt()) 2444 Attrs.addAttribute(llvm::Attribute::SExt); 2445 else 2446 Attrs.addAttribute(llvm::Attribute::ZExt); 2447 LLVM_FALLTHROUGH; 2448 case ABIArgInfo::Direct: 2449 if (ArgNo == 0 && FI.isChainCall()) 2450 Attrs.addAttribute(llvm::Attribute::Nest); 2451 else if (AI.getInReg()) 2452 Attrs.addAttribute(llvm::Attribute::InReg); 2453 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign())); 2454 break; 2455 2456 case ABIArgInfo::Indirect: { 2457 if (AI.getInReg()) 2458 Attrs.addAttribute(llvm::Attribute::InReg); 2459 2460 if (AI.getIndirectByVal()) 2461 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); 2462 2463 auto *Decl = ParamType->getAsRecordDecl(); 2464 if (CodeGenOpts.PassByValueIsNoAlias && Decl && 2465 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs) 2466 // When calling the function, the pointer passed in will be the only 2467 // reference to the underlying object. 
Mark it accordingly. 2468 Attrs.addAttribute(llvm::Attribute::NoAlias); 2469 2470 // TODO: We could add the byref attribute if not byval, but it would 2471 // require updating many testcases. 2472 2473 CharUnits Align = AI.getIndirectAlign(); 2474 2475 // In a byval argument, it is important that the required 2476 // alignment of the type is honored, as LLVM might be creating a 2477 // *new* stack object, and needs to know what alignment to give 2478 // it. (Sometimes it can deduce a sensible alignment on its own, 2479 // but not if clang decides it must emit a packed struct, or the 2480 // user specifies increased alignment requirements.) 2481 // 2482 // This is different from indirect *not* byval, where the object 2483 // exists already, and the align attribute is purely 2484 // informative. 2485 assert(!Align.isZero()); 2486 2487 // For now, only add this when we have a byval argument. 2488 // TODO: be less lazy about updating test cases. 2489 if (AI.getIndirectByVal()) 2490 Attrs.addAlignmentAttr(Align.getQuantity()); 2491 2492 // byval disables readnone and readonly. 2493 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2494 .removeAttribute(llvm::Attribute::ReadNone); 2495 2496 break; 2497 } 2498 case ABIArgInfo::IndirectAliased: { 2499 CharUnits Align = AI.getIndirectAlign(); 2500 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); 2501 Attrs.addAlignmentAttr(Align.getQuantity()); 2502 break; 2503 } 2504 case ABIArgInfo::Ignore: 2505 case ABIArgInfo::Expand: 2506 case ABIArgInfo::CoerceAndExpand: 2507 break; 2508 2509 case ABIArgInfo::InAlloca: 2510 // inalloca disables readnone and readonly. 2511 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2512 .removeAttribute(llvm::Attribute::ReadNone); 2513 continue; 2514 } 2515 2516 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2517 QualType PTy = RefTy->getPointeeType(); 2518 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2519 Attrs.addDereferenceableAttr( 2520 getMinimumObjectSize(PTy).getQuantity()); 2521 if (getContext().getTargetAddressSpace(PTy) == 0 && 2522 !CodeGenOpts.NullPointerIsValid) 2523 Attrs.addAttribute(llvm::Attribute::NonNull); 2524 if (PTy->isObjectType()) { 2525 llvm::Align Alignment = 2526 getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); 2527 Attrs.addAlignmentAttr(Alignment); 2528 } 2529 } 2530 2531 // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types: 2532 // > For arguments to a __kernel function declared to be a pointer to a 2533 // > data type, the OpenCL compiler can assume that the pointee is always 2534 // > appropriately aligned as required by the data type. 2535 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() && 2536 ParamType->isPointerType()) { 2537 QualType PTy = ParamType->getPointeeType(); 2538 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2539 llvm::Align Alignment = 2540 getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); 2541 Attrs.addAlignmentAttr(Alignment); 2542 } 2543 } 2544 2545 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2546 case ParameterABI::Ordinary: 2547 break; 2548 2549 case ParameterABI::SwiftIndirectResult: { 2550 // Add 'sret' if we haven't already used it for something, but 2551 // only if the result is void. 2552 if (!hasUsedSRet && RetTy->isVoidType()) { 2553 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); 2554 hasUsedSRet = true; 2555 } 2556 2557 // Add 'noalias' in either case. 
2558 Attrs.addAttribute(llvm::Attribute::NoAlias); 2559 2560 // Add 'dereferenceable' and 'alignment'. 2561 auto PTy = ParamType->getPointeeType(); 2562 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2563 auto info = getContext().getTypeInfoInChars(PTy); 2564 Attrs.addDereferenceableAttr(info.Width.getQuantity()); 2565 Attrs.addAlignmentAttr(info.Align.getAsAlign()); 2566 } 2567 break; 2568 } 2569 2570 case ParameterABI::SwiftErrorResult: 2571 Attrs.addAttribute(llvm::Attribute::SwiftError); 2572 break; 2573 2574 case ParameterABI::SwiftContext: 2575 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2576 break; 2577 2578 case ParameterABI::SwiftAsyncContext: 2579 Attrs.addAttribute(llvm::Attribute::SwiftAsync); 2580 break; 2581 } 2582 2583 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2584 Attrs.addAttribute(llvm::Attribute::NoCapture); 2585 2586 if (Attrs.hasAttributes()) { 2587 unsigned FirstIRArg, NumIRArgs; 2588 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2589 for (unsigned i = 0; i < NumIRArgs; i++) 2590 ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes( 2591 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs)); 2592 } 2593 } 2594 assert(ArgNo == FI.arg_size()); 2595 2596 AttrList = llvm::AttributeList::get( 2597 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2598 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2599 } 2600 2601 /// An argument came in as a promoted argument; demote it back to its 2602 /// declared type. 2603 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2604 const VarDecl *var, 2605 llvm::Value *value) { 2606 llvm::Type *varType = CGF.ConvertType(var->getType()); 2607 2608 // This can happen with promotions that actually don't change the 2609 // underlying type, like the enum promotions. 2610 if (value->getType() == varType) return value; 2611 2612 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2613 && "unexpected promotion type"); 2614 2615 if (isa<llvm::IntegerType>(varType)) 2616 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2617 2618 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2619 } 2620 2621 /// Returns the attribute (either parameter attribute, or function 2622 /// attribute), which declares argument ArgNo to be non-null. 2623 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2624 QualType ArgType, unsigned ArgNo) { 2625 // FIXME: __attribute__((nonnull)) can also be applied to: 2626 // - references to pointers, where the pointee is known to be 2627 // nonnull (apparently a Clang extension) 2628 // - transparent unions containing pointers 2629 // In the former case, LLVM IR cannot represent the constraint. In 2630 // the latter case, we have no guarantee that the transparent union 2631 // is in fact passed as a pointer. 2632 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2633 return nullptr; 2634 // First, check attribute on parameter itself. 2635 if (PVD) { 2636 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2637 return ParmNNAttr; 2638 } 2639 // Check function attributes. 
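// For example, __attribute__((nonnull(1))) on the function declares its
// first argument non-null (ArgNo here is the zero-based parameter index).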
2640 if (!FD)
2641 return nullptr;
2642 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2643 if (NNAttr->isNonNull(ArgNo))
2644 return NNAttr;
2645 }
2646 return nullptr;
2647 }
2648
2649 namespace {
2650 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2651 Address Temp;
2652 Address Arg;
2653 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2654 void Emit(CodeGenFunction &CGF, Flags flags) override {
2655 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2656 CGF.Builder.CreateStore(errorValue, Arg);
2657 }
2658 };
2659 }
2660
2661 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2662 llvm::Function *Fn,
2663 const FunctionArgList &Args) {
2664 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2665 // Naked functions don't have prologues.
2666 return;
2667
2668 // If this is an implicit-return-zero function, go ahead and
2669 // initialize the return value. TODO: it might be nice to have
2670 // a more general mechanism for this that didn't require synthesized
2671 // return statements.
2672 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2673 if (FD->hasImplicitReturnZero()) {
2674 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2675 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2676 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2677 Builder.CreateStore(Zero, ReturnValue);
2678 }
2679 }
2680
2681 // FIXME: We no longer need the types from FunctionArgList; lift up and
2682 // simplify.
2683
2684 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2685 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2686
2687 // If we're using inalloca, all the memory arguments are GEPs off of the last
2688 // parameter, which is a pointer to the complete memory area.
2689 Address ArgStruct = Address::invalid();
2690 if (IRFunctionArgs.hasInallocaArg()) {
2691 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2692 FI.getArgStruct(), FI.getArgStructAlignment());
2693
2694 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2695 }
2696
2697 // Name the struct return parameter.
2698 if (IRFunctionArgs.hasSRetArg()) {
2699 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2700 AI->setName("agg.result");
2701 AI->addAttr(llvm::Attribute::NoAlias);
2702 }
2703
2704 // Track if we received the parameter as a pointer (indirect, byval, or
2705 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
2706 // it into a local alloca for us.
2707 SmallVector<ParamValue, 16> ArgVals;
2708 ArgVals.reserve(Args.size());
2709
2710 // Create a pointer value for every parameter declaration. This usually
2711 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2712 // any cleanups or do anything that might unwind. We do that separately, so
2713 // we can push the cleanups in the correct order for the ABI.
2714 assert(FI.arg_size() == Args.size() &&
2715 "Mismatch between function signature & arguments.");
2716 unsigned ArgNo = 0;
2717 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2718 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2719 i != e; ++i, ++info_it, ++ArgNo) {
2720 const VarDecl *Arg = *i;
2721 const ABIArgInfo &ArgI = info_it->info;
2722
2723 bool isPromoted =
2724 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2725 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2726 // the parameter is promoted.
In this case we convert to
2727 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2728 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2729 assert(hasScalarEvaluationKind(Ty) ==
2730 hasScalarEvaluationKind(Arg->getType()));
2731
2732 unsigned FirstIRArg, NumIRArgs;
2733 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2734
2735 switch (ArgI.getKind()) {
2736 case ABIArgInfo::InAlloca: {
2737 assert(NumIRArgs == 0);
2738 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2739 Address V =
2740 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2741 if (ArgI.getInAllocaIndirect())
2742 V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
2743 getContext().getTypeAlignInChars(Ty));
2744 ArgVals.push_back(ParamValue::forIndirect(V));
2745 break;
2746 }
2747
2748 case ABIArgInfo::Indirect:
2749 case ABIArgInfo::IndirectAliased: {
2750 assert(NumIRArgs == 1);
2751 Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
2752 ArgI.getIndirectAlign());
2753
2754 if (!hasScalarEvaluationKind(Ty)) {
2755 // Aggregates and complex variables are accessed by reference. All we
2756 // need to do is realign the value, if requested. Also, if the address
2757 // may be aliased, copy it to ensure that the parameter variable is
2758 // mutable and has a unique address, as C requires.
2759 Address V = ParamAddr;
2760 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2761 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2762
2763 // Copy from the incoming argument pointer to the temporary with the
2764 // appropriate alignment.
2765 //
2766 // FIXME: We should have a common utility for generating an aggregate
2767 // copy.
2768 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2769 Builder.CreateMemCpy(
2770 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2771 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2772 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2773 V = AlignedTemp;
2774 }
2775 ArgVals.push_back(ParamValue::forIndirect(V));
2776 } else {
2777 // Load scalar value from indirect argument.
2778 llvm::Value *V =
2779 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2780
2781 if (isPromoted)
2782 V = emitArgumentDemotion(*this, Arg, V);
2783 ArgVals.push_back(ParamValue::forDirect(V));
2784 }
2785 break;
2786 }
2787
2788 case ABIArgInfo::Extend:
2789 case ABIArgInfo::Direct: {
2790 auto AI = Fn->getArg(FirstIRArg);
2791 llvm::Type *LTy = ConvertType(Arg->getType());
2792
2793 // Prepare parameter attributes. So far, only attributes for pointer
2794 // parameters are prepared. See
2795 // http://llvm.org/docs/LangRef.html#paramattrs.
2796 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2797 ArgI.getCoerceToType()->isPointerTy()) {
2798 assert(NumIRArgs == 1);
2799
2800 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2801 // Set `nonnull` attribute if any.
2802 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2803 PVD->getFunctionScopeIndex()) &&
2804 !CGM.getCodeGenOpts().NullPointerIsValid)
2805 AI->addAttr(llvm::Attribute::NonNull);
2806
2807 QualType OTy = PVD->getOriginalType();
2808 if (const auto *ArrTy =
2809 getContext().getAsConstantArrayType(OTy)) {
2810 // A C99 array parameter declaration with the static keyword also
2811 // indicates dereferenceability, and if the size is constant we can
2812 // use the dereferenceable attribute (which requires the size in
2813 // bytes).
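// For example, `void f(int a[static 4])` yields dereferenceable(16) on the
// argument, assuming a 4-byte int.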
2814 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2815 QualType ETy = ArrTy->getElementType(); 2816 llvm::Align Alignment = 2817 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2818 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); 2819 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2820 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2821 ArrSize) { 2822 llvm::AttrBuilder Attrs(getLLVMContext()); 2823 Attrs.addDereferenceableAttr( 2824 getContext().getTypeSizeInChars(ETy).getQuantity() * 2825 ArrSize); 2826 AI->addAttrs(Attrs); 2827 } else if (getContext().getTargetInfo().getNullPointerValue( 2828 ETy.getAddressSpace()) == 0 && 2829 !CGM.getCodeGenOpts().NullPointerIsValid) { 2830 AI->addAttr(llvm::Attribute::NonNull); 2831 } 2832 } 2833 } else if (const auto *ArrTy = 2834 getContext().getAsVariableArrayType(OTy)) { 2835 // For C99 VLAs with the static keyword, we don't know the size so 2836 // we can't use the dereferenceable attribute, but in addrspace(0) 2837 // we know that it must be nonnull. 2838 if (ArrTy->getSizeModifier() == VariableArrayType::Static) { 2839 QualType ETy = ArrTy->getElementType(); 2840 llvm::Align Alignment = 2841 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2842 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr(Alignment)); 2843 if (!getContext().getTargetAddressSpace(ETy) && 2844 !CGM.getCodeGenOpts().NullPointerIsValid) 2845 AI->addAttr(llvm::Attribute::NonNull); 2846 } 2847 } 2848 2849 // Set `align` attribute if any. 2850 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2851 if (!AVAttr) 2852 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2853 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2854 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2855 // If alignment-assumption sanitizer is enabled, we do *not* add 2856 // alignment attribute here, but emit normal alignment assumption, 2857 // so the UBSAN check could function. 2858 llvm::ConstantInt *AlignmentCI = 2859 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); 2860 uint64_t AlignmentInt = 2861 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); 2862 if (AI->getParamAlign().valueOrOne() < AlignmentInt) { 2863 AI->removeAttr(llvm::Attribute::AttrKind::Alignment); 2864 AI->addAttrs(llvm::AttrBuilder(getLLVMContext()).addAlignmentAttr( 2865 llvm::Align(AlignmentInt))); 2866 } 2867 } 2868 } 2869 2870 // Set 'noalias' if an argument type has the `restrict` qualifier. 2871 if (Arg->getType().isRestrictQualified()) 2872 AI->addAttr(llvm::Attribute::NoAlias); 2873 } 2874 2875 // Prepare the argument value. If we have the trivial case, handle it 2876 // with no muss and fuss. 2877 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2878 ArgI.getCoerceToType() == ConvertType(Ty) && 2879 ArgI.getDirectOffset() == 0) { 2880 assert(NumIRArgs == 1); 2881 2882 // LLVM expects swifterror parameters to be used in very restricted 2883 // ways. Copy the value into a less-restricted temporary. 
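// (LLVM only permits swifterror values to be loaded, stored, or passed as a
// swifterror call argument, hence the staging through a plain temporary.)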
2884 llvm::Value *V = AI; 2885 if (FI.getExtParameterInfo(ArgNo).getABI() 2886 == ParameterABI::SwiftErrorResult) { 2887 QualType pointeeTy = Ty->getPointeeType(); 2888 assert(pointeeTy->isPointerType()); 2889 Address temp = 2890 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2891 Address arg(V, ConvertTypeForMem(pointeeTy), 2892 getContext().getTypeAlignInChars(pointeeTy)); 2893 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2894 Builder.CreateStore(incomingErrorValue, temp); 2895 V = temp.getPointer(); 2896 2897 // Push a cleanup to copy the value back at the end of the function. 2898 // The convention does not guarantee that the value will be written 2899 // back if the function exits with an unwind exception. 2900 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2901 } 2902 2903 // Ensure the argument is the correct type. 2904 if (V->getType() != ArgI.getCoerceToType()) 2905 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2906 2907 if (isPromoted) 2908 V = emitArgumentDemotion(*this, Arg, V); 2909 2910 // Because of merging of function types from multiple decls it is 2911 // possible for the type of an argument to not match the corresponding 2912 // type in the function type. Since we are codegening the callee 2913 // in here, add a cast to the argument type. 2914 llvm::Type *LTy = ConvertType(Arg->getType()); 2915 if (V->getType() != LTy) 2916 V = Builder.CreateBitCast(V, LTy); 2917 2918 ArgVals.push_back(ParamValue::forDirect(V)); 2919 break; 2920 } 2921 2922 // VLST arguments are coerced to VLATs at the function boundary for 2923 // ABI consistency. If this is a VLST that was coerced to 2924 // a VLAT at the function boundary and the types match up, use 2925 // llvm.experimental.vector.extract to convert back to the original 2926 // VLST. 2927 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { 2928 llvm::Value *Coerced = Fn->getArg(FirstIRArg); 2929 if (auto *VecTyFrom = 2930 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { 2931 // If we are casting a scalable 16 x i1 predicate vector to a fixed i8 2932 // vector, bitcast the source and use a vector extract. 2933 auto PredType = 2934 llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16); 2935 if (VecTyFrom == PredType && 2936 VecTyTo->getElementType() == Builder.getInt8Ty()) { 2937 VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2); 2938 Coerced = Builder.CreateBitCast(Coerced, VecTyFrom); 2939 } 2940 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { 2941 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); 2942 2943 assert(NumIRArgs == 1); 2944 Coerced->setName(Arg->getName() + ".coerce"); 2945 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector( 2946 VecTyTo, Coerced, Zero, "castFixedSve"))); 2947 break; 2948 } 2949 } 2950 } 2951 2952 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2953 Arg->getName()); 2954 2955 // Pointer to store into. 2956 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2957 2958 // Fast-isel and the optimizer generally like scalar values better than 2959 // FCAs, so we flatten them if this is safe to do for this argument. 
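// For example, an argument coerced to { i32, i32 } arrives as two i32 IR
// arguments that are stored field by field into the alloca below.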
2960 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2961 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2962 STy->getNumElements() > 1) { 2963 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2964 llvm::Type *DstTy = Ptr.getElementType(); 2965 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2966 2967 Address AddrToStoreInto = Address::invalid(); 2968 if (SrcSize <= DstSize) { 2969 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2970 } else { 2971 AddrToStoreInto = 2972 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2973 } 2974 2975 assert(STy->getNumElements() == NumIRArgs); 2976 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2977 auto AI = Fn->getArg(FirstIRArg + i); 2978 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2979 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); 2980 Builder.CreateStore(AI, EltPtr); 2981 } 2982 2983 if (SrcSize > DstSize) { 2984 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2985 } 2986 2987 } else { 2988 // Simple case, just do a coerced store of the argument into the alloca. 2989 assert(NumIRArgs == 1); 2990 auto AI = Fn->getArg(FirstIRArg); 2991 AI->setName(Arg->getName() + ".coerce"); 2992 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); 2993 } 2994 2995 // Match to what EmitParmDecl is expecting for this type. 2996 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2997 llvm::Value *V = 2998 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2999 if (isPromoted) 3000 V = emitArgumentDemotion(*this, Arg, V); 3001 ArgVals.push_back(ParamValue::forDirect(V)); 3002 } else { 3003 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 3004 } 3005 break; 3006 } 3007 3008 case ABIArgInfo::CoerceAndExpand: { 3009 // Reconstruct into a temporary. 3010 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 3011 ArgVals.push_back(ParamValue::forIndirect(alloca)); 3012 3013 auto coercionType = ArgI.getCoerceAndExpandType(); 3014 alloca = Builder.CreateElementBitCast(alloca, coercionType); 3015 3016 unsigned argIndex = FirstIRArg; 3017 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 3018 llvm::Type *eltType = coercionType->getElementType(i); 3019 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 3020 continue; 3021 3022 auto eltAddr = Builder.CreateStructGEP(alloca, i); 3023 auto elt = Fn->getArg(argIndex++); 3024 Builder.CreateStore(elt, eltAddr); 3025 } 3026 assert(argIndex == FirstIRArg + NumIRArgs); 3027 break; 3028 } 3029 3030 case ABIArgInfo::Expand: { 3031 // If this structure was expanded into multiple arguments then 3032 // we need to create a temporary and reconstruct it from the 3033 // arguments. 3034 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 3035 LValue LV = MakeAddrLValue(Alloca, Ty); 3036 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 3037 3038 auto FnArgIter = Fn->arg_begin() + FirstIRArg; 3039 ExpandTypeFromArgs(Ty, LV, FnArgIter); 3040 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); 3041 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 3042 auto AI = Fn->getArg(FirstIRArg + i); 3043 AI->setName(Arg->getName() + "." + Twine(i)); 3044 } 3045 break; 3046 } 3047 3048 case ABIArgInfo::Ignore: 3049 assert(NumIRArgs == 0); 3050 // Initialize the local variable appropriately. 
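// Scalars get an undef value; aggregates get an uninitialized temporary,
// since the parameter variable still needs storage.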
3051 if (!hasScalarEvaluationKind(Ty)) {
3052 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
3053 } else {
3054 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
3055 ArgVals.push_back(ParamValue::forDirect(U));
3056 }
3057 break;
3058 }
3059 }
3060
3061 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
3062 for (int I = Args.size() - 1; I >= 0; --I)
3063 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3064 } else {
3065 for (unsigned I = 0, E = Args.size(); I != E; ++I)
3066 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
3067 }
3068 }
3069
3070 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
3071 while (insn->use_empty()) {
3072 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
3073 if (!bitcast) return;
3074
3075 // This is "safe" because we would have used a ConstantExpr otherwise.
3076 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
3077 bitcast->eraseFromParent();
3078 }
3079 }
3080
3081 /// Try to emit a fused autorelease of a return result.
3082 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3083 llvm::Value *result) {
3084 // We must be immediately following the cast.
3085 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3086 if (BB->empty()) return nullptr;
3087 if (&BB->back() != result) return nullptr;
3088
3089 llvm::Type *resultType = result->getType();
3090
3091 // result is in a BasicBlock and is therefore an Instruction.
3092 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3093
3094 SmallVector<llvm::Instruction *, 4> InstsToKill;
3095
3096 // Look for:
3097 // %generator = bitcast %type1* %generator2 to %type2*
3098 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3099 // We would have emitted this as a constant if the operand weren't
3100 // an Instruction.
3101 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3102
3103 // Require the generator to be immediately followed by the cast.
3104 if (generator->getNextNode() != bitcast)
3105 return nullptr;
3106
3107 InstsToKill.push_back(bitcast);
3108 }
3109
3110 // Look for:
3111 // %generator = call i8* @objc_retain(i8* %originalResult)
3112 // or
3113 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3114 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3115 if (!call) return nullptr;
3116
3117 bool doRetainAutorelease;
3118
3119 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3120 doRetainAutorelease = true;
3121 } else if (call->getCalledOperand() ==
3122 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3123 doRetainAutorelease = false;
3124
3125 // If we emitted an assembly marker for this call (and the
3126 // ObjCEntrypoints field should have been set if so), go looking
3127 // for that call. If we can't find it, we can't do this
3128 // optimization. But it should always be the immediately previous
3129 // instruction, unless we needed bitcasts around the call.
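    // On some targets the marker is an inline-asm call; e.g. on arm64 it is
    // roughly  call void asm sideeffect "mov fp, fp"  (a sketch; the exact
    // string comes from the target).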
3130 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 3131 llvm::Instruction *prev = call->getPrevNode(); 3132 assert(prev); 3133 if (isa<llvm::BitCastInst>(prev)) { 3134 prev = prev->getPrevNode(); 3135 assert(prev); 3136 } 3137 assert(isa<llvm::CallInst>(prev)); 3138 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == 3139 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 3140 InstsToKill.push_back(prev); 3141 } 3142 } else { 3143 return nullptr; 3144 } 3145 3146 result = call->getArgOperand(0); 3147 InstsToKill.push_back(call); 3148 3149 // Keep killing bitcasts, for sanity. Note that we no longer care 3150 // about precise ordering as long as there's exactly one use. 3151 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 3152 if (!bitcast->hasOneUse()) break; 3153 InstsToKill.push_back(bitcast); 3154 result = bitcast->getOperand(0); 3155 } 3156 3157 // Delete all the unnecessary instructions, from latest to earliest. 3158 for (auto *I : InstsToKill) 3159 I->eraseFromParent(); 3160 3161 // Do the fused retain/autorelease if we were asked to. 3162 if (doRetainAutorelease) 3163 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 3164 3165 // Cast back to the result type. 3166 return CGF.Builder.CreateBitCast(result, resultType); 3167 } 3168 3169 /// If this is a +1 of the value of an immutable 'self', remove it. 3170 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 3171 llvm::Value *result) { 3172 // This is only applicable to a method with an immutable 'self'. 3173 const ObjCMethodDecl *method = 3174 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 3175 if (!method) return nullptr; 3176 const VarDecl *self = method->getSelfDecl(); 3177 if (!self->getType().isConstQualified()) return nullptr; 3178 3179 // Look for a retain call. 3180 llvm::CallInst *retainCall = 3181 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 3182 if (!retainCall || retainCall->getCalledOperand() != 3183 CGF.CGM.getObjCEntrypoints().objc_retain) 3184 return nullptr; 3185 3186 // Look for an ordinary load of 'self'. 3187 llvm::Value *retainedValue = retainCall->getArgOperand(0); 3188 llvm::LoadInst *load = 3189 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 3190 if (!load || load->isAtomic() || load->isVolatile() || 3191 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 3192 return nullptr; 3193 3194 // Okay! Burn it all down. This relies for correctness on the 3195 // assumption that the retain is emitted as part of the return and 3196 // that thereafter everything is used "linearly". 3197 llvm::Type *resultType = result->getType(); 3198 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 3199 assert(retainCall->use_empty()); 3200 retainCall->eraseFromParent(); 3201 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 3202 3203 return CGF.Builder.CreateBitCast(load, resultType); 3204 } 3205 3206 /// Emit an ARC autorelease of the result of a function. 3207 /// 3208 /// \return the value to actually return from the function 3209 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 3210 llvm::Value *result) { 3211 // If we're returning 'self', kill the initial retain. This is a 3212 // heuristic attempt to "encourage correctness" in the really unfortunate 3213 // case where we have a return of self during a dealloc and we desperately 3214 // need to avoid the possible autorelease. 
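  // (A sketch of the hazard: a method that ends in 'return self;' while the
  // object is being deallocated, where an autorelease could resurrect the
  // dying object.)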
3215 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3216 return self;
3217
3218 // At -O0, try to emit a fused retain/autorelease.
3219 if (CGF.shouldUseFusedARCCalls())
3220 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3221 return fused;
3222
3223 return CGF.EmitARCAutoreleaseReturnValue(result);
3224 }
3225
3226 /// Heuristically search for a dominating store to the return-value slot.
3227 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3228 // Check if a User is a store whose pointer operand is the ReturnValue.
3229 // We are looking for stores to the ReturnValue, not for stores of the
3230 // ReturnValue to some other location.
3231 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3232 auto *SI = dyn_cast<llvm::StoreInst>(U);
3233 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
3234 SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
3235 return nullptr;
3236 // These aren't actually possible for non-coerced returns, and we
3237 // only care about non-coerced returns on this code path.
3238 assert(!SI->isAtomic() && !SI->isVolatile());
3239 return SI;
3240 };
3241 // If there are multiple uses of the return-value slot, just check
3242 // for something immediately preceding the IP. Sometimes this can
3243 // happen with how we generate implicit returns; it can also happen
3244 // with noreturn cleanups.
3245 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3246 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3247 if (IP->empty()) return nullptr;
3248
3249 // Look at the directly preceding instruction, skipping bitcasts and
3250 // lifetime markers.
3251 for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
3252 if (isa<llvm::BitCastInst>(&I))
3253 continue;
3254 if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
3255 if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
3256 continue;
3257
3258 return GetStoreIfValid(&I);
3259 }
3260 return nullptr;
3261 }
3262
3263 llvm::StoreInst *store =
3264 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3265 if (!store) return nullptr;
3266
3267 // Now do a quick-and-dirty dominance check: just walk up the
3268 // single-predecessors chain from the current insertion point.
3269 llvm::BasicBlock *StoreBB = store->getParent();
3270 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3271 while (IP != StoreBB) {
3272 if (!(IP = IP->getSinglePredecessor()))
3273 return nullptr;
3274 }
3275
3276 // Okay, the store's basic block dominates the insertion point; we
3277 // can do our thing.
3278 return store;
3279 }
3280
3281 // Helper functions for EmitCMSEClearRecord
3282
3283 // Set the bits corresponding to a field having width `BitWidth` and located at
3284 // offset `BitOffset` (from the least significant bit) within a storage unit of
3285 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3286 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
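// For example (a worked sketch with CharWidth == 8), a field at BitOffset 4
// with BitWidth 8 over a two-byte storage unit produces:
//   Bits[0] |= 0xF0;  // low four bits of the field
//   Bits[1] |= 0x0F;  // high four bits of the field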
3287 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, 3288 int BitWidth, int CharWidth) { 3289 assert(CharWidth <= 64); 3290 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); 3291 3292 int Pos = 0; 3293 if (BitOffset >= CharWidth) { 3294 Pos += BitOffset / CharWidth; 3295 BitOffset = BitOffset % CharWidth; 3296 } 3297 3298 const uint64_t Used = (uint64_t(1) << CharWidth) - 1; 3299 if (BitOffset + BitWidth >= CharWidth) { 3300 Bits[Pos++] |= (Used << BitOffset) & Used; 3301 BitWidth -= CharWidth - BitOffset; 3302 BitOffset = 0; 3303 } 3304 3305 while (BitWidth >= CharWidth) { 3306 Bits[Pos++] = Used; 3307 BitWidth -= CharWidth; 3308 } 3309 3310 if (BitWidth > 0) 3311 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; 3312 } 3313 3314 // Set the bits corresponding to a field having width `BitWidth` and located at 3315 // offset `BitOffset` (from the least significant bit) within a storage unit of 3316 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of 3317 // `Bits` corresponds to one target byte. Use target endian layout. 3318 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, 3319 int StorageSize, int BitOffset, int BitWidth, 3320 int CharWidth, bool BigEndian) { 3321 3322 SmallVector<uint64_t, 8> TmpBits(StorageSize); 3323 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); 3324 3325 if (BigEndian) 3326 std::reverse(TmpBits.begin(), TmpBits.end()); 3327 3328 for (uint64_t V : TmpBits) 3329 Bits[StorageOffset++] |= V; 3330 } 3331 3332 static void setUsedBits(CodeGenModule &, QualType, int, 3333 SmallVectorImpl<uint64_t> &); 3334 3335 // Set the bits in `Bits`, which correspond to the value representations of 3336 // the actual members of the record type `RTy`. Note that this function does 3337 // not handle base classes, virtual tables, etc, since they cannot happen in 3338 // CMSE function arguments or return. The bit mask corresponds to the target 3339 // memory layout, i.e. it's endian dependent. 3340 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, 3341 SmallVectorImpl<uint64_t> &Bits) { 3342 ASTContext &Context = CGM.getContext(); 3343 int CharWidth = Context.getCharWidth(); 3344 const RecordDecl *RD = RTy->getDecl()->getDefinition(); 3345 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD); 3346 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); 3347 3348 int Idx = 0; 3349 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { 3350 const FieldDecl *F = *I; 3351 3352 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || 3353 F->getType()->isIncompleteArrayType()) 3354 continue; 3355 3356 if (F->isBitField()) { 3357 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F); 3358 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(), 3359 BFI.StorageSize / CharWidth, BFI.Offset, 3360 BFI.Size, CharWidth, 3361 CGM.getDataLayout().isBigEndian()); 3362 continue; 3363 } 3364 3365 setUsedBits(CGM, F->getType(), 3366 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits); 3367 } 3368 } 3369 3370 // Set the bits in `Bits`, which correspond to the value representations of 3371 // the elements of an array type `ATy`. 
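// For example (a sketch), for 'int A[2]' with a 4-byte int, the used bits of
// one element are computed once into a temporary and then OR-ed into `Bits`
// at byte offsets Offset + 0 and Offset + 4.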
3372 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3373 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3374 const ASTContext &Context = CGM.getContext();
3375
3376 QualType ETy = Context.getBaseElementType(ATy);
3377 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3378 SmallVector<uint64_t, 4> TmpBits(Size);
3379 setUsedBits(CGM, ETy, 0, TmpBits);
3380
3381 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3382 auto Src = TmpBits.begin();
3383 auto Dst = Bits.begin() + Offset + I * Size;
3384 for (int J = 0; J < Size; ++J)
3385 *Dst++ |= *Src++;
3386 }
3387 }
3388
3389 // Set the bits in `Bits`, which correspond to the value representations of
3390 // the type `QTy`.
3391 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3392 SmallVectorImpl<uint64_t> &Bits) {
3393 if (const auto *RTy = QTy->getAs<RecordType>())
3394 return setUsedBits(CGM, RTy, Offset, Bits);
3395
3396 ASTContext &Context = CGM.getContext();
3397 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3398 return setUsedBits(CGM, ATy, Offset, Bits);
3399
3400 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3401 if (Size <= 0)
3402 return;
3403
3404 std::fill_n(Bits.begin() + Offset, Size,
3405 (uint64_t(1) << Context.getCharWidth()) - 1);
3406 }
3407
3408 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3409 int Pos, int Size, int CharWidth,
3410 bool BigEndian) {
3411 assert(Size > 0);
3412 uint64_t Mask = 0;
3413 if (BigEndian) {
3414 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3415 ++P)
3416 Mask = (Mask << CharWidth) | *P;
3417 } else {
3418 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3419 do
3420 Mask = (Mask << CharWidth) | *--P;
3421 while (P != End);
3422 }
3423 return Mask;
3424 }
3425
3426 // Emit code to clear the bits in a record that aren't part of any
3427 // user-declared member, when the record is a function return value.
3428 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3429 llvm::IntegerType *ITy,
3430 QualType QTy) {
3431 assert(Src->getType() == ITy);
3432 assert(ITy->getScalarSizeInBits() <= 64);
3433
3434 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3435 int Size = DataLayout.getTypeStoreSize(ITy);
3436 SmallVector<uint64_t, 4> Bits(Size);
3437 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3438
3439 int CharWidth = CGM.getContext().getCharWidth();
3440 uint64_t Mask =
3441 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3442
3443 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3444 }
3445
3446 // Emit code to clear the bits in a record that aren't part of any
3447 // user-declared member, when the record is a function argument.
3448 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3449 llvm::ArrayType *ATy,
3450 QualType QTy) {
3451 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3452 int Size = DataLayout.getTypeStoreSize(ATy);
3453 SmallVector<uint64_t, 16> Bits(Size);
3454 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3455
3456 // Clear each element of the LLVM array.
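  // For example (a sketch), a 12-byte record arriving as [3 x i32] is
  // processed element by element, each i32 AND-ed with a mask built from the
  // next four bytes of the used-bits vector.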
3457 int CharWidth = CGM.getContext().getCharWidth();
3458 int CharsPerElt =
3459 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3460 int MaskIndex = 0;
3461 llvm::Value *R = llvm::UndefValue::get(ATy);
3462 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3463 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3464 DataLayout.isBigEndian());
3465 MaskIndex += CharsPerElt;
3466 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3467 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3468 R = Builder.CreateInsertValue(R, T1, I);
3469 }
3470
3471 return R;
3472 }
3473
3474 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3475 bool EmitRetDbgLoc,
3476 SourceLocation EndLoc) {
3477 if (FI.isNoReturn()) {
3478 // Noreturn functions don't return.
3479 EmitUnreachable(EndLoc);
3480 return;
3481 }
3482
3483 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3484 // Naked functions don't have epilogues.
3485 Builder.CreateUnreachable();
3486 return;
3487 }
3488
3489 // Functions with no result always return void.
3490 if (!ReturnValue.isValid()) {
3491 Builder.CreateRetVoid();
3492 return;
3493 }
3494
3495 llvm::DebugLoc RetDbgLoc;
3496 llvm::Value *RV = nullptr;
3497 QualType RetTy = FI.getReturnType();
3498 const ABIArgInfo &RetAI = FI.getReturnInfo();
3499
3500 switch (RetAI.getKind()) {
3501 case ABIArgInfo::InAlloca:
3502 // Aggregates get evaluated directly into the destination. Sometimes we
3503 // need to return the sret value in a register, though.
3504 assert(hasAggregateEvaluationKind(RetTy));
3505 if (RetAI.getInAllocaSRet()) {
3506 llvm::Function::arg_iterator EI = CurFn->arg_end();
3507 --EI;
3508 llvm::Value *ArgStruct = &*EI;
3509 llvm::Value *SRet = Builder.CreateStructGEP(
3510 FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
3511 llvm::Type *Ty =
3512 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3513 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3514 }
3515 break;
3516
3517 case ABIArgInfo::Indirect: {
3518 auto AI = CurFn->arg_begin();
3519 if (RetAI.isSRetAfterThis())
3520 ++AI;
3521 switch (getEvaluationKind(RetTy)) {
3522 case TEK_Complex: {
3523 ComplexPairTy RT =
3524 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3525 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3526 /*isInit*/ true);
3527 break;
3528 }
3529 case TEK_Aggregate:
3530 // Do nothing; aggregates get evaluated directly into the destination.
3531 break;
3532 case TEK_Scalar: {
3533 LValueBaseInfo BaseInfo;
3534 TBAAAccessInfo TBAAInfo;
3535 CharUnits Alignment =
3536 CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
3537 Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
3538 LValue ArgVal =
3539 LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
3540 EmitStoreOfScalar(
3541 Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
3542 break;
3543 }
3544 }
3545 break;
3546 }
3547
3548 case ABIArgInfo::Extend:
3549 case ABIArgInfo::Direct:
3550 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3551 RetAI.getDirectOffset() == 0) {
3552 // The internal return value temp always has pointer-to-return-type
3553 // type; just do a load.
3554
3555 // If there is a dominating store to ReturnValue, we can elide
3556 // the load, zap the store, and usually zap the alloca.
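        // For example (IR sketch):
        //   store i32 %x, i32* %retval
        //   %v = load i32, i32* %retval
        //   ret i32 %v
        // becomes 'ret i32 %x' once the store is known to dominate the
        // return.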
3557 if (llvm::StoreInst *SI =
3558 findDominatingStoreToReturnValue(*this)) {
3559 // Reuse the debug location from the store unless there is
3560 // cleanup code to be emitted between the store and return
3561 // instruction.
3562 if (EmitRetDbgLoc && !AutoreleaseResult)
3563 RetDbgLoc = SI->getDebugLoc();
3564 // Get the stored value and nuke the now-dead store.
3565 RV = SI->getValueOperand();
3566 SI->eraseFromParent();
3567
3568 // Otherwise, we have to do a simple load.
3569 } else {
3570 RV = Builder.CreateLoad(ReturnValue);
3571 }
3572 } else {
3573 // If the value is offset in memory, apply the offset now.
3574 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3575
3576 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3577 }
3578
3579 // In ARC, end functions that return a retainable type with a call
3580 // to objc_autoreleaseReturnValue.
3581 if (AutoreleaseResult) {
3582 #ifndef NDEBUG
3583 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3584 // been stripped of its typedefs, so we cannot use RetTy here. Get the
3585 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3586 // from CurCodeDecl or BlockInfo.
3587 QualType RT;
3588
3589 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3590 RT = FD->getReturnType();
3591 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3592 RT = MD->getReturnType();
3593 else if (isa<BlockDecl>(CurCodeDecl))
3594 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3595 else
3596 llvm_unreachable("Unexpected function/method type");
3597
3598 assert(getLangOpts().ObjCAutoRefCount &&
3599 !FI.isReturnsRetained() &&
3600 RT->isObjCRetainableType());
3601 #endif
3602 RV = emitAutoreleaseOfResult(*this, RV);
3603 }
3604
3605 break;
3606
3607 case ABIArgInfo::Ignore:
3608 break;
3609
3610 case ABIArgInfo::CoerceAndExpand: {
3611 auto coercionType = RetAI.getCoerceAndExpandType();
3612
3613 // Load all of the coerced elements out into results.
3614 llvm::SmallVector<llvm::Value*, 4> results;
3615 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3616 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3617 auto coercedEltType = coercionType->getElementType(i);
3618 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3619 continue;
3620
3621 auto eltAddr = Builder.CreateStructGEP(addr, i);
3622 auto elt = Builder.CreateLoad(eltAddr);
3623 results.push_back(elt);
3624 }
3625
3626 // If we have one result, it's the single direct result type.
3627 if (results.size() == 1) {
3628 RV = results[0];
3629
3630 // Otherwise, we need to make a first-class aggregate.
3631 } else {
3632 // Construct a return type that lacks padding elements.
3633 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3634
3635 RV = llvm::UndefValue::get(returnType);
3636 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3637 RV = Builder.CreateInsertValue(RV, results[i], i);
3638 }
3639 }
3640 break;
3641 }
3642 case ABIArgInfo::Expand:
3643 case ABIArgInfo::IndirectAliased:
3644 llvm_unreachable("Invalid ABI kind for return argument");
3645 }
3646
3647 llvm::Instruction *Ret;
3648 if (RV) {
3649 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3650 // For certain return types, clear padding bits, as they may reveal
3651 // sensitive information.
3652 // Small struct/union types are passed as integers.
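      // For example (an AAPCS sketch), 'struct S { short a; char b; }' comes
      // back in a single i32; the padding byte is cleared below so it cannot
      // leak secure-state data out of a cmse_nonsecure_entry function.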
3653 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType()); 3654 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType())) 3655 RV = EmitCMSEClearRecord(RV, ITy, RetTy); 3656 } 3657 EmitReturnValueCheck(RV); 3658 Ret = Builder.CreateRet(RV); 3659 } else { 3660 Ret = Builder.CreateRetVoid(); 3661 } 3662 3663 if (RetDbgLoc) 3664 Ret->setDebugLoc(std::move(RetDbgLoc)); 3665 } 3666 3667 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) { 3668 // A current decl may not be available when emitting vtable thunks. 3669 if (!CurCodeDecl) 3670 return; 3671 3672 // If the return block isn't reachable, neither is this check, so don't emit 3673 // it. 3674 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) 3675 return; 3676 3677 ReturnsNonNullAttr *RetNNAttr = nullptr; 3678 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3679 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3680 3681 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3682 return; 3683 3684 // Prefer the returns_nonnull attribute if it's present. 3685 SourceLocation AttrLoc; 3686 SanitizerMask CheckKind; 3687 SanitizerHandler Handler; 3688 if (RetNNAttr) { 3689 assert(!requiresReturnValueNullabilityCheck() && 3690 "Cannot check nullability and the nonnull attribute"); 3691 AttrLoc = RetNNAttr->getLocation(); 3692 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3693 Handler = SanitizerHandler::NonnullReturn; 3694 } else { 3695 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3696 if (auto *TSI = DD->getTypeSourceInfo()) 3697 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3698 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3699 CheckKind = SanitizerKind::NullabilityReturn; 3700 Handler = SanitizerHandler::NullabilityReturn; 3701 } 3702 3703 SanitizerScope SanScope(this); 3704 3705 // Make sure the "return" source location is valid. If we're checking a 3706 // nullability annotation, make sure the preconditions for the check are met. 3707 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3708 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3709 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3710 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3711 if (requiresReturnValueNullabilityCheck()) 3712 CanNullCheck = 3713 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3714 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3715 EmitBlock(Check); 3716 3717 // Now do the null check. 3718 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3719 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3720 llvm::Value *DynamicData[] = {SLocPtr}; 3721 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3722 3723 EmitBlock(NoCheck); 3724 3725 #ifndef NDEBUG 3726 // The return location should not be used after the check has been emitted. 3727 ReturnLocation = Address::invalid(); 3728 #endif 3729 } 3730 3731 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3732 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3733 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3734 } 3735 3736 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3737 QualType Ty) { 3738 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3739 // placeholders. 
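  // (A sketch of the mechanism: the placeholder below is a load of an undef
  // pointer-to-pointer, which is later RAUW'd with the address of the real
  // inalloca slot; see deferPlaceholderReplacement.)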
3740 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3741 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3742 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3743 3744 // FIXME: When we generate this IR in one pass, we shouldn't need 3745 // this win32-specific alignment hack. 3746 CharUnits Align = CharUnits::fromQuantity(4); 3747 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3748 3749 return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align), 3750 Ty.getQualifiers(), 3751 AggValueSlot::IsNotDestructed, 3752 AggValueSlot::DoesNotNeedGCBarriers, 3753 AggValueSlot::IsNotAliased, 3754 AggValueSlot::DoesNotOverlap); 3755 } 3756 3757 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3758 const VarDecl *param, 3759 SourceLocation loc) { 3760 // StartFunction converted the ABI-lowered parameter(s) into a 3761 // local alloca. We need to turn that into an r-value suitable 3762 // for EmitCall. 3763 Address local = GetAddrOfLocalVar(param); 3764 3765 QualType type = param->getType(); 3766 3767 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3768 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3769 } 3770 3771 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3772 // but the argument needs to be the original pointer. 3773 if (type->isReferenceType()) { 3774 args.add(RValue::get(Builder.CreateLoad(local)), type); 3775 3776 // In ARC, move out of consumed arguments so that the release cleanup 3777 // entered by StartFunction doesn't cause an over-release. This isn't 3778 // optimal -O0 code generation, but it should get cleaned up when 3779 // optimization is enabled. This also assumes that delegate calls are 3780 // performed exactly once for a set of arguments, but that should be safe. 3781 } else if (getLangOpts().ObjCAutoRefCount && 3782 param->hasAttr<NSConsumedAttr>() && 3783 type->isObjCRetainableType()) { 3784 llvm::Value *ptr = Builder.CreateLoad(local); 3785 auto null = 3786 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3787 Builder.CreateStore(null, local); 3788 args.add(RValue::get(ptr), type); 3789 3790 // For the most part, we just need to load the alloca, except that 3791 // aggregate r-values are actually pointers to temporaries. 3792 } else { 3793 args.add(convertTempToRValue(local, type, loc), type); 3794 } 3795 3796 // Deactivate the cleanup for the callee-destructed param that was pushed. 3797 if (type->isRecordType() && !CurFuncIsThunk && 3798 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3799 param->needsDestruction(getContext())) { 3800 EHScopeStack::stable_iterator cleanup = 3801 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3802 assert(cleanup.isValid() && 3803 "cleanup for callee-destructed param not recorded"); 3804 // This unreachable is a temporary marker which will be removed later. 3805 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3806 args.addArgCleanupDeactivation(cleanup, isActive); 3807 } 3808 } 3809 3810 static bool isProvablyNull(llvm::Value *addr) { 3811 return isa<llvm::ConstantPointerNull>(addr); 3812 } 3813 3814 /// Emit the actual writing-back of a writeback. 
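/// For example (a sketch), with the common out-parameter pattern
///   NSError *err; [obj doThing:&err];
/// the callee writes into a temporary, and this function copies the
/// temporary's final value back into 'err' after the call.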
3815 static void emitWriteback(CodeGenFunction &CGF, 3816 const CallArgList::Writeback &writeback) { 3817 const LValue &srcLV = writeback.Source; 3818 Address srcAddr = srcLV.getAddress(CGF); 3819 assert(!isProvablyNull(srcAddr.getPointer()) && 3820 "shouldn't have writeback for provably null argument"); 3821 3822 llvm::BasicBlock *contBB = nullptr; 3823 3824 // If the argument wasn't provably non-null, we need to null check 3825 // before doing the store. 3826 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3827 CGF.CGM.getDataLayout()); 3828 if (!provablyNonNull) { 3829 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3830 contBB = CGF.createBasicBlock("icr.done"); 3831 3832 llvm::Value *isNull = 3833 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3834 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3835 CGF.EmitBlock(writebackBB); 3836 } 3837 3838 // Load the value to writeback. 3839 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3840 3841 // Cast it back, in case we're writing an id to a Foo* or something. 3842 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3843 "icr.writeback-cast"); 3844 3845 // Perform the writeback. 3846 3847 // If we have a "to use" value, it's something we need to emit a use 3848 // of. This has to be carefully threaded in: if it's done after the 3849 // release it's potentially undefined behavior (and the optimizer 3850 // will ignore it), and if it happens before the retain then the 3851 // optimizer could move the release there. 3852 if (writeback.ToUse) { 3853 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3854 3855 // Retain the new value. No need to block-copy here: the block's 3856 // being passed up the stack. 3857 value = CGF.EmitARCRetainNonBlock(value); 3858 3859 // Emit the intrinsic use here. 3860 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3861 3862 // Load the old value (primitively). 3863 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3864 3865 // Put the new value in place (primitively). 3866 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3867 3868 // Release the old value. 3869 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3870 3871 // Otherwise, we can just do a normal lvalue store. 3872 } else { 3873 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3874 } 3875 3876 // Jump to the continuation block. 3877 if (!provablyNonNull) 3878 CGF.EmitBlock(contBB); 3879 } 3880 3881 static void emitWritebacks(CodeGenFunction &CGF, 3882 const CallArgList &args) { 3883 for (const auto &I : args.writebacks()) 3884 emitWriteback(CGF, I); 3885 } 3886 3887 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3888 const CallArgList &CallArgs) { 3889 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3890 CallArgs.getCleanupsToDeactivate(); 3891 // Iterate in reverse to increase the likelihood of popping the cleanup. 3892 for (const auto &I : llvm::reverse(Cleanups)) { 3893 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3894 I.IsActiveIP->eraseFromParent(); 3895 } 3896 } 3897 3898 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3899 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3900 if (uop->getOpcode() == UO_AddrOf) 3901 return uop->getSubExpr(); 3902 return nullptr; 3903 } 3904 3905 /// Emit an argument that's being passed call-by-writeback. 
That is, 3906 /// we are passing the address of an __autoreleased temporary; it 3907 /// might be copy-initialized with the current value of the given 3908 /// address, but it will definitely be copied out of after the call. 3909 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3910 const ObjCIndirectCopyRestoreExpr *CRE) { 3911 LValue srcLV; 3912 3913 // Make an optimistic effort to emit the address as an l-value. 3914 // This can fail if the argument expression is more complicated. 3915 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3916 srcLV = CGF.EmitLValue(lvExpr); 3917 3918 // Otherwise, just emit it as a scalar. 3919 } else { 3920 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3921 3922 QualType srcAddrType = 3923 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3924 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3925 } 3926 Address srcAddr = srcLV.getAddress(CGF); 3927 3928 // The dest and src types don't necessarily match in LLVM terms 3929 // because of the crazy ObjC compatibility rules. 3930 3931 llvm::PointerType *destType = 3932 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3933 llvm::Type *destElemType = 3934 CGF.ConvertTypeForMem(CRE->getType()->getPointeeType()); 3935 3936 // If the address is a constant null, just pass the appropriate null. 3937 if (isProvablyNull(srcAddr.getPointer())) { 3938 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3939 CRE->getType()); 3940 return; 3941 } 3942 3943 // Create the temporary. 3944 Address temp = 3945 CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp"); 3946 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3947 // and that cleanup will be conditional if we can't prove that the l-value 3948 // isn't null, so we need to register a dominating point so that the cleanups 3949 // system will make valid IR. 3950 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3951 3952 // Zero-initialize it if we're not doing a copy-initialization. 3953 bool shouldCopy = CRE->shouldCopy(); 3954 if (!shouldCopy) { 3955 llvm::Value *null = 3956 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType)); 3957 CGF.Builder.CreateStore(null, temp); 3958 } 3959 3960 llvm::BasicBlock *contBB = nullptr; 3961 llvm::BasicBlock *originBB = nullptr; 3962 3963 // If the address is *not* known to be non-null, we need to switch. 3964 llvm::Value *finalArgument; 3965 3966 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3967 CGF.CGM.getDataLayout()); 3968 if (provablyNonNull) { 3969 finalArgument = temp.getPointer(); 3970 } else { 3971 llvm::Value *isNull = 3972 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3973 3974 finalArgument = CGF.Builder.CreateSelect(isNull, 3975 llvm::ConstantPointerNull::get(destType), 3976 temp.getPointer(), "icr.argument"); 3977 3978 // If we need to copy, then the load has to be conditional, which 3979 // means we need control flow. 3980 if (shouldCopy) { 3981 originBB = CGF.Builder.GetInsertBlock(); 3982 contBB = CGF.createBasicBlock("icr.cont"); 3983 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3984 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3985 CGF.EmitBlock(copyBB); 3986 condEval.begin(CGF); 3987 } 3988 } 3989 3990 llvm::Value *valueToUse = nullptr; 3991 3992 // Perform a copy if necessary. 
3993 if (shouldCopy) { 3994 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3995 assert(srcRV.isScalar()); 3996 3997 llvm::Value *src = srcRV.getScalarVal(); 3998 src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast"); 3999 4000 // Use an ordinary store, not a store-to-lvalue. 4001 CGF.Builder.CreateStore(src, temp); 4002 4003 // If optimization is enabled, and the value was held in a 4004 // __strong variable, we need to tell the optimizer that this 4005 // value has to stay alive until we're doing the store back. 4006 // This is because the temporary is effectively unretained, 4007 // and so otherwise we can violate the high-level semantics. 4008 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 4009 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 4010 valueToUse = src; 4011 } 4012 } 4013 4014 // Finish the control flow if we needed it. 4015 if (shouldCopy && !provablyNonNull) { 4016 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 4017 CGF.EmitBlock(contBB); 4018 4019 // Make a phi for the value to intrinsically use. 4020 if (valueToUse) { 4021 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 4022 "icr.to-use"); 4023 phiToUse->addIncoming(valueToUse, copyBB); 4024 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 4025 originBB); 4026 valueToUse = phiToUse; 4027 } 4028 4029 condEval.end(CGF); 4030 } 4031 4032 args.addWriteback(srcLV, temp, valueToUse); 4033 args.add(RValue::get(finalArgument), CRE->getType()); 4034 } 4035 4036 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 4037 assert(!StackBase); 4038 4039 // Save the stack. 4040 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 4041 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 4042 } 4043 4044 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 4045 if (StackBase) { 4046 // Restore the stack after the call. 4047 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 4048 CGF.Builder.CreateCall(F, StackBase); 4049 } 4050 } 4051 4052 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 4053 SourceLocation ArgLoc, 4054 AbstractCallee AC, 4055 unsigned ParmNum) { 4056 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 4057 SanOpts.has(SanitizerKind::NullabilityArg))) 4058 return; 4059 4060 // The param decl may be missing in a variadic function. 4061 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 4062 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 4063 4064 // Prefer the nonnull attribute if it's present. 
4065 const NonNullAttr *NNAttr = nullptr; 4066 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 4067 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 4068 4069 bool CanCheckNullability = false; 4070 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 4071 auto Nullability = PVD->getType()->getNullability(getContext()); 4072 CanCheckNullability = Nullability && 4073 *Nullability == NullabilityKind::NonNull && 4074 PVD->getTypeSourceInfo(); 4075 } 4076 4077 if (!NNAttr && !CanCheckNullability) 4078 return; 4079 4080 SourceLocation AttrLoc; 4081 SanitizerMask CheckKind; 4082 SanitizerHandler Handler; 4083 if (NNAttr) { 4084 AttrLoc = NNAttr->getLocation(); 4085 CheckKind = SanitizerKind::NonnullAttribute; 4086 Handler = SanitizerHandler::NonnullArg; 4087 } else { 4088 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 4089 CheckKind = SanitizerKind::NullabilityArg; 4090 Handler = SanitizerHandler::NullabilityArg; 4091 } 4092 4093 SanitizerScope SanScope(this); 4094 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType); 4095 llvm::Constant *StaticData[] = { 4096 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 4097 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 4098 }; 4099 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 4100 } 4101 4102 // Check if the call is going to use the inalloca convention. This needs to 4103 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged 4104 // later, so we can't check it directly. 4105 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, 4106 ArrayRef<QualType> ArgTypes) { 4107 // The Swift calling conventions don't go through the target-specific 4108 // argument classification, they never use inalloca. 4109 // TODO: Consider limiting inalloca use to only calling conventions supported 4110 // by MSVC. 4111 if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync) 4112 return false; 4113 if (!CGM.getTarget().getCXXABI().isMicrosoft()) 4114 return false; 4115 return llvm::any_of(ArgTypes, [&](QualType Ty) { 4116 return isInAllocaArgument(CGM.getCXXABI(), Ty); 4117 }); 4118 } 4119 4120 #ifndef NDEBUG 4121 // Determine whether the given argument is an Objective-C method 4122 // that may have type parameters in its signature. 4123 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { 4124 const DeclContext *dc = method->getDeclContext(); 4125 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) { 4126 return classDecl->getTypeParamListAsWritten(); 4127 } 4128 4129 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) { 4130 return catDecl->getTypeParamList(); 4131 } 4132 4133 return false; 4134 } 4135 #endif 4136 4137 /// EmitCallArgs - Emit call arguments for a function. 4138 void CodeGenFunction::EmitCallArgs( 4139 CallArgList &Args, PrototypeWrapper Prototype, 4140 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 4141 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 4142 SmallVector<QualType, 16> ArgTypes; 4143 4144 assert((ParamsToSkip == 0 || Prototype.P) && 4145 "Can't skip parameters if type info is not provided"); 4146 4147 // This variable only captures *explicitly* written conventions, not those 4148 // applied by default via command line flags or target defaults, such as 4149 // thiscall, aapcs, stdcall via -mrtd, etc. 
Computing that correctly would 4150 // require knowing if this is a C++ instance method or being able to see 4151 // unprototyped FunctionTypes. 4152 CallingConv ExplicitCC = CC_C; 4153 4154 // First, if a prototype was provided, use those argument types. 4155 bool IsVariadic = false; 4156 if (Prototype.P) { 4157 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>(); 4158 if (MD) { 4159 IsVariadic = MD->isVariadic(); 4160 ExplicitCC = getCallingConventionForDecl( 4161 MD, CGM.getTarget().getTriple().isOSWindows()); 4162 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip, 4163 MD->param_type_end()); 4164 } else { 4165 const auto *FPT = Prototype.P.get<const FunctionProtoType *>(); 4166 IsVariadic = FPT->isVariadic(); 4167 ExplicitCC = FPT->getExtInfo().getCC(); 4168 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, 4169 FPT->param_type_end()); 4170 } 4171 4172 #ifndef NDEBUG 4173 // Check that the prototyped types match the argument expression types. 4174 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD); 4175 CallExpr::const_arg_iterator Arg = ArgRange.begin(); 4176 for (QualType Ty : ArgTypes) { 4177 assert(Arg != ArgRange.end() && "Running over edge of argument list!"); 4178 assert( 4179 (isGenericMethod || Ty->isVariablyModifiedType() || 4180 Ty.getNonReferenceType()->isObjCRetainableType() || 4181 getContext() 4182 .getCanonicalType(Ty.getNonReferenceType()) 4183 .getTypePtr() == 4184 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) && 4185 "type mismatch in call argument!"); 4186 ++Arg; 4187 } 4188 4189 // Either we've emitted all the call args, or we have a call to variadic 4190 // function. 4191 assert((Arg == ArgRange.end() || IsVariadic) && 4192 "Extra arguments in non-variadic function!"); 4193 #endif 4194 } 4195 4196 // If we still have any arguments, emit them using the type of the argument. 4197 for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size())) 4198 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType()); 4199 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 4200 4201 // We must evaluate arguments from right to left in the MS C++ ABI, 4202 // because arguments are destroyed left to right in the callee. As a special 4203 // case, there are certain language constructs that require left-to-right 4204 // evaluation, and in those cases we consider the evaluation order requirement 4205 // to trump the "destruction order is reverse construction order" guarantee. 4206 bool LeftToRight = 4207 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() 4208 ? Order == EvaluationOrder::ForceLeftToRight 4209 : Order != EvaluationOrder::ForceRightToLeft; 4210 4211 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, 4212 RValue EmittedArg) { 4213 if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) 4214 return; 4215 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 4216 if (PS == nullptr) 4217 return; 4218 4219 const auto &Context = getContext(); 4220 auto SizeTy = Context.getSizeType(); 4221 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 4222 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); 4223 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, 4224 EmittedArg.getScalarVal(), 4225 PS->isDynamic()); 4226 Args.add(RValue::get(V), SizeTy); 4227 // If we're emitting args in reverse, be sure to do so with 4228 // pass_object_size, as well. 
4229 if (!LeftToRight)
4230 std::swap(Args.back(), *(&Args.back() - 1));
4231 };
4232
4233 // Insert a stack save if we're going to need any inalloca args.
4234 if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4235 assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4236 "inalloca only supported on x86");
4237 Args.allocateArgumentMemory(*this);
4238 }
4239
4240 // Evaluate each argument in the appropriate order.
4241 size_t CallArgsStart = Args.size();
4242 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4243 unsigned Idx = LeftToRight ? I : E - I - 1;
4244 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4245 unsigned InitialArgSize = Args.size();
4246 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4247 // the argument and parameter match or the objc method is parameterized.
4248 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4249 getContext().hasSameUnqualifiedType((*Arg)->getType(),
4250 ArgTypes[Idx]) ||
4251 (isa<ObjCMethodDecl>(AC.getDecl()) &&
4252 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4253 "Argument and parameter types don't match");
4254 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4255 // In particular, we depend on it being the last arg in Args, and the
4256 // objectsize bits depend on there only being one arg if !LeftToRight.
4257 assert(InitialArgSize + 1 == Args.size() &&
4258 "The code below depends on only adding one arg per EmitCallArg");
4259 (void)InitialArgSize;
4260 // Since pointer arguments are never emitted as LValues, it is safe to emit
4261 // the non-null argument check for r-values only.
4262 if (!Args.back().hasLValue()) {
4263 RValue RVArg = Args.back().getKnownRValue();
4264 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4265 ParamsToSkip + Idx);
4266 // @llvm.objectsize should never have side-effects and shouldn't need
4267 // destruction/cleanups, so we can safely "emit" it after its arg,
4268 // regardless of right-to-leftness.
4269 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4270 }
4271 }
4272
4273 if (!LeftToRight) {
4274 // Un-reverse the arguments we just evaluated so they match up with the LLVM
4275 // IR function.
4276 std::reverse(Args.begin() + CallArgsStart, Args.end()); 4277 } 4278 } 4279 4280 namespace { 4281 4282 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 4283 DestroyUnpassedArg(Address Addr, QualType Ty) 4284 : Addr(Addr), Ty(Ty) {} 4285 4286 Address Addr; 4287 QualType Ty; 4288 4289 void Emit(CodeGenFunction &CGF, Flags flags) override { 4290 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 4291 if (DtorKind == QualType::DK_cxx_destructor) { 4292 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 4293 assert(!Dtor->isTrivial()); 4294 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 4295 /*Delegating=*/false, Addr, Ty); 4296 } else { 4297 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 4298 } 4299 } 4300 }; 4301 4302 struct DisableDebugLocationUpdates { 4303 CodeGenFunction &CGF; 4304 bool disabledDebugInfo; 4305 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 4306 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 4307 CGF.disableDebugInfo(); 4308 } 4309 ~DisableDebugLocationUpdates() { 4310 if (disabledDebugInfo) 4311 CGF.enableDebugInfo(); 4312 } 4313 }; 4314 4315 } // end anonymous namespace 4316 4317 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 4318 if (!HasLV) 4319 return RV; 4320 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 4321 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 4322 LV.isVolatile()); 4323 IsUsed = true; 4324 return RValue::getAggregate(Copy.getAddress(CGF)); 4325 } 4326 4327 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 4328 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 4329 if (!HasLV && RV.isScalar()) 4330 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); 4331 else if (!HasLV && RV.isComplex()) 4332 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 4333 else { 4334 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); 4335 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 4336 // We assume that call args are never copied into subobjects. 4337 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 4338 HasLV ? LV.isVolatileQualified() 4339 : RV.isVolatileQualified()); 4340 } 4341 IsUsed = true; 4342 } 4343 4344 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 4345 QualType type) { 4346 DisableDebugLocationUpdates Dis(*this, E); 4347 if (const ObjCIndirectCopyRestoreExpr *CRE 4348 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 4349 assert(getLangOpts().ObjCAutoRefCount); 4350 return emitWritebackArg(*this, args, CRE); 4351 } 4352 4353 assert(type->isReferenceType() == E->isGLValue() && 4354 "reference binding to unmaterialized r-value!"); 4355 4356 if (E->isGLValue()) { 4357 assert(E->getObjectKind() == OK_Ordinary); 4358 return args.add(EmitReferenceBindingToExpr(E), type); 4359 } 4360 4361 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 4362 4363 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 4364 // However, we still have to push an EH-only cleanup in case we unwind before 4365 // we make it to the call. 4366 if (type->isRecordType() && 4367 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 4368 // If we're using inalloca, use the argument memory. Otherwise, use a 4369 // temporary. 4370 AggValueSlot Slot = args.isUsingInAlloca() 4371 ? 
createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp"); 4372 4373 bool DestroyedInCallee = true, NeedsEHCleanup = true; 4374 if (const auto *RD = type->getAsCXXRecordDecl()) 4375 DestroyedInCallee = RD->hasNonTrivialDestructor(); 4376 else 4377 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 4378 4379 if (DestroyedInCallee) 4380 Slot.setExternallyDestructed(); 4381 4382 EmitAggExpr(E, Slot); 4383 RValue RV = Slot.asRValue(); 4384 args.add(RV, type); 4385 4386 if (DestroyedInCallee && NeedsEHCleanup) { 4387 // Create a no-op GEP between the placeholder and the cleanup so we can 4388 // RAUW it successfully. It also serves as a marker of the first 4389 // instruction where the cleanup is active. 4390 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 4391 type); 4392 // This unreachable is a temporary marker which will be removed later. 4393 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 4394 args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive); 4395 } 4396 return; 4397 } 4398 4399 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 4400 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 4401 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 4402 assert(L.isSimple()); 4403 args.addUncopiedAggregate(L, type); 4404 return; 4405 } 4406 4407 args.add(EmitAnyExprToTemp(E), type); 4408 } 4409 4410 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 4411 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 4412 // implicitly widens null pointer constants that are arguments to varargs 4413 // functions to pointer-sized ints. 4414 if (!getTarget().getTriple().isOSWindows()) 4415 return Arg->getType(); 4416 4417 if (Arg->getType()->isIntegerType() && 4418 getContext().getTypeSize(Arg->getType()) < 4419 getContext().getTargetInfo().getPointerWidth(0) && 4420 Arg->isNullPointerConstant(getContext(), 4421 Expr::NPC_ValueDependentIsNotNull)) { 4422 return getContext().getIntPtrType(); 4423 } 4424 4425 return Arg->getType(); 4426 } 4427 4428 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4429 // optimizer it can aggressively ignore unwind edges. 4430 void 4431 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 4432 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 4433 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 4434 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 4435 CGM.getNoObjCARCExceptionsMetadata()); 4436 } 4437 4438 /// Emits a call to the given no-arguments nounwind runtime function. 4439 llvm::CallInst * 4440 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4441 const llvm::Twine &name) { 4442 return EmitNounwindRuntimeCall(callee, None, name); 4443 } 4444 4445 /// Emits a call to the given nounwind runtime function. 4446 llvm::CallInst * 4447 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4448 ArrayRef<llvm::Value *> args, 4449 const llvm::Twine &name) { 4450 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 4451 call->setDoesNotThrow(); 4452 return call; 4453 } 4454 4455 /// Emits a simple call (never an invoke) to the given no-arguments 4456 /// runtime function. 4457 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4458 const llvm::Twine &name) { 4459 return EmitRuntimeCall(callee, None, name); 4460 } 4461 4462 // Calls which may throw must have operand bundles indicating which funclet 4463 // they are nested within. 
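// For example (IR sketch), a call emitted inside a catchpad carries
//   call void @g() [ "funclet"(token %catch.pad) ]
// so that unwinding is attributed to the correct funclet.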
4464 SmallVector<llvm::OperandBundleDef, 1> 4465 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 4466 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4467 // There is no need for a funclet operand bundle if we aren't inside a 4468 // funclet. 4469 if (!CurrentFuncletPad) 4470 return BundleList; 4471 4472 // Skip intrinsics which cannot throw. 4473 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 4474 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 4475 return BundleList; 4476 4477 BundleList.emplace_back("funclet", CurrentFuncletPad); 4478 return BundleList; 4479 } 4480 4481 /// Emits a simple call (never an invoke) to the given runtime function. 4482 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4483 ArrayRef<llvm::Value *> args, 4484 const llvm::Twine &name) { 4485 llvm::CallInst *call = Builder.CreateCall( 4486 callee, args, getBundlesForFunclet(callee.getCallee()), name); 4487 call->setCallingConv(getRuntimeCC()); 4488 return call; 4489 } 4490 4491 /// Emits a call or invoke to the given noreturn runtime function. 4492 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 4493 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 4494 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4495 getBundlesForFunclet(callee.getCallee()); 4496 4497 if (getInvokeDest()) { 4498 llvm::InvokeInst *invoke = 4499 Builder.CreateInvoke(callee, 4500 getUnreachableBlock(), 4501 getInvokeDest(), 4502 args, 4503 BundleList); 4504 invoke->setDoesNotReturn(); 4505 invoke->setCallingConv(getRuntimeCC()); 4506 } else { 4507 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 4508 call->setDoesNotReturn(); 4509 call->setCallingConv(getRuntimeCC()); 4510 Builder.CreateUnreachable(); 4511 } 4512 } 4513 4514 /// Emits a call or invoke instruction to the given nullary runtime function. 4515 llvm::CallBase * 4516 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4517 const Twine &name) { 4518 return EmitRuntimeCallOrInvoke(callee, None, name); 4519 } 4520 4521 /// Emits a call or invoke instruction to the given runtime function. 4522 llvm::CallBase * 4523 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4524 ArrayRef<llvm::Value *> args, 4525 const Twine &name) { 4526 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 4527 call->setCallingConv(getRuntimeCC()); 4528 return call; 4529 } 4530 4531 /// Emits a call or invoke instruction to the given function, depending 4532 /// on the current state of the EH stack. 4533 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, 4534 ArrayRef<llvm::Value *> Args, 4535 const Twine &Name) { 4536 llvm::BasicBlock *InvokeDest = getInvokeDest(); 4537 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4538 getBundlesForFunclet(Callee.getCallee()); 4539 4540 llvm::CallBase *Inst; 4541 if (!InvokeDest) 4542 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 4543 else { 4544 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 4545 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 4546 Name); 4547 EmitBlock(ContBB); 4548 } 4549 4550 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4551 // optimizer it can aggressively ignore unwind edges. 
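  // (Concretely, the call site gets "clang.arc.no_objc_arc_exceptions"
  // metadata attached; see AddObjCARCExceptionMetadata above.)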
4552   if (CGM.getLangOpts().ObjCAutoRefCount)
4553     AddObjCARCExceptionMetadata(Inst);
4554
4555   return Inst;
4556 }
4557
4558 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4559                                                   llvm::Value *New) {
4560   DeferredReplacements.push_back(
4561       std::make_pair(llvm::WeakTrackingVH(Old), New));
4562 }
4563
4564 namespace {
4565
4566 /// Specify the given \p NewAlign as the alignment of the return value
4567 /// attribute. If such an attribute already exists, raise it to the maximum of the two.
4568 LLVM_NODISCARD llvm::AttributeList
4569 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4570                                 const llvm::AttributeList &Attrs,
4571                                 llvm::Align NewAlign) {
4572   llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4573   if (CurAlign >= NewAlign)
4574     return Attrs;
4575   llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4576   return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
4577       .addRetAttribute(Ctx, AlignAttr);
4578 }
4579
4580 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4581 protected:
4582   CodeGenFunction &CGF;
4583
4584   /// We do nothing if this is, or becomes, nullptr.
4585   const AlignedAttrTy *AA = nullptr;
4586
4587   llvm::Value *Alignment = nullptr;      // May or may not be a constant.
4588   llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4589
4590   AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4591       : CGF(CGF_) {
4592     if (!FuncDecl)
4593       return;
4594     AA = FuncDecl->getAttr<AlignedAttrTy>();
4595   }
4596
4597 public:
4598   /// If we can, materialize the alignment as an attribute on the return value.
4599   LLVM_NODISCARD llvm::AttributeList
4600   TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
4601     if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
4602       return Attrs;
4603     const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
4604     if (!AlignmentCI)
4605       return Attrs;
4606     // We may legitimately have a non-power-of-2 alignment here. If so, this
4607     // is UB land; emit it via `@llvm.assume` instead.
4608     if (!AlignmentCI->getValue().isPowerOf2())
4609       return Attrs;
4610     llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
4611         CGF.getLLVMContext(), Attrs,
4612         llvm::Align(
4613             AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
4614     AA = nullptr; // We're done. Disallow doing anything else.
4615     return NewAttrs;
4616   }
4617
4618   /// Emit the alignment assumption.
4619   /// This is the general fallback that we take if there is an offset, the
4620   /// alignment is variable, or we are sanitizing for alignment.
4621   void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
4622     if (!AA)
4623       return;
4624     CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
4625                                 AA->getLocation(), Alignment, OffsetCI);
4626     AA = nullptr; // We're done. Disallow doing anything else.
4627   }
4628 };
4629
4630 /// Helper data structure to emit `AssumeAlignedAttr`.
4631 class AssumeAlignedAttrEmitter final
4632     : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
4633 public:
4634   AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4635       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4636     if (!AA)
4637       return;
4638     // It is guaranteed that the alignment/offset are constants.
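    // (Presumably because Sema only accepts integer constant expressions for
    // assume_aligned's alignment and offset, which is what makes the casts
    // below safe.)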
4639     Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
4640     if (Expr *Offset = AA->getOffset()) {
4641       OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
4642       if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
4643         OffsetCI = nullptr;
4644     }
4645   }
4646 };
4647
4648 /// Helper data structure to emit `AllocAlignAttr`.
4649 class AllocAlignAttrEmitter final
4650     : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
4651 public:
4652   AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
4653                         const CallArgList &CallArgs)
4654       : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
4655     if (!AA)
4656       return;
4657     // Alignment may or may not be a constant, and that is okay.
4658     Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
4659                     .getRValue(CGF)
4660                     .getScalarVal();
4661   }
4662 };
4663
4664 } // namespace
4665
4666 static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
4667   if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
4668     return VT->getPrimitiveSizeInBits().getKnownMinSize();
4669   if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
4670     return getMaxVectorWidth(AT->getElementType());
4671
4672   unsigned MaxVectorWidth = 0;
4673   if (auto *ST = dyn_cast<llvm::StructType>(Ty))
4674     for (auto *I : ST->elements())
4675       MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
4676   return MaxVectorWidth;
4677 }
4678
4679 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
4680                                  const CGCallee &Callee,
4681                                  ReturnValueSlot ReturnValue,
4682                                  const CallArgList &CallArgs,
4683                                  llvm::CallBase **callOrInvoke, bool IsMustTail,
4684                                  SourceLocation Loc) {
4685   // FIXME: We no longer need the types from CallArgs; lift up and simplify.
4686
4687   assert(Callee.isOrdinary() || Callee.isVirtual());
4688
4689   // Handle struct-return functions by passing a pointer to the
4690   // location that we would like to return into.
4691   QualType RetTy = CallInfo.getReturnType();
4692   const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
4693
4694   llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
4695
4696   const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
4697   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
4698     // We can only guarantee that a function is called from the correct
4699     // context/function based on the appropriate target attributes, so only
4700     // check when the callee has both always_inline and target. Otherwise we
4701     // could be making a conditional call after a check for the proper cpu
4702     // features (and it won't cause code generation issues due to
4703     // function-based code generation).
4704     if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
4705         TargetDecl->hasAttr<TargetAttr>())
4706       checkTargetFeatures(Loc, FD);
4707
4708     // Some architectures (such as x86-64) have the ABI changed based on
4709     // attribute-target/features. Give them a chance to diagnose.
4710     CGM.getTargetCodeGenInfo().checkFunctionCallABI(
4711         CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
4712   }
4713
4714 #ifndef NDEBUG
4715   if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
4716     // For an inalloca varargs function, we don't expect CallInfo to match the
4717     // function pointer's type, because the inalloca struct will have extra
4718     // fields in it for the varargs parameters. Code later in this function
4719     // bitcasts the function pointer to the type derived from CallInfo.
4720 // 4721 // In other cases, we assert that the types match up (until pointers stop 4722 // having pointee types). 4723 if (Callee.isVirtual()) 4724 assert(IRFuncTy == Callee.getVirtualFunctionType()); 4725 else { 4726 llvm::PointerType *PtrTy = 4727 llvm::cast<llvm::PointerType>(Callee.getFunctionPointer()->getType()); 4728 assert(PtrTy->isOpaqueOrPointeeTypeMatches(IRFuncTy)); 4729 } 4730 } 4731 #endif 4732 4733 // 1. Set up the arguments. 4734 4735 // If we're using inalloca, insert the allocation after the stack save. 4736 // FIXME: Do this earlier rather than hacking it in here! 4737 Address ArgMemory = Address::invalid(); 4738 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 4739 const llvm::DataLayout &DL = CGM.getDataLayout(); 4740 llvm::Instruction *IP = CallArgs.getStackBase(); 4741 llvm::AllocaInst *AI; 4742 if (IP) { 4743 IP = IP->getNextNode(); 4744 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), 4745 "argmem", IP); 4746 } else { 4747 AI = CreateTempAlloca(ArgStruct, "argmem"); 4748 } 4749 auto Align = CallInfo.getArgStructAlignment(); 4750 AI->setAlignment(Align.getAsAlign()); 4751 AI->setUsedWithInAlloca(true); 4752 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 4753 ArgMemory = Address(AI, ArgStruct, Align); 4754 } 4755 4756 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 4757 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 4758 4759 // If the call returns a temporary with struct return, create a temporary 4760 // alloca to hold the result, unless one is given to us. 4761 Address SRetPtr = Address::invalid(); 4762 Address SRetAlloca = Address::invalid(); 4763 llvm::Value *UnusedReturnSizePtr = nullptr; 4764 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { 4765 if (!ReturnValue.isNull()) { 4766 SRetPtr = ReturnValue.getValue(); 4767 } else { 4768 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); 4769 if (HaveInsertPoint() && ReturnValue.isUnused()) { 4770 llvm::TypeSize size = 4771 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); 4772 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer()); 4773 } 4774 } 4775 if (IRFunctionArgs.hasSRetArg()) { 4776 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); 4777 } else if (RetAI.isInAlloca()) { 4778 Address Addr = 4779 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); 4780 Builder.CreateStore(SRetPtr.getPointer(), Addr); 4781 } 4782 } 4783 4784 Address swiftErrorTemp = Address::invalid(); 4785 Address swiftErrorArg = Address::invalid(); 4786 4787 // When passing arguments using temporary allocas, we need to add the 4788 // appropriate lifetime markers. This vector keeps track of all the lifetime 4789 // markers that need to be ended right after the call. 4790 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall; 4791 4792 // Translate all of the arguments as necessary to match the IR lowering. 4793 assert(CallInfo.arg_size() == CallArgs.size() && 4794 "Mismatch between function signature & arguments."); 4795 unsigned ArgNo = 0; 4796 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 4797 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 4798 I != E; ++I, ++info_it, ++ArgNo) { 4799 const ABIArgInfo &ArgInfo = info_it->info; 4800 4801 // Insert a padding argument to ensure proper alignment. 
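    // (A padding argument is a synthetic IR-level parameter that some ABIs use
    // purely to steer later arguments into the right registers or stack slots;
    // the callee never reads it, so undef is a safe value for it.)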
4802     if (IRFunctionArgs.hasPaddingArg(ArgNo))
4803       IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
4804           llvm::UndefValue::get(ArgInfo.getPaddingType());
4805
4806     unsigned FirstIRArg, NumIRArgs;
4807     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
4808
4809     switch (ArgInfo.getKind()) {
4810     case ABIArgInfo::InAlloca: {
4811       assert(NumIRArgs == 0);
4812       assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
4813       if (I->isAggregate()) {
4814         Address Addr = I->hasLValue()
4815                            ? I->getKnownLValue().getAddress(*this)
4816                            : I->getKnownRValue().getAggregateAddress();
4817         llvm::Instruction *Placeholder =
4818             cast<llvm::Instruction>(Addr.getPointer());
4819
4820         if (!ArgInfo.getInAllocaIndirect()) {
4821           // Replace the placeholder with the appropriate argument slot GEP.
4822           CGBuilderTy::InsertPoint IP = Builder.saveIP();
4823           Builder.SetInsertPoint(Placeholder);
4824           Addr = Builder.CreateStructGEP(ArgMemory,
4825                                          ArgInfo.getInAllocaFieldIndex());
4826           Builder.restoreIP(IP);
4827         } else {
4828           // For indirect things such as overaligned structs, replace the
4829           // placeholder with a regular aggregate temporary alloca. Store the
4830           // address of this alloca into the struct.
4831           Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
4832           Address ArgSlot = Builder.CreateStructGEP(
4833               ArgMemory, ArgInfo.getInAllocaFieldIndex());
4834           Builder.CreateStore(Addr.getPointer(), ArgSlot);
4835         }
4836         deferPlaceholderReplacement(Placeholder, Addr.getPointer());
4837       } else if (ArgInfo.getInAllocaIndirect()) {
4838         // Make a temporary alloca and store its address into the argument
4839         // struct.
4840         Address Addr = CreateMemTempWithoutCast(
4841             I->Ty, getContext().getTypeAlignInChars(I->Ty),
4842             "indirect-arg-temp");
4843         I->copyInto(*this, Addr);
4844         Address ArgSlot =
4845             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4846         Builder.CreateStore(Addr.getPointer(), ArgSlot);
4847       } else {
4848         // Store the RValue into the argument struct.
4849         Address Addr =
4850             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
4851         // There are some cases where a trivial bitcast is not avoidable. The
4852         // definition of a type later in a translation unit may change its type
4853         // from {}* to (%struct.foo*)*.
4854         Addr = Builder.CreateElementBitCast(Addr, ConvertTypeForMem(I->Ty));
4855         I->copyInto(*this, Addr);
4856       }
4857       break;
4858     }
4859
4860     case ABIArgInfo::Indirect:
4861     case ABIArgInfo::IndirectAliased: {
4862       assert(NumIRArgs == 1);
4863       if (!I->isAggregate()) {
4864         // Make a temporary alloca to pass the argument.
4865         Address Addr = CreateMemTempWithoutCast(
4866             I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
4867         IRCallArgs[FirstIRArg] = Addr.getPointer();
4868
4869         I->copyInto(*this, Addr);
4870       } else {
4871         // We want to avoid creating an unnecessary temporary+copy here;
4872         // however, we need one in three cases:
4873         // 1. If the argument is not byval, and we are required to copy the
4874         //    source. (This case doesn't occur on any common architecture.)
4875         // 2. If the argument is byval, RV is not sufficiently aligned, and
4876         //    we cannot force it to be sufficiently aligned.
4877         // 3. If the argument is byval, but RV is not located in default
4878         //    or alloca address space.
4879         Address Addr = I->hasLValue()
4880                            ?
I->getKnownLValue().getAddress(*this)
4881                            : I->getKnownRValue().getAggregateAddress();
4882         llvm::Value *V = Addr.getPointer();
4883         CharUnits Align = ArgInfo.getIndirectAlign();
4884         const llvm::DataLayout *TD = &CGM.getDataLayout();
4885
4886         assert((FirstIRArg >= IRFuncTy->getNumParams() ||
4887                 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
4888                     TD->getAllocaAddrSpace()) &&
4889                "indirect argument must be in alloca address space");
4890
4891         bool NeedCopy = false;
4892
4893         if (Addr.getAlignment() < Align &&
4894             llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
4895                 Align.getAsAlign()) {
4896           NeedCopy = true;
4897         } else if (I->hasLValue()) {
4898           auto LV = I->getKnownLValue();
4899           auto AS = LV.getAddressSpace();
4900
4901           if (!ArgInfo.getIndirectByVal() ||
4902               (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4903             NeedCopy = true;
4904           }
4905           if (!getLangOpts().OpenCL) {
4906             if ((ArgInfo.getIndirectByVal() &&
4907                  (AS != LangAS::Default &&
4908                   AS != CGM.getASTAllocaAddressSpace()))) {
4909               NeedCopy = true;
4910             }
4911           }
4912           // For OpenCL, even if RV is located in the default or alloca address
4913           // space, we don't want to perform an address space cast for it.
4914           else if ((ArgInfo.getIndirectByVal() &&
4915                     Addr.getType()->getAddressSpace() != IRFuncTy->
4916                         getParamType(FirstIRArg)->getPointerAddressSpace())) {
4917             NeedCopy = true;
4918           }
4919         }
4920
4921         if (NeedCopy) {
4922           // Create an aligned temporary, and copy to it.
4923           Address AI = CreateMemTempWithoutCast(
4924               I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4925           IRCallArgs[FirstIRArg] = AI.getPointer();
4926
4927           // Emit lifetime markers for the temporary alloca.
4928           llvm::TypeSize ByvalTempElementSize =
4929               CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4930           llvm::Value *LifetimeSize =
4931               EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4932
4933           // Add cleanup code to emit the end lifetime marker after the call.
4934           if (LifetimeSize) // In case we disabled lifetime markers.
4935             CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4936
4937           // Generate the copy.
4938           I->copyInto(*this, AI);
4939         } else {
4940           // Skip the extra memcpy call.
4941           auto *T = llvm::PointerType::getWithSamePointeeType(
4942               cast<llvm::PointerType>(V->getType()),
4943               CGM.getDataLayout().getAllocaAddrSpace());
4944           IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4945               *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4946               true);
4947         }
4948       }
4949       break;
4950     }
4951
4952     case ABIArgInfo::Ignore:
4953       assert(NumIRArgs == 0);
4954       break;
4955
4956     case ABIArgInfo::Extend:
4957     case ABIArgInfo::Direct: {
4958       if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4959           ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4960           ArgInfo.getDirectOffset() == 0) {
4961         assert(NumIRArgs == 1);
4962         llvm::Value *V;
4963         if (!I->isAggregate())
4964           V = I->getKnownRValue().getScalarVal();
4965         else
4966           V = Builder.CreateLoad(
4967               I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4968                              : I->getKnownRValue().getAggregateAddress());
4969
4970       // Implement swifterror by copying into a new swifterror argument.
4971       // We'll write back in the normal path out of the call.
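      // A rough sketch of the resulting IR, with illustrative names and an
      // i8* error type:
      //
      //   %swifterror.temp = alloca swifterror i8*
      //   %err = load i8*, i8** %arg.err
      //   store i8* %err, i8** %swifterror.temp
      //   call swiftcc void @f(i8** swifterror %swifterror.temp)
      //   ; after the call: load from %swifterror.temp, store back to %arg.err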
4972 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 4973 == ParameterABI::SwiftErrorResult) { 4974 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 4975 4976 QualType pointeeTy = I->Ty->getPointeeType(); 4977 swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy), 4978 getContext().getTypeAlignInChars(pointeeTy)); 4979 4980 swiftErrorTemp = 4981 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 4982 V = swiftErrorTemp.getPointer(); 4983 cast<llvm::AllocaInst>(V)->setSwiftError(true); 4984 4985 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 4986 Builder.CreateStore(errorValue, swiftErrorTemp); 4987 } 4988 4989 // We might have to widen integers, but we should never truncate. 4990 if (ArgInfo.getCoerceToType() != V->getType() && 4991 V->getType()->isIntegerTy()) 4992 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 4993 4994 // If the argument doesn't match, perform a bitcast to coerce it. This 4995 // can happen due to trivial type mismatches. 4996 if (FirstIRArg < IRFuncTy->getNumParams() && 4997 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 4998 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 4999 5000 IRCallArgs[FirstIRArg] = V; 5001 break; 5002 } 5003 5004 // FIXME: Avoid the conversion through memory if possible. 5005 Address Src = Address::invalid(); 5006 if (!I->isAggregate()) { 5007 Src = CreateMemTemp(I->Ty, "coerce"); 5008 I->copyInto(*this, Src); 5009 } else { 5010 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this) 5011 : I->getKnownRValue().getAggregateAddress(); 5012 } 5013 5014 // If the value is offset in memory, apply the offset now. 5015 Src = emitAddressAtOffset(*this, Src, ArgInfo); 5016 5017 // Fast-isel and the optimizer generally like scalar values better than 5018 // FCAs, so we flatten them if this is safe to do for this argument. 5019 llvm::StructType *STy = 5020 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 5021 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 5022 llvm::Type *SrcTy = Src.getElementType(); 5023 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 5024 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 5025 5026 // If the source type is smaller than the destination type of the 5027 // coerce-to logic, copy the source value into a temp alloca the size 5028 // of the destination type to allow loading all of it. The bits past 5029 // the source value are left undef. 5030 if (SrcSize < DstSize) { 5031 Address TempAlloca 5032 = CreateTempAlloca(STy, Src.getAlignment(), 5033 Src.getName() + ".coerce"); 5034 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 5035 Src = TempAlloca; 5036 } else { 5037 Src = Builder.CreateElementBitCast(Src, STy); 5038 } 5039 5040 assert(NumIRArgs == STy->getNumElements()); 5041 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 5042 Address EltPtr = Builder.CreateStructGEP(Src, i); 5043 llvm::Value *LI = Builder.CreateLoad(EltPtr); 5044 IRCallArgs[FirstIRArg + i] = LI; 5045 } 5046 } else { 5047 // In the simple case, just pass the coerced loaded value. 5048 assert(NumIRArgs == 1); 5049 llvm::Value *Load = 5050 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 5051 5052 if (CallInfo.isCmseNSCall()) { 5053 // For certain parameter types, clear padding bits, as they may reveal 5054 // sensitive information. 5055 // Small struct/union types are passed as integer arrays. 
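          // For example, struct { char c; short s; } is passed as [1 x i32];
          // without EmitCMSEClearRecord, the i32's padding byte could leak
          // secure-state data across the security boundary.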
5056 auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType()); 5057 if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType())) 5058 Load = EmitCMSEClearRecord(Load, ATy, I->Ty); 5059 } 5060 IRCallArgs[FirstIRArg] = Load; 5061 } 5062 5063 break; 5064 } 5065 5066 case ABIArgInfo::CoerceAndExpand: { 5067 auto coercionType = ArgInfo.getCoerceAndExpandType(); 5068 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 5069 5070 llvm::Value *tempSize = nullptr; 5071 Address addr = Address::invalid(); 5072 Address AllocaAddr = Address::invalid(); 5073 if (I->isAggregate()) { 5074 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) 5075 : I->getKnownRValue().getAggregateAddress(); 5076 5077 } else { 5078 RValue RV = I->getKnownRValue(); 5079 assert(RV.isScalar()); // complex should always just be direct 5080 5081 llvm::Type *scalarType = RV.getScalarVal()->getType(); 5082 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 5083 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 5084 5085 // Materialize to a temporary. 5086 addr = 5087 CreateTempAlloca(RV.getScalarVal()->getType(), 5088 CharUnits::fromQuantity(std::max( 5089 layout->getAlignment().value(), scalarAlign)), 5090 "tmp", 5091 /*ArraySize=*/nullptr, &AllocaAddr); 5092 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer()); 5093 5094 Builder.CreateStore(RV.getScalarVal(), addr); 5095 } 5096 5097 addr = Builder.CreateElementBitCast(addr, coercionType); 5098 5099 unsigned IRArgPos = FirstIRArg; 5100 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 5101 llvm::Type *eltType = coercionType->getElementType(i); 5102 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 5103 Address eltAddr = Builder.CreateStructGEP(addr, i); 5104 llvm::Value *elt = Builder.CreateLoad(eltAddr); 5105 IRCallArgs[IRArgPos++] = elt; 5106 } 5107 assert(IRArgPos == FirstIRArg + NumIRArgs); 5108 5109 if (tempSize) { 5110 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer()); 5111 } 5112 5113 break; 5114 } 5115 5116 case ABIArgInfo::Expand: { 5117 unsigned IRArgPos = FirstIRArg; 5118 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos); 5119 assert(IRArgPos == FirstIRArg + NumIRArgs); 5120 break; 5121 } 5122 } 5123 } 5124 5125 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); 5126 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); 5127 5128 // If we're using inalloca, set up that argument. 5129 if (ArgMemory.isValid()) { 5130 llvm::Value *Arg = ArgMemory.getPointer(); 5131 if (CallInfo.isVariadic()) { 5132 // When passing non-POD arguments by value to variadic functions, we will 5133 // end up with a variadic prototype and an inalloca call site. In such 5134 // cases, we can't do any parameter mismatch checks. Give up and bitcast 5135 // the callee. 5136 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); 5137 CalleePtr = 5138 Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS)); 5139 } else { 5140 llvm::Type *LastParamTy = 5141 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 5142 if (Arg->getType() != LastParamTy) { 5143 #ifndef NDEBUG 5144 // Assert that these structs have equivalent element types. 
5145 llvm::StructType *FullTy = CallInfo.getArgStruct(); 5146 if (!LastParamTy->isOpaquePointerTy()) { 5147 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 5148 LastParamTy->getNonOpaquePointerElementType()); 5149 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 5150 for (auto DI = DeclaredTy->element_begin(), 5151 DE = DeclaredTy->element_end(), 5152 FI = FullTy->element_begin(); 5153 DI != DE; ++DI, ++FI) 5154 assert(*DI == *FI); 5155 } 5156 #endif 5157 Arg = Builder.CreateBitCast(Arg, LastParamTy); 5158 } 5159 } 5160 assert(IRFunctionArgs.hasInallocaArg()); 5161 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 5162 } 5163 5164 // 2. Prepare the function pointer. 5165 5166 // If the callee is a bitcast of a non-variadic function to have a 5167 // variadic function pointer type, check to see if we can remove the 5168 // bitcast. This comes up with unprototyped functions. 5169 // 5170 // This makes the IR nicer, but more importantly it ensures that we 5171 // can inline the function at -O0 if it is marked always_inline. 5172 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT, 5173 llvm::Value *Ptr) -> llvm::Function * { 5174 if (!CalleeFT->isVarArg()) 5175 return nullptr; 5176 5177 // Get underlying value if it's a bitcast 5178 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) { 5179 if (CE->getOpcode() == llvm::Instruction::BitCast) 5180 Ptr = CE->getOperand(0); 5181 } 5182 5183 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr); 5184 if (!OrigFn) 5185 return nullptr; 5186 5187 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); 5188 5189 // If the original type is variadic, or if any of the component types 5190 // disagree, we cannot remove the cast. 5191 if (OrigFT->isVarArg() || 5192 OrigFT->getNumParams() != CalleeFT->getNumParams() || 5193 OrigFT->getReturnType() != CalleeFT->getReturnType()) 5194 return nullptr; 5195 5196 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) 5197 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) 5198 return nullptr; 5199 5200 return OrigFn; 5201 }; 5202 5203 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) { 5204 CalleePtr = OrigFn; 5205 IRFuncTy = OrigFn->getFunctionType(); 5206 } 5207 5208 // 3. Perform the actual call. 5209 5210 // Deactivate any cleanups that we're supposed to do immediately before 5211 // the call. 5212 if (!CallArgs.getCleanupsToDeactivate().empty()) 5213 deactivateArgCleanupsBeforeCall(*this, CallArgs); 5214 5215 // Assert that the arguments we computed match up. The IR verifier 5216 // will catch this, but this is a common enough source of problems 5217 // during IRGen changes that it's way better for debugging to catch 5218 // it ourselves here. 5219 #ifndef NDEBUG 5220 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); 5221 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 5222 // Inalloca argument can have different type. 5223 if (IRFunctionArgs.hasInallocaArg() && 5224 i == IRFunctionArgs.getInallocaArgNo()) 5225 continue; 5226 if (i < IRFuncTy->getNumParams()) 5227 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); 5228 } 5229 #endif 5230 5231 // Update the largest vector width if any arguments have vector types. 5232 for (unsigned i = 0; i < IRCallArgs.size(); ++i) 5233 LargestVectorWidth = std::max(LargestVectorWidth, 5234 getMaxVectorWidth(IRCallArgs[i]->getType())); 5235 5236 // Compute the calling convention and attributes. 
5237   unsigned CallingConv;
5238   llvm::AttributeList Attrs;
5239   CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5240                              Callee.getAbstractInfo(), Attrs, CallingConv,
5241                              /*AttrOnCallSite=*/true,
5242                              /*IsThunk=*/false);
5243
5244   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5245     if (FD->hasAttr<StrictFPAttr>())
5246       // All calls within a strictfp function are marked strictfp.
5247       Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5248
5249   // Add the call-site nomerge attribute if it exists.
5250   if (InNoMergeAttributedStmt)
5251     Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5252
5253   // Add the call-site noinline attribute if it exists.
5254   if (InNoInlineAttributedStmt)
5255     Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5256
5257   // Add the call-site always_inline attribute if it exists.
5258   if (InAlwaysInlineAttributedStmt)
5259     Attrs =
5260         Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5261
5262   // Apply some call-site-specific attributes.
5263   // TODO: work this into building the attribute set.
5264
5265   // Apply always_inline to all calls within flatten functions.
5266   // FIXME: should this really take priority over __try, below?
5267   if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5268       !InNoInlineAttributedStmt &&
5269       !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5270     Attrs =
5271         Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5272   }
5273
5274   // Disable inlining inside SEH __try blocks.
5275   if (isSEHTryScope()) {
5276     Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5277   }
5278
5279   // Decide whether to use a call or an invoke.
5280   bool CannotThrow;
5281   if (currentFunctionUsesSEHTry()) {
5282     // SEH cares about asynchronous exceptions, so everything can "throw."
5283     CannotThrow = false;
5284   } else if (isCleanupPadScope() &&
5285              EHPersonality::get(*this).isMSVCXXPersonality()) {
5286     // The MSVC++ personality will implicitly terminate the program if an
5287     // exception is thrown during a cleanup outside of a try/catch.
5288     // We don't need to model anything in IR to get this behavior.
5289     CannotThrow = true;
5290   } else {
5291     // Otherwise, nounwind call sites will never throw.
5292     CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5293
5294     if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5295       if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5296         CannotThrow = true;
5297   }
5298
5299   // If we made a temporary, be sure to clean up after ourselves. Note that we
5300   // can't depend on being inside of an ExprWithCleanups, so we need to manually
5301   // pop this cleanup later on. Being eager about this is OK, since this
5302   // temporary is 'invisible' outside of the callee.
5303   if (UnusedReturnSizePtr)
5304     pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5305                                          UnusedReturnSizePtr);
5306
5307   llvm::BasicBlock *InvokeDest = CannotThrow ?
nullptr : getInvokeDest(); 5308 5309 SmallVector<llvm::OperandBundleDef, 1> BundleList = 5310 getBundlesForFunclet(CalleePtr); 5311 5312 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) 5313 if (FD->hasAttr<StrictFPAttr>()) 5314 // All calls within a strictfp function are marked strictfp 5315 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP); 5316 5317 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl); 5318 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); 5319 5320 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs); 5321 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs); 5322 5323 // Emit the actual call/invoke instruction. 5324 llvm::CallBase *CI; 5325 if (!InvokeDest) { 5326 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList); 5327 } else { 5328 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont"); 5329 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs, 5330 BundleList); 5331 EmitBlock(Cont); 5332 } 5333 if (callOrInvoke) 5334 *callOrInvoke = CI; 5335 5336 // If this is within a function that has the guard(nocf) attribute and is an 5337 // indirect call, add the "guard_nocf" attribute to this call to indicate that 5338 // Control Flow Guard checks should not be added, even if the call is inlined. 5339 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) { 5340 if (const auto *A = FD->getAttr<CFGuardAttr>()) { 5341 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction()) 5342 Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf"); 5343 } 5344 } 5345 5346 // Apply the attributes and calling convention. 5347 CI->setAttributes(Attrs); 5348 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv)); 5349 5350 // Apply various metadata. 5351 5352 if (!CI->getType()->isVoidTy()) 5353 CI->setName("call"); 5354 5355 // Update largest vector width from the return type. 5356 LargestVectorWidth = 5357 std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType())); 5358 5359 // Insert instrumentation or attach profile metadata at indirect call sites. 5360 // For more details, see the comment before the definition of 5361 // IPVK_IndirectCallTarget in InstrProfData.inc. 5362 if (!CI->getCalledFunction()) 5363 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget, 5364 CI, CalleePtr); 5365 5366 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 5367 // optimizer it can aggressively ignore unwind edges. 5368 if (CGM.getLangOpts().ObjCAutoRefCount) 5369 AddObjCARCExceptionMetadata(CI); 5370 5371 // Set tail call kind if necessary. 5372 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) { 5373 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>()) 5374 Call->setTailCallKind(llvm::CallInst::TCK_NoTail); 5375 else if (IsMustTail) 5376 Call->setTailCallKind(llvm::CallInst::TCK_MustTail); 5377 } 5378 5379 // Add metadata for calls to MSAllocator functions 5380 if (getDebugInfo() && TargetDecl && 5381 TargetDecl->hasAttr<MSAllocatorAttr>()) 5382 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc); 5383 5384 // Add metadata if calling an __attribute__((error(""))) or warning fn. 
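  // The attachment is a single i32 holding the raw SourceLocation, roughly:
  //
  //   %call = call i32 @bad(), !srcloc !0
  //   !0 = !{i32 53471222}
  //
  // The backend uses it to point its 'attribute error/warning' diagnostic
  // back at this call site.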
5385   if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5386     llvm::ConstantInt *Line =
5387         llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
5388     llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5389     llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
5390     CI->setMetadata("srcloc", MDT);
5391   }
5392
5393   // 4. Finish the call.
5394
5395   // If the call doesn't return, finish the basic block and clear the
5396   // insertion point; this allows the rest of IRGen to discard
5397   // unreachable code.
5398   if (CI->doesNotReturn()) {
5399     if (UnusedReturnSizePtr)
5400       PopCleanupBlock();
5401
5402     // Strip away the noreturn attribute to better diagnose unreachable UB.
5403     if (SanOpts.has(SanitizerKind::Unreachable)) {
5404       // Also remove it from the function, since CallBase::hasFnAttr
5405       // additionally checks the attributes of the called function.
5406       if (auto *F = CI->getCalledFunction())
5407         F->removeFnAttr(llvm::Attribute::NoReturn);
5408       CI->removeFnAttr(llvm::Attribute::NoReturn);
5409
5410       // Avoid incompatibility with ASan which relies on the `noreturn`
5411       // attribute to insert handler calls.
5412       if (SanOpts.hasOneOf(SanitizerKind::Address |
5413                            SanitizerKind::KernelAddress)) {
5414         SanitizerScope SanScope(this);
5415         llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5416         Builder.SetInsertPoint(CI);
5417         auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5418         llvm::FunctionCallee Fn =
5419             CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5420         EmitNounwindRuntimeCall(Fn);
5421       }
5422     }
5423
5424     EmitUnreachable(Loc);
5425     Builder.ClearInsertionPoint();
5426
5427     // FIXME: For now, emit a dummy basic block because expr emitters in
5428     // general are not ready to handle emitting expressions at unreachable
5429     // points.
5430     EnsureInsertPoint();
5431
5432     // Return a reasonable RValue.
5433     return GetUndefRValue(RetTy);
5434   }
5435
5436   // If this is a musttail call, return immediately. We do not branch to the
5437   // epilogue in this case.
5438   if (IsMustTail) {
5439     for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5440          ++it) {
5441       EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5442       if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5443         CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5444     }
5445     if (CI->getType()->isVoidTy())
5446       Builder.CreateRetVoid();
5447     else
5448       Builder.CreateRet(CI);
5449     Builder.ClearInsertionPoint();
5450     EnsureInsertPoint();
5451     return GetUndefRValue(RetTy);
5452   }
5453
5454   // Perform the swifterror writeback.
5455   if (swiftErrorTemp.isValid()) {
5456     llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5457     Builder.CreateStore(errorResult, swiftErrorArg);
5458   }
5459
5460   // Emit any call-associated writebacks immediately. Arguably this
5461   // should happen after any return-value munging.
5462   if (CallArgs.hasWritebacks())
5463     emitWritebacks(*this, CallArgs);
5464
5465   // The stack cleanup for inalloca arguments has to run out of the normal
5466   // lexical order, so deactivate it and run it manually here.
5467   CallArgs.freeArgumentMemory(*this);
5468
5469   // Extract the return value.
5470   RValue Ret = [&] {
5471     switch (RetAI.getKind()) {
5472     case ABIArgInfo::CoerceAndExpand: {
5473       auto coercionType = RetAI.getCoerceAndExpandType();
5474
5475       Address addr = SRetPtr;
5476       addr = Builder.CreateElementBitCast(addr, coercionType);
5477
5478       assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5479       bool requiresExtract = isa<llvm::StructType>(CI->getType());
5480
5481       unsigned unpaddedIndex = 0;
5482       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5483         llvm::Type *eltType = coercionType->getElementType(i);
5484         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5485         Address eltAddr = Builder.CreateStructGEP(addr, i);
5486         llvm::Value *elt = CI;
5487         if (requiresExtract)
5488           elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5489         else
5490           assert(unpaddedIndex == 0);
5491         Builder.CreateStore(elt, eltAddr);
5492       }
5493       // FALLTHROUGH
5494       LLVM_FALLTHROUGH;
5495     }
5496
5497     case ABIArgInfo::InAlloca:
5498     case ABIArgInfo::Indirect: {
5499       RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5500       if (UnusedReturnSizePtr)
5501         PopCleanupBlock();
5502       return ret;
5503     }
5504
5505     case ABIArgInfo::Ignore:
5506       // Even if we are ignoring the result, make sure to construct the
5507       // appropriate return value for our caller.
5508       return GetUndefRValue(RetTy);
5509
5510     case ABIArgInfo::Extend:
5511     case ABIArgInfo::Direct: {
5512       llvm::Type *RetIRTy = ConvertType(RetTy);
5513       if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5514         switch (getEvaluationKind(RetTy)) {
5515         case TEK_Complex: {
5516           llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5517           llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5518           return RValue::getComplex(std::make_pair(Real, Imag));
5519         }
5520         case TEK_Aggregate: {
5521           Address DestPtr = ReturnValue.getValue();
5522           bool DestIsVolatile = ReturnValue.isVolatile();
5523
5524           if (!DestPtr.isValid()) {
5525             DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5526             DestIsVolatile = false;
5527           }
5528           EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5529           return RValue::getAggregate(DestPtr);
5530         }
5531         case TEK_Scalar: {
5532           // If the return value doesn't match, perform a bitcast to coerce
5533           // it. This can happen due to trivial type mismatches.
5534           llvm::Value *V = CI;
5535           if (V->getType() != RetIRTy)
5536             V = Builder.CreateBitCast(V, RetIRTy);
5537           return RValue::get(V);
5538         }
5539         }
5540         llvm_unreachable("bad evaluation kind");
5541       }
5542
5543       Address DestPtr = ReturnValue.getValue();
5544       bool DestIsVolatile = ReturnValue.isVolatile();
5545
5546       if (!DestPtr.isValid()) {
5547         DestPtr = CreateMemTemp(RetTy, "coerce");
5548         DestIsVolatile = false;
5549       }
5550
5551       // If the value is offset in memory, apply the offset now.
5552       Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5553       CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5554
5555       return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5556     }
5557
5558     case ABIArgInfo::Expand:
5559     case ABIArgInfo::IndirectAliased:
5560       llvm_unreachable("Invalid ABI kind for return argument");
5561     }
5562
5563     llvm_unreachable("Unhandled ABIArgInfo::Kind");
5564   }();
5565
5566   // Emit the assume_aligned check on the return value.
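  // The assumption is ultimately emitted as an operand-bundle form of
  // llvm.assume, roughly:
  //
  //   call void @llvm.assume(i1 true) [ "align"(i8* %call, i64 %alignment) ]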
5567 if (Ret.isScalar() && TargetDecl) { 5568 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); 5569 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret); 5570 } 5571 5572 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though 5573 // we can't use the full cleanup mechanism. 5574 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall) 5575 LifetimeEnd.Emit(*this, /*Flags=*/{}); 5576 5577 if (!ReturnValue.isExternallyDestructed() && 5578 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct) 5579 pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(), 5580 RetTy); 5581 5582 return Ret; 5583 } 5584 5585 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const { 5586 if (isVirtual()) { 5587 const CallExpr *CE = getVirtualCallExpr(); 5588 return CGF.CGM.getCXXABI().getVirtualFunctionPointer( 5589 CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(), 5590 CE ? CE->getBeginLoc() : SourceLocation()); 5591 } 5592 5593 return *this; 5594 } 5595 5596 /* VarArg handling */ 5597 5598 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) { 5599 VAListAddr = VE->isMicrosoftABI() 5600 ? EmitMSVAListRef(VE->getSubExpr()) 5601 : EmitVAListRef(VE->getSubExpr()); 5602 QualType Ty = VE->getType(); 5603 if (VE->isMicrosoftABI()) 5604 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty); 5605 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty); 5606 } 5607