//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto, unsigned prefixArgs, unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}
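
// For illustration of the layout maintained above and by appendParameterTypes
// below: a prototype such as
//   void f(void *p __attribute__((pass_object_size(0))));
// is lowered with two parameters (the pointer plus an implicit size_t), so
// the ExtParameterInfo array must be kept in step with the flattened list:
//   [prefix args][prototype params, +1 size_t per pass_object_size][suffix]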

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}
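
// For example, `__attribute__((ms_abi))` only changes the convention off
// Windows (to CC_Win64); on a Windows target it is already the default and
// collapses to CC_C. `__attribute__((sysv_abi))` is the mirror image.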

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}
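
// The structor signatures arranged below are laid out roughly as
//   [this][ABI prefix args][formal parameters][ABI suffix args]
// where the ABI-specific pieces come from buildStructorSignature, e.g. the
// VTT pointer for base-object constructors in the Itanium ABI, or the
// "most derived" flag in the Microsoft ABI.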

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgs AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix args are inserted after the first ('this') param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic
    // arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}
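
// For example, under the Itanium ABI a base-object constructor of a class
// with virtual bases takes a VTT pointer immediately after `this`, so such a
// call would pass ExtraPrefixArgs = 1, making TotalPrefixArgs = 2.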

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}
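
// For example, `-(void)foo:(int)x` is arranged as if it were
//   void f(id self, SEL _cmd, int x);
// with the two reserved ExtParameterInfo slots covering the implicit
// arguments.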

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}
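
// For example, a call to `int printf(const char *, ...)` passing three
// arguments is arranged with RequiredArgs(1): only the prototype's fixed
// parameter is required, and the remaining arguments are variadic.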

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod=*/false, /*chainCall=*/false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                              ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, bool instanceMethod, bool chainCall,
    ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Look up or create a unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if the record is a union,
    // only the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace
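
// For illustration, a type such as
//   struct S { int a[2]; _Complex float c; };
// expands recursively to four scalars (i32, i32, float, float), so its
// expansion size as computed below is 4.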

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(*AI++), LV);
    else
      EmitStoreOfScalar(*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a pointer to a struct from
/// which we are accessing some number of bytes, try to gep into the struct to
/// get at its inner goodness. Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
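
// For example, given a source type { { i32, i32 }, i8 } and DstSize == 8,
// this dives into the inner { i32, i32 }, whose 8-byte store size exactly
// covers the bytes being accessed.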

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where
/// both are either integers or pointers. This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
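
// For example, when coercing an i64 to an i32, a little-endian target simply
// truncates (keeping the low 32 bits), while a big-endian target first
// shifts right by 32 so that the truncation keeps the high bits - matching
// what a store of the i64 followed by a narrow load would produce.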

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits not present in
/// the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
  Address SrcCasted = CGF.Builder.CreateElementBitCast(Src, CGF.Int8Ty);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateElementBitCast(Tmp, CGF.Int8Ty);
    Address DstCasted = CGF.Builder.CreateElementBitCast(Dst, CGF.Int8Ty);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(
        addr, CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// their count.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace
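
// For illustration, a CGFunctionInfo whose return is Indirect (sret, not
// after `this`) and whose single argument is Direct and flattened into two
// scalars maps as:
//   IR arg 0:    the sret pointer
//   IR args 1-2: the Clang argument (FirstArgIndex = 1, NumberOfArgs = 2)
// giving totalIRArgs() == 3.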
1498 if (IRArgNo == 1 && SwapThisWithSRet) 1499 IRArgNo++; 1500 } 1501 assert(ArgNo == ArgInfo.size()); 1502 1503 if (FI.usesInAlloca()) 1504 InallocaArgNo = IRArgNo++; 1505 1506 TotalIRArgs = IRArgNo; 1507 } 1508 } // namespace 1509 1510 /***/ 1511 1512 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1513 const auto &RI = FI.getReturnInfo(); 1514 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); 1515 } 1516 1517 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1518 return ReturnTypeUsesSRet(FI) && 1519 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1520 } 1521 1522 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1523 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1524 switch (BT->getKind()) { 1525 default: 1526 return false; 1527 case BuiltinType::Float: 1528 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 1529 case BuiltinType::Double: 1530 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 1531 case BuiltinType::LongDouble: 1532 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 1533 } 1534 } 1535 1536 return false; 1537 } 1538 1539 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1540 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1541 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1542 if (BT->getKind() == BuiltinType::LongDouble) 1543 return getTarget().useObjCFP2RetForComplexLongDouble(); 1544 } 1545 } 1546 1547 return false; 1548 } 1549 1550 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1551 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1552 return GetFunctionType(FI); 1553 } 1554 1555 llvm::FunctionType * 1556 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1557 1558 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1559 (void)Inserted; 1560 assert(Inserted && "Recursively being processed?"); 1561 1562 llvm::Type *resultType = nullptr; 1563 const ABIArgInfo &retAI = FI.getReturnInfo(); 1564 switch (retAI.getKind()) { 1565 case ABIArgInfo::Expand: 1566 llvm_unreachable("Invalid ABI kind for return argument"); 1567 1568 case ABIArgInfo::Extend: 1569 case ABIArgInfo::Direct: 1570 resultType = retAI.getCoerceToType(); 1571 break; 1572 1573 case ABIArgInfo::InAlloca: 1574 if (retAI.getInAllocaSRet()) { 1575 // sret things on win32 aren't void, they return the sret pointer. 1576 QualType ret = FI.getReturnType(); 1577 llvm::Type *ty = ConvertType(ret); 1578 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1579 resultType = llvm::PointerType::get(ty, addressSpace); 1580 } else { 1581 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1582 } 1583 break; 1584 1585 case ABIArgInfo::Indirect: 1586 case ABIArgInfo::Ignore: 1587 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1588 break; 1589 1590 case ABIArgInfo::CoerceAndExpand: 1591 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1592 break; 1593 } 1594 1595 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1596 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1597 1598 // Add type for sret argument. 1599 if (IRFunctionArgs.hasSRetArg()) { 1600 QualType Ret = FI.getReturnType(); 1601 llvm::Type *Ty = ConvertType(Ret); 1602 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1603 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1604 llvm::PointerType::get(Ty, AddressSpace); 1605 } 1606 1607 // Add type for inalloca argument. 
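  // E.g. on 32-bit Windows, a prototype taking a non-trivially-copyable
  // argument might be lowered (sketch) as
  //   define void @f(<{ %struct.A }>* inalloca)
  // where the single trailing pointer covers the entire memory-argument area.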
1608 if (IRFunctionArgs.hasInallocaArg()) { 1609 auto ArgStruct = FI.getArgStruct(); 1610 assert(ArgStruct); 1611 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1612 } 1613 1614 // Add in all of the required arguments. 1615 unsigned ArgNo = 0; 1616 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1617 ie = it + FI.getNumRequiredArgs(); 1618 for (; it != ie; ++it, ++ArgNo) { 1619 const ABIArgInfo &ArgInfo = it->info; 1620 1621 // Insert a padding type to ensure proper alignment. 1622 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1623 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1624 ArgInfo.getPaddingType(); 1625 1626 unsigned FirstIRArg, NumIRArgs; 1627 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1628 1629 switch (ArgInfo.getKind()) { 1630 case ABIArgInfo::Ignore: 1631 case ABIArgInfo::InAlloca: 1632 assert(NumIRArgs == 0); 1633 break; 1634 1635 case ABIArgInfo::Indirect: { 1636 assert(NumIRArgs == 1); 1637 // indirect arguments are always on the stack, which is alloca addr space. 1638 llvm::Type *LTy = ConvertTypeForMem(it->type); 1639 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1640 CGM.getDataLayout().getAllocaAddrSpace()); 1641 break; 1642 } 1643 1644 case ABIArgInfo::Extend: 1645 case ABIArgInfo::Direct: { 1646 // Fast-isel and the optimizer generally like scalar values better than 1647 // FCAs, so we flatten them if this is safe to do for this argument. 1648 llvm::Type *argType = ArgInfo.getCoerceToType(); 1649 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1650 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1651 assert(NumIRArgs == st->getNumElements()); 1652 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1653 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1654 } else { 1655 assert(NumIRArgs == 1); 1656 ArgTypes[FirstIRArg] = argType; 1657 } 1658 break; 1659 } 1660 1661 case ABIArgInfo::CoerceAndExpand: { 1662 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1663 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1664 *ArgTypesIter++ = EltTy; 1665 } 1666 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1667 break; 1668 } 1669 1670 case ABIArgInfo::Expand: 1671 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1672 getExpandedTypes(it->type, ArgTypesIter); 1673 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1674 break; 1675 } 1676 } 1677 1678 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1679 assert(Erased && "Not in set?"); 1680 1681 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1682 } 1683 1684 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1685 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1686 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1687 1688 if (!isFuncTypeConvertible(FPT)) 1689 return llvm::StructType::get(getLLVMContext()); 1690 1691 return GetFunctionType(GD); 1692 } 1693 1694 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1695 llvm::AttrBuilder &FuncAttrs, 1696 const FunctionProtoType *FPT) { 1697 if (!FPT) 1698 return; 1699 1700 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1701 FPT->isNothrow()) 1702 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1703 } 1704 1705 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone, 1706 bool AttrOnCallSite, 1707 llvm::AttrBuilder &FuncAttrs) { 1708 // OptimizeNoneAttr takes precedence over -Os or -Oz. 
No warning needed. 1709 if (!HasOptnone) { 1710 if (CodeGenOpts.OptimizeSize) 1711 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1712 if (CodeGenOpts.OptimizeSize == 2) 1713 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1714 } 1715 1716 if (CodeGenOpts.DisableRedZone) 1717 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1718 if (CodeGenOpts.IndirectTlsSegRefs) 1719 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1720 if (CodeGenOpts.NoImplicitFloat) 1721 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1722 1723 if (AttrOnCallSite) { 1724 // Attributes that should go on the call site only. 1725 if (!CodeGenOpts.SimplifyLibCalls || 1726 CodeGenOpts.isNoBuiltinFunc(Name.data())) 1727 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin); 1728 if (!CodeGenOpts.TrapFuncName.empty()) 1729 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName); 1730 } else { 1731 StringRef FpKind; 1732 switch (CodeGenOpts.getFramePointer()) { 1733 case CodeGenOptions::FramePointerKind::None: 1734 FpKind = "none"; 1735 break; 1736 case CodeGenOptions::FramePointerKind::NonLeaf: 1737 FpKind = "non-leaf"; 1738 break; 1739 case CodeGenOptions::FramePointerKind::All: 1740 FpKind = "all"; 1741 break; 1742 } 1743 FuncAttrs.addAttribute("frame-pointer", FpKind); 1744 1745 FuncAttrs.addAttribute("less-precise-fpmad", 1746 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD)); 1747 1748 if (CodeGenOpts.NullPointerIsValid) 1749 FuncAttrs.addAttribute("null-pointer-is-valid", "true"); 1750 1751 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE()) 1752 FuncAttrs.addAttribute("denormal-fp-math", 1753 CodeGenOpts.FPDenormalMode.str()); 1754 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) { 1755 FuncAttrs.addAttribute( 1756 "denormal-fp-math-f32", 1757 CodeGenOpts.FP32DenormalMode.str()); 1758 } 1759 1760 FuncAttrs.addAttribute("no-trapping-math", 1761 llvm::toStringRef(CodeGenOpts.NoTrappingMath)); 1762 1763 // Strict (compliant) code is the default, so only add this attribute to 1764 // indicate that we are trying to workaround a problem case. 1765 if (!CodeGenOpts.StrictFloatCastOverflow) 1766 FuncAttrs.addAttribute("strict-float-cast-overflow", "false"); 1767 1768 // TODO: Are these all needed? 1769 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags. 1770 FuncAttrs.addAttribute("no-infs-fp-math", 1771 llvm::toStringRef(CodeGenOpts.NoInfsFPMath)); 1772 FuncAttrs.addAttribute("no-nans-fp-math", 1773 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath)); 1774 FuncAttrs.addAttribute("unsafe-fp-math", 1775 llvm::toStringRef(CodeGenOpts.UnsafeFPMath)); 1776 FuncAttrs.addAttribute("use-soft-float", 1777 llvm::toStringRef(CodeGenOpts.SoftFloat)); 1778 FuncAttrs.addAttribute("stack-protector-buffer-size", 1779 llvm::utostr(CodeGenOpts.SSPBufferSize)); 1780 FuncAttrs.addAttribute("no-signed-zeros-fp-math", 1781 llvm::toStringRef(CodeGenOpts.NoSignedZeros)); 1782 FuncAttrs.addAttribute( 1783 "correctly-rounded-divide-sqrt-fp-math", 1784 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt)); 1785 1786 // TODO: Reciprocal estimate codegen options should apply to instructions? 
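    // Sketch of the expected mapping: a driver invocation such as
    //   clang -mrecip=divf,vec-sqrt ...
    // arrives here as Recips == {"divf", "vec-sqrt"} and is emitted as the IR
    // function attribute "reciprocal-estimates"="divf,vec-sqrt".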
1787     const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1788     if (!Recips.empty())
1789       FuncAttrs.addAttribute("reciprocal-estimates",
1790                              llvm::join(Recips, ","));
1791
1792     if (!CodeGenOpts.PreferVectorWidth.empty() &&
1793         CodeGenOpts.PreferVectorWidth != "none")
1794       FuncAttrs.addAttribute("prefer-vector-width",
1795                              CodeGenOpts.PreferVectorWidth);
1796
1797     if (CodeGenOpts.StackRealignment)
1798       FuncAttrs.addAttribute("stackrealign");
1799     if (CodeGenOpts.Backchain)
1800       FuncAttrs.addAttribute("backchain");
1801
1802     if (CodeGenOpts.SpeculativeLoadHardening)
1803       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1804   }
1805
1806   if (getLangOpts().assumeFunctionsAreConvergent()) {
1807     // Conservatively, mark all functions and calls in CUDA and OpenCL as
1808     // convergent (meaning, they may call an intrinsically convergent op, such
1809     // as __syncthreads() / barrier(), and so can't have certain optimizations
1810     // applied around them). LLVM will remove this attribute where it safely
1811     // can.
1812     FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1813   }
1814
1815   if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1816     // Exceptions aren't supported in CUDA device code.
1817     FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1818   }
1819
1820   for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1821     StringRef Var, Value;
1822     std::tie(Var, Value) = Attr.split('=');
1823     FuncAttrs.addAttribute(Var, Value);
1824   }
1825 }
1826
1827 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1828   llvm::AttrBuilder FuncAttrs;
1829   ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1830                              /* AttrOnCallSite = */ false, FuncAttrs);
1831   F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1832 }
1833
1834 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1835                                    const LangOptions &LangOpts,
1836                                    const NoBuiltinAttr *NBA = nullptr) {
1837   auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1838     SmallString<32> AttributeName;
1839     AttributeName += "no-builtin-";
1840     AttributeName += BuiltinName;
1841     FuncAttrs.addAttribute(AttributeName);
1842   };
1843
1844   // First, handle the language options passed through -fno-builtin.
1845   if (LangOpts.NoBuiltin) {
1846     // -fno-builtin disables them all.
1847     FuncAttrs.addAttribute("no-builtins");
1848     return;
1849   }
1850
1851   // Then, add attributes for builtins specified through -fno-builtin-<name>.
1852   llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1853
1854   // Now, let's check the __attribute__((no_builtin("..."))) attribute added
1855   // to the source.
1856   if (!NBA)
1857     return;
1858
1859   // If there is a wildcard in the builtin names specified through the
1860   // attribute, disable them all.
1861   if (llvm::is_contained(NBA->builtinNames(), "*")) {
1862     FuncAttrs.addAttribute("no-builtins");
1863     return;
1864   }
1865
1866   // And last, add the rest of the builtin names.
1867   llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1868 }
1869
1870 void CodeGenModule::ConstructAttributeList(
1871     StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1872     llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1873   llvm::AttrBuilder FuncAttrs;
1874   llvm::AttrBuilder RetAttrs;
1875
1876   CallingConv = FI.getEffectiveCallingConvention();
1877   if (FI.isNoReturn())
1878     FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1879
1880   // If we have information about the function prototype, we can learn
1881   // attributes from there.
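  // For instance, a callee declared as
  //   void f() noexcept;
  // carries a resolved non-throwing exception spec, which the call below
  // translates into the LLVM 'nounwind' attribute.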
1882   AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1883                                      CalleeInfo.getCalleeFunctionProtoType());
1884
1885   const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
1886
1887   bool HasOptnone = false;
1888   // The NoBuiltinAttr attached to a TargetDecl (only allowed on FunctionDecls).
1889   const NoBuiltinAttr *NBA = nullptr;
1890   // FIXME: handle sseregparm someday...
1891   if (TargetDecl) {
1892     if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1893       FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1894     if (TargetDecl->hasAttr<NoThrowAttr>())
1895       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1896     if (TargetDecl->hasAttr<NoReturnAttr>())
1897       FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1898     if (TargetDecl->hasAttr<ColdAttr>())
1899       FuncAttrs.addAttribute(llvm::Attribute::Cold);
1900     if (TargetDecl->hasAttr<NoDuplicateAttr>())
1901       FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1902     if (TargetDecl->hasAttr<ConvergentAttr>())
1903       FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1904
1905     if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1906       AddAttributesFromFunctionProtoType(
1907           getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1908       if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
1909         // A sane operator new returns a non-aliasing pointer.
1910         auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
1911         if (getCodeGenOpts().AssumeSaneOperatorNew &&
1912             (Kind == OO_New || Kind == OO_Array_New))
1913           RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1914       }
1915       const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1916       const bool IsVirtualCall = MD && MD->isVirtual();
1917       // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1918       // virtual function. These attributes are not inherited by overriders.
1919       if (!(AttrOnCallSite && IsVirtualCall)) {
1920         if (Fn->isNoReturn())
1921           FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1922         NBA = Fn->getAttr<NoBuiltinAttr>();
1923       }
1924     }
1925
1926     // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
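    // E.g. (illustrative):
    //   __attribute__((const)) int f(int);  // -> readnone nounwind
    //   __attribute__((pure))  int g(int);  // -> readonly nounwind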
1927     if (TargetDecl->hasAttr<ConstAttr>()) {
1928       FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1929       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1930     } else if (TargetDecl->hasAttr<PureAttr>()) {
1931       FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1932       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1933     } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1934       FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1935       FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1936     }
1937     if (TargetDecl->hasAttr<RestrictAttr>())
1938       RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1939     if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1940         !CodeGenOpts.NullPointerIsValid)
1941       RetAttrs.addAttribute(llvm::Attribute::NonNull);
1942     if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1943       FuncAttrs.addAttribute("no_caller_saved_registers");
1944     if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1945       FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1946
1947     HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1948     if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1949       Optional<unsigned> NumElemsParam;
1950       if (AllocSize->getNumElemsParam().isValid())
1951         NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
1952       FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
1953                                  NumElemsParam);
1954     }
1955   }
1956
1957   // Attach "no-builtins" attributes to:
1958   // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
1959   // * definitions: "no-builtins" or "no-builtin-<name>" only.
1960   // The attributes can come from:
1961   // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
1962   // * FunctionDecl attributes: __attribute__((no_builtin(...)))
1963   addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
1964
1965   ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1966
1967   // This must run after constructing the default function attribute list
1968   // to ensure that the speculative load hardening attribute is removed
1969   // in the case where the -mspeculative-load-hardening flag was passed.
1970   if (TargetDecl) {
1971     if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
1972       FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
1973     if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
1974       FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1975   }
1976
1977   if (CodeGenOpts.EnableSegmentedStacks &&
1978       !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1979     FuncAttrs.addAttribute("split-stack");
1980
1981   // Add NonLazyBind attribute to function declarations when -fno-plt
1982   // is used.
1983   if (TargetDecl && CodeGenOpts.NoPLT) {
1984     if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1985       if (!Fn->isDefined() && !AttrOnCallSite) {
1986         FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1987       }
1988     }
1989   }
1990
1991   if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1992     if (getLangOpts().OpenCLVersion <= 120) {
1993       // In OpenCL v1.2, work groups are always uniform.
1994       FuncAttrs.addAttribute("uniform-work-group-size", "true");
1995     } else {
1996       // In OpenCL v2.0, work groups may or may not be uniform. The
1997       // '-cl-uniform-work-group-size' compile option gives the compiler a
1998       // hint that the global work-size is a multiple of the work-group size
1999       // specified to clEnqueueNDRangeKernel
2000       // (i.e. work groups are uniform).
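      // Sketch of the resulting IR attribute for an OpenCL 2.0 kernel:
      //   with -cl-uniform-work-group-size:  "uniform-work-group-size"="true"
      //   without it (the default):          "uniform-work-group-size"="false"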
2001 FuncAttrs.addAttribute("uniform-work-group-size", 2002 llvm::toStringRef(CodeGenOpts.UniformWGSize)); 2003 } 2004 } 2005 2006 if (!AttrOnCallSite) { 2007 bool DisableTailCalls = false; 2008 2009 if (CodeGenOpts.DisableTailCalls) 2010 DisableTailCalls = true; 2011 else if (TargetDecl) { 2012 if (TargetDecl->hasAttr<DisableTailCallsAttr>() || 2013 TargetDecl->hasAttr<AnyX86InterruptAttr>()) 2014 DisableTailCalls = true; 2015 else if (CodeGenOpts.NoEscapingBlockTailCalls) { 2016 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl)) 2017 if (!BD->doesNotEscape()) 2018 DisableTailCalls = true; 2019 } 2020 } 2021 2022 FuncAttrs.addAttribute("disable-tail-calls", 2023 llvm::toStringRef(DisableTailCalls)); 2024 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); 2025 } 2026 2027 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 2028 2029 QualType RetTy = FI.getReturnType(); 2030 const ABIArgInfo &RetAI = FI.getReturnInfo(); 2031 switch (RetAI.getKind()) { 2032 case ABIArgInfo::Extend: 2033 if (RetAI.isSignExt()) 2034 RetAttrs.addAttribute(llvm::Attribute::SExt); 2035 else 2036 RetAttrs.addAttribute(llvm::Attribute::ZExt); 2037 LLVM_FALLTHROUGH; 2038 case ABIArgInfo::Direct: 2039 if (RetAI.getInReg()) 2040 RetAttrs.addAttribute(llvm::Attribute::InReg); 2041 break; 2042 case ABIArgInfo::Ignore: 2043 break; 2044 2045 case ABIArgInfo::InAlloca: 2046 case ABIArgInfo::Indirect: { 2047 // inalloca and sret disable readnone and readonly 2048 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2049 .removeAttribute(llvm::Attribute::ReadNone); 2050 break; 2051 } 2052 2053 case ABIArgInfo::CoerceAndExpand: 2054 break; 2055 2056 case ABIArgInfo::Expand: 2057 llvm_unreachable("Invalid ABI kind for return argument"); 2058 } 2059 2060 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) { 2061 QualType PTy = RefTy->getPointeeType(); 2062 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2063 RetAttrs.addDereferenceableAttr( 2064 getMinimumObjectSize(PTy).getQuantity()); 2065 else if (getContext().getTargetAddressSpace(PTy) == 0 && 2066 !CodeGenOpts.NullPointerIsValid) 2067 RetAttrs.addAttribute(llvm::Attribute::NonNull); 2068 } 2069 2070 bool hasUsedSRet = false; 2071 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs()); 2072 2073 // Attach attributes to sret. 2074 if (IRFunctionArgs.hasSRetArg()) { 2075 llvm::AttrBuilder SRETAttrs; 2076 SRETAttrs.addAttribute(llvm::Attribute::StructRet); 2077 hasUsedSRet = true; 2078 if (RetAI.getInReg()) 2079 SRETAttrs.addAttribute(llvm::Attribute::InReg); 2080 ArgAttrs[IRFunctionArgs.getSRetArgNo()] = 2081 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs); 2082 } 2083 2084 // Attach attributes to inalloca argument. 2085 if (IRFunctionArgs.hasInallocaArg()) { 2086 llvm::AttrBuilder Attrs; 2087 Attrs.addAttribute(llvm::Attribute::InAlloca); 2088 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] = 2089 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2090 } 2091 2092 unsigned ArgNo = 0; 2093 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2094 E = FI.arg_end(); 2095 I != E; ++I, ++ArgNo) { 2096 QualType ParamType = I->type; 2097 const ABIArgInfo &AI = I->info; 2098 llvm::AttrBuilder Attrs; 2099 2100 // Add attribute for padding argument, if necessary. 
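    // Padding arguments are synthetic IR parameters that some ABIs insert so
    // that the *next* argument lands in the right registers or stack slot;
    // the padding value itself is never read. Hypothetical lowering:
    //   define void @f(i32 %pad, i64 %x)   ; %pad exists only to align %x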
2101 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2102 if (AI.getPaddingInReg()) { 2103 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2104 llvm::AttributeSet::get( 2105 getLLVMContext(), 2106 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 2107 } 2108 } 2109 2110 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2111 // have the corresponding parameter variable. It doesn't make 2112 // sense to do it here because parameters are so messed up. 2113 switch (AI.getKind()) { 2114 case ABIArgInfo::Extend: 2115 if (AI.isSignExt()) 2116 Attrs.addAttribute(llvm::Attribute::SExt); 2117 else 2118 Attrs.addAttribute(llvm::Attribute::ZExt); 2119 LLVM_FALLTHROUGH; 2120 case ABIArgInfo::Direct: 2121 if (ArgNo == 0 && FI.isChainCall()) 2122 Attrs.addAttribute(llvm::Attribute::Nest); 2123 else if (AI.getInReg()) 2124 Attrs.addAttribute(llvm::Attribute::InReg); 2125 break; 2126 2127 case ABIArgInfo::Indirect: { 2128 if (AI.getInReg()) 2129 Attrs.addAttribute(llvm::Attribute::InReg); 2130 2131 if (AI.getIndirectByVal()) 2132 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); 2133 2134 CharUnits Align = AI.getIndirectAlign(); 2135 2136 // In a byval argument, it is important that the required 2137 // alignment of the type is honored, as LLVM might be creating a 2138 // *new* stack object, and needs to know what alignment to give 2139 // it. (Sometimes it can deduce a sensible alignment on its own, 2140 // but not if clang decides it must emit a packed struct, or the 2141 // user specifies increased alignment requirements.) 2142 // 2143 // This is different from indirect *not* byval, where the object 2144 // exists already, and the align attribute is purely 2145 // informative. 2146 assert(!Align.isZero()); 2147 2148 // For now, only add this when we have a byval argument. 2149 // TODO: be less lazy about updating test cases. 2150 if (AI.getIndirectByVal()) 2151 Attrs.addAlignmentAttr(Align.getQuantity()); 2152 2153 // byval disables readnone and readonly. 2154 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2155 .removeAttribute(llvm::Attribute::ReadNone); 2156 break; 2157 } 2158 case ABIArgInfo::Ignore: 2159 case ABIArgInfo::Expand: 2160 case ABIArgInfo::CoerceAndExpand: 2161 break; 2162 2163 case ABIArgInfo::InAlloca: 2164 // inalloca disables readnone and readonly. 2165 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2166 .removeAttribute(llvm::Attribute::ReadNone); 2167 continue; 2168 } 2169 2170 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2171 QualType PTy = RefTy->getPointeeType(); 2172 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2173 Attrs.addDereferenceableAttr( 2174 getMinimumObjectSize(PTy).getQuantity()); 2175 else if (getContext().getTargetAddressSpace(PTy) == 0 && 2176 !CodeGenOpts.NullPointerIsValid) 2177 Attrs.addAttribute(llvm::Attribute::NonNull); 2178 } 2179 2180 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2181 case ParameterABI::Ordinary: 2182 break; 2183 2184 case ParameterABI::SwiftIndirectResult: { 2185 // Add 'sret' if we haven't already used it for something, but 2186 // only if the result is void. 2187 if (!hasUsedSRet && RetTy->isVoidType()) { 2188 Attrs.addAttribute(llvm::Attribute::StructRet); 2189 hasUsedSRet = true; 2190 } 2191 2192 // Add 'noalias' in either case. 2193 Attrs.addAttribute(llvm::Attribute::NoAlias); 2194 2195 // Add 'dereferenceable' and 'alignment'. 
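      // E.g. for an indirect result slot holding a 16-byte type with 8-byte
      // alignment, this adds (sketch) 'dereferenceable(16) align 8' to the
      // pointer parameter.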
2196 auto PTy = ParamType->getPointeeType(); 2197 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2198 auto info = getContext().getTypeInfoInChars(PTy); 2199 Attrs.addDereferenceableAttr(info.first.getQuantity()); 2200 Attrs.addAttribute(llvm::Attribute::getWithAlignment( 2201 getLLVMContext(), info.second.getAsAlign())); 2202 } 2203 break; 2204 } 2205 2206 case ParameterABI::SwiftErrorResult: 2207 Attrs.addAttribute(llvm::Attribute::SwiftError); 2208 break; 2209 2210 case ParameterABI::SwiftContext: 2211 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2212 break; 2213 } 2214 2215 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2216 Attrs.addAttribute(llvm::Attribute::NoCapture); 2217 2218 if (Attrs.hasAttributes()) { 2219 unsigned FirstIRArg, NumIRArgs; 2220 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2221 for (unsigned i = 0; i < NumIRArgs; i++) 2222 ArgAttrs[FirstIRArg + i] = 2223 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2224 } 2225 } 2226 assert(ArgNo == FI.arg_size()); 2227 2228 AttrList = llvm::AttributeList::get( 2229 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2230 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2231 } 2232 2233 /// An argument came in as a promoted argument; demote it back to its 2234 /// declared type. 2235 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2236 const VarDecl *var, 2237 llvm::Value *value) { 2238 llvm::Type *varType = CGF.ConvertType(var->getType()); 2239 2240 // This can happen with promotions that actually don't change the 2241 // underlying type, like the enum promotions. 2242 if (value->getType() == varType) return value; 2243 2244 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2245 && "unexpected promotion type"); 2246 2247 if (isa<llvm::IntegerType>(varType)) 2248 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2249 2250 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2251 } 2252 2253 /// Returns the attribute (either parameter attribute, or function 2254 /// attribute), which declares argument ArgNo to be non-null. 2255 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2256 QualType ArgType, unsigned ArgNo) { 2257 // FIXME: __attribute__((nonnull)) can also be applied to: 2258 // - references to pointers, where the pointee is known to be 2259 // nonnull (apparently a Clang extension) 2260 // - transparent unions containing pointers 2261 // In the former case, LLVM IR cannot represent the constraint. In 2262 // the latter case, we have no guarantee that the transparent union 2263 // is in fact passed as a pointer. 2264 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2265 return nullptr; 2266 // First, check attribute on parameter itself. 2267 if (PVD) { 2268 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2269 return ParmNNAttr; 2270 } 2271 // Check function attributes. 
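// A function-level attribute names its non-null parameters by (1-based)
// index, e.g.
//   __attribute__((nonnull(1, 3))) void f(int *a, int *b, int *c);
// and NonNullAttr::isNonNull(ArgNo) below reports whether this particular
// argument is covered.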
2272   if (!FD)
2273     return nullptr;
2274   for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2275     if (NNAttr->isNonNull(ArgNo))
2276       return NNAttr;
2277   }
2278   return nullptr;
2279 }
2280
2281 namespace {
2282 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2283   Address Temp;
2284   Address Arg;
2285   CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2286   void Emit(CodeGenFunction &CGF, Flags flags) override {
2287     llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2288     CGF.Builder.CreateStore(errorValue, Arg);
2289   }
2290 };
2291 }
2292
2293 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2294                                          llvm::Function *Fn,
2295                                          const FunctionArgList &Args) {
2296   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2297     // Naked functions don't have prologues.
2298     return;
2299
2300   // If this is an implicit-return-zero function, go ahead and
2301   // initialize the return value. TODO: it might be nice to have
2302   // a more general mechanism for this that didn't require synthesized
2303   // return statements.
2304   if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2305     if (FD->hasImplicitReturnZero()) {
2306       QualType RetTy = FD->getReturnType().getUnqualifiedType();
2307       llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2308       llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2309       Builder.CreateStore(Zero, ReturnValue);
2310     }
2311   }
2312
2313   // FIXME: We no longer need the types from FunctionArgList; lift up and
2314   // simplify.
2315
2316   ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2317   // Flattened function arguments.
2318   SmallVector<llvm::Value *, 16> FnArgs;
2319   FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2320   for (auto &Arg : Fn->args()) {
2321     FnArgs.push_back(&Arg);
2322   }
2323   assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2324
2325   // If we're using inalloca, all the memory arguments are GEPs off of the last
2326   // parameter, which is a pointer to the complete memory area.
2327   Address ArgStruct = Address::invalid();
2328   if (IRFunctionArgs.hasInallocaArg()) {
2329     ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2330                         FI.getArgStructAlignment());
2331
2332     assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2333   }
2334
2335   // Name the struct return parameter.
2336   if (IRFunctionArgs.hasSRetArg()) {
2337     auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2338     AI->setName("agg.result");
2339     AI->addAttr(llvm::Attribute::NoAlias);
2340   }
2341
2342   // Track if we received the parameter as a pointer (indirect, byval, or
2343   // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2344   // copy it into a local alloca for us.
2345   SmallVector<ParamValue, 16> ArgVals;
2346   ArgVals.reserve(Args.size());
2347
2348   // Create a pointer value for every parameter declaration. This usually
2349   // entails copying one or more LLVM IR arguments into an alloca. Don't push
2350   // any cleanups or do anything that might unwind. We do that separately, so
2351   // we can push the cleanups in the correct order for the ABI.
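  // For a simple callee such as (sketch)
  //   void f(int x, S big);   // with 'big' passed indirectly
  // the loop below records ParamValue::forDirect(<loaded x>) for 'x' and
  // ParamValue::forIndirect(<address of big>) for the aggregate; EmitParmDecl
  // then binds each declaration to its value at the end of the prolog.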
2352   assert(FI.arg_size() == Args.size() &&
2353          "Mismatch between function signature & arguments.");
2354   unsigned ArgNo = 0;
2355   CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2356   for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2357        i != e; ++i, ++info_it, ++ArgNo) {
2358     const VarDecl *Arg = *i;
2359     const ABIArgInfo &ArgI = info_it->info;
2360
2361     bool isPromoted =
2362         isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2363     // Normally we convert straight to the VarDecl's declared type. If the
2364     // parameter was K&R-promoted, however, we use the promoted
2365     // CGFunctionInfo::ArgInfo type and demote the value back afterwards.
2366     QualType Ty = isPromoted ? info_it->type : Arg->getType();
2367     assert(hasScalarEvaluationKind(Ty) ==
2368            hasScalarEvaluationKind(Arg->getType()));
2369
2370     unsigned FirstIRArg, NumIRArgs;
2371     std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2372
2373     switch (ArgI.getKind()) {
2374     case ABIArgInfo::InAlloca: {
2375       assert(NumIRArgs == 0);
2376       auto FieldIndex = ArgI.getInAllocaFieldIndex();
2377       Address V =
2378           Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2379       if (ArgI.getInAllocaIndirect())
2380         V = Address(Builder.CreateLoad(V),
2381                     getContext().getTypeAlignInChars(Ty));
2382       ArgVals.push_back(ParamValue::forIndirect(V));
2383       break;
2384     }
2385
2386     case ABIArgInfo::Indirect: {
2387       assert(NumIRArgs == 1);
2388       Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2389
2390       if (!hasScalarEvaluationKind(Ty)) {
2391         // Aggregates and complex variables are accessed by reference. All we
2392         // need to do is realign the value, if requested.
2393         Address V = ParamAddr;
2394         if (ArgI.getIndirectRealign()) {
2395           Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2396
2397           // Copy from the incoming argument pointer to the temporary with the
2398           // appropriate alignment.
2399           //
2400           // FIXME: We should have a common utility for generating an aggregate
2401           // copy.
2402           CharUnits Size = getContext().getTypeSizeInChars(Ty);
2403           auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2404           Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2405           Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2406           Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2407           V = AlignedTemp;
2408         }
2409         ArgVals.push_back(ParamValue::forIndirect(V));
2410       } else {
2411         // Load scalar value from indirect argument.
2412         llvm::Value *V =
2413             EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2414
2415         if (isPromoted)
2416           V = emitArgumentDemotion(*this, Arg, V);
2417         ArgVals.push_back(ParamValue::forDirect(V));
2418       }
2419       break;
2420     }
2421
2422     case ABIArgInfo::Extend:
2423     case ABIArgInfo::Direct: {
2424
2425       // If we have the trivial case, handle it with no muss and fuss.
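      // Trivial here means the ABI passes the value through unchanged: e.g.
      // an 'i32' parameter whose coercion type is also 'i32' with no offset,
      // so the IR argument can be used directly (modulo the attribute and
      // promotion handling below).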
2426 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2427 ArgI.getCoerceToType() == ConvertType(Ty) && 2428 ArgI.getDirectOffset() == 0) { 2429 assert(NumIRArgs == 1); 2430 llvm::Value *V = FnArgs[FirstIRArg]; 2431 auto AI = cast<llvm::Argument>(V); 2432 2433 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) { 2434 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(), 2435 PVD->getFunctionScopeIndex()) && 2436 !CGM.getCodeGenOpts().NullPointerIsValid) 2437 AI->addAttr(llvm::Attribute::NonNull); 2438 2439 QualType OTy = PVD->getOriginalType(); 2440 if (const auto *ArrTy = 2441 getContext().getAsConstantArrayType(OTy)) { 2442 // A C99 array parameter declaration with the static keyword also 2443 // indicates dereferenceability, and if the size is constant we can 2444 // use the dereferenceable attribute (which requires the size in 2445 // bytes). 2446 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2447 QualType ETy = ArrTy->getElementType(); 2448 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2449 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2450 ArrSize) { 2451 llvm::AttrBuilder Attrs; 2452 Attrs.addDereferenceableAttr( 2453 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize); 2454 AI->addAttrs(Attrs); 2455 } else if (getContext().getTargetAddressSpace(ETy) == 0 && 2456 !CGM.getCodeGenOpts().NullPointerIsValid) { 2457 AI->addAttr(llvm::Attribute::NonNull); 2458 } 2459 } 2460 } else if (const auto *ArrTy = 2461 getContext().getAsVariableArrayType(OTy)) { 2462 // For C99 VLAs with the static keyword, we don't know the size so 2463 // we can't use the dereferenceable attribute, but in addrspace(0) 2464 // we know that it must be nonnull. 2465 if (ArrTy->getSizeModifier() == VariableArrayType::Static && 2466 !getContext().getTargetAddressSpace(ArrTy->getElementType()) && 2467 !CGM.getCodeGenOpts().NullPointerIsValid) 2468 AI->addAttr(llvm::Attribute::NonNull); 2469 } 2470 2471 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2472 if (!AVAttr) 2473 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2474 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2475 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2476 // If alignment-assumption sanitizer is enabled, we do *not* add 2477 // alignment attribute here, but emit normal alignment assumption, 2478 // so the UBSAN check could function. 2479 llvm::Value *AlignmentValue = 2480 EmitScalarExpr(AVAttr->getAlignment()); 2481 llvm::ConstantInt *AlignmentCI = 2482 cast<llvm::ConstantInt>(AlignmentValue); 2483 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(llvm::MaybeAlign( 2484 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)))); 2485 } 2486 } 2487 2488 if (Arg->getType().isRestrictQualified()) 2489 AI->addAttr(llvm::Attribute::NoAlias); 2490 2491 // LLVM expects swifterror parameters to be used in very restricted 2492 // ways. Copy the value into a less-restricted temporary. 2493 if (FI.getExtParameterInfo(ArgNo).getABI() 2494 == ParameterABI::SwiftErrorResult) { 2495 QualType pointeeTy = Ty->getPointeeType(); 2496 assert(pointeeTy->isPointerType()); 2497 Address temp = 2498 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2499 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2500 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2501 Builder.CreateStore(incomingErrorValue, temp); 2502 V = temp.getPointer(); 2503 2504 // Push a cleanup to copy the value back at the end of the function. 
2505 // The convention does not guarantee that the value will be written 2506 // back if the function exits with an unwind exception. 2507 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2508 } 2509 2510 // Ensure the argument is the correct type. 2511 if (V->getType() != ArgI.getCoerceToType()) 2512 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2513 2514 if (isPromoted) 2515 V = emitArgumentDemotion(*this, Arg, V); 2516 2517 // Because of merging of function types from multiple decls it is 2518 // possible for the type of an argument to not match the corresponding 2519 // type in the function type. Since we are codegening the callee 2520 // in here, add a cast to the argument type. 2521 llvm::Type *LTy = ConvertType(Arg->getType()); 2522 if (V->getType() != LTy) 2523 V = Builder.CreateBitCast(V, LTy); 2524 2525 ArgVals.push_back(ParamValue::forDirect(V)); 2526 break; 2527 } 2528 2529 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2530 Arg->getName()); 2531 2532 // Pointer to store into. 2533 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2534 2535 // Fast-isel and the optimizer generally like scalar values better than 2536 // FCAs, so we flatten them if this is safe to do for this argument. 2537 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2538 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2539 STy->getNumElements() > 1) { 2540 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2541 llvm::Type *DstTy = Ptr.getElementType(); 2542 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2543 2544 Address AddrToStoreInto = Address::invalid(); 2545 if (SrcSize <= DstSize) { 2546 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2547 } else { 2548 AddrToStoreInto = 2549 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2550 } 2551 2552 assert(STy->getNumElements() == NumIRArgs); 2553 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2554 auto AI = FnArgs[FirstIRArg + i]; 2555 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2556 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); 2557 Builder.CreateStore(AI, EltPtr); 2558 } 2559 2560 if (SrcSize > DstSize) { 2561 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2562 } 2563 2564 } else { 2565 // Simple case, just do a coerced store of the argument into the alloca. 2566 assert(NumIRArgs == 1); 2567 auto AI = FnArgs[FirstIRArg]; 2568 AI->setName(Arg->getName() + ".coerce"); 2569 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); 2570 } 2571 2572 // Match to what EmitParmDecl is expecting for this type. 2573 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2574 llvm::Value *V = 2575 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2576 if (isPromoted) 2577 V = emitArgumentDemotion(*this, Arg, V); 2578 ArgVals.push_back(ParamValue::forDirect(V)); 2579 } else { 2580 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2581 } 2582 break; 2583 } 2584 2585 case ABIArgInfo::CoerceAndExpand: { 2586 // Reconstruct into a temporary. 
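      // E.g. (sketch): a struct { float f; int i; } coerced and expanded to
      // the sequence (float, i32) arrives as two scalar IR arguments; they
      // are stored back element by element into a temporary laid out as the
      // coercion struct, skipping any padding elements of that struct.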
2587       Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2588       ArgVals.push_back(ParamValue::forIndirect(alloca));
2589
2590       auto coercionType = ArgI.getCoerceAndExpandType();
2591       alloca = Builder.CreateElementBitCast(alloca, coercionType);
2592
2593       unsigned argIndex = FirstIRArg;
2594       for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2595         llvm::Type *eltType = coercionType->getElementType(i);
2596         if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2597           continue;
2598
2599         auto eltAddr = Builder.CreateStructGEP(alloca, i);
2600         auto elt = FnArgs[argIndex++];
2601         Builder.CreateStore(elt, eltAddr);
2602       }
2603       assert(argIndex == FirstIRArg + NumIRArgs);
2604       break;
2605     }
2606
2607     case ABIArgInfo::Expand: {
2608       // If this structure was expanded into multiple arguments then
2609       // we need to create a temporary and reconstruct it from the
2610       // arguments.
2611       Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2612       LValue LV = MakeAddrLValue(Alloca, Ty);
2613       ArgVals.push_back(ParamValue::forIndirect(Alloca));
2614
2615       auto FnArgIter = FnArgs.begin() + FirstIRArg;
2616       ExpandTypeFromArgs(Ty, LV, FnArgIter);
2617       assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2618       for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2619         auto AI = FnArgs[FirstIRArg + i];
2620         AI->setName(Arg->getName() + "." + Twine(i));
2621       }
2622       break;
2623     }
2624
2625     case ABIArgInfo::Ignore:
2626       assert(NumIRArgs == 0);
2627       // Initialize the local variable appropriately.
2628       if (!hasScalarEvaluationKind(Ty)) {
2629         ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2630       } else {
2631         llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2632         ArgVals.push_back(ParamValue::forDirect(U));
2633       }
2634       break;
2635     }
2636   }
2637
2638   if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2639     for (int I = Args.size() - 1; I >= 0; --I)
2640       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2641   } else {
2642     for (unsigned I = 0, E = Args.size(); I != E; ++I)
2643       EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2644   }
2645 }
2646
2647 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2648   while (insn->use_empty()) {
2649     llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2650     if (!bitcast) return;
2651
2652     // This is "safe" because we would have used a ConstantExpr otherwise.
2653     insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2654     bitcast->eraseFromParent();
2655   }
2656 }
2657
2658 /// Try to emit a fused autorelease of a return result.
2659 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2660                                                     llvm::Value *result) {
2661   // The result must be the last instruction in the current block.
2662   llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2663   if (BB->empty()) return nullptr;
2664   if (&BB->back() != result) return nullptr;
2665
2666   llvm::Type *resultType = result->getType();
2667
2668   // result is in a BasicBlock and is therefore an Instruction.
2669   llvm::Instruction *generator = cast<llvm::Instruction>(result);
2670
2671   SmallVector<llvm::Instruction *, 4> InstsToKill;
2672
2673   // Look for:
2674   //   %generator = bitcast %type1* %generator2 to %type2*
2675   while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2676     // We would have emitted this as a constant if the operand weren't
2677     // an Instruction.
2678     generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2679
2680     // Require the generator to be immediately followed by the cast.
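    // In other words, the only thing allowed between the operand and its
    // cast is straight-line bitcast chaining (sketch):
    //   %inner = call i8* @objc_retain(i8* %x)
    //   %generator = bitcast i8* %inner to %T*   ; immediately follows %inner
    // with no unrelated instructions interleaved.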
2681 if (generator->getNextNode() != bitcast) 2682 return nullptr; 2683 2684 InstsToKill.push_back(bitcast); 2685 } 2686 2687 // Look for: 2688 // %generator = call i8* @objc_retain(i8* %originalResult) 2689 // or 2690 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult) 2691 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator); 2692 if (!call) return nullptr; 2693 2694 bool doRetainAutorelease; 2695 2696 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) { 2697 doRetainAutorelease = true; 2698 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints() 2699 .objc_retainAutoreleasedReturnValue) { 2700 doRetainAutorelease = false; 2701 2702 // If we emitted an assembly marker for this call (and the 2703 // ARCEntrypoints field should have been set if so), go looking 2704 // for that call. If we can't find it, we can't do this 2705 // optimization. But it should always be the immediately previous 2706 // instruction, unless we needed bitcasts around the call. 2707 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 2708 llvm::Instruction *prev = call->getPrevNode(); 2709 assert(prev); 2710 if (isa<llvm::BitCastInst>(prev)) { 2711 prev = prev->getPrevNode(); 2712 assert(prev); 2713 } 2714 assert(isa<llvm::CallInst>(prev)); 2715 assert(cast<llvm::CallInst>(prev)->getCalledValue() == 2716 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 2717 InstsToKill.push_back(prev); 2718 } 2719 } else { 2720 return nullptr; 2721 } 2722 2723 result = call->getArgOperand(0); 2724 InstsToKill.push_back(call); 2725 2726 // Keep killing bitcasts, for sanity. Note that we no longer care 2727 // about precise ordering as long as there's exactly one use. 2728 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 2729 if (!bitcast->hasOneUse()) break; 2730 InstsToKill.push_back(bitcast); 2731 result = bitcast->getOperand(0); 2732 } 2733 2734 // Delete all the unnecessary instructions, from latest to earliest. 2735 for (auto *I : InstsToKill) 2736 I->eraseFromParent(); 2737 2738 // Do the fused retain/autorelease if we were asked to. 2739 if (doRetainAutorelease) 2740 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 2741 2742 // Cast back to the result type. 2743 return CGF.Builder.CreateBitCast(result, resultType); 2744 } 2745 2746 /// If this is a +1 of the value of an immutable 'self', remove it. 2747 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 2748 llvm::Value *result) { 2749 // This is only applicable to a method with an immutable 'self'. 2750 const ObjCMethodDecl *method = 2751 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 2752 if (!method) return nullptr; 2753 const VarDecl *self = method->getSelfDecl(); 2754 if (!self->getType().isConstQualified()) return nullptr; 2755 2756 // Look for a retain call. 2757 llvm::CallInst *retainCall = 2758 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 2759 if (!retainCall || 2760 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain) 2761 return nullptr; 2762 2763 // Look for an ordinary load of 'self'. 2764 llvm::Value *retainedValue = retainCall->getArgOperand(0); 2765 llvm::LoadInst *load = 2766 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 2767 if (!load || load->isAtomic() || load->isVolatile() || 2768 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 2769 return nullptr; 2770 2771 // Okay! Burn it all down. 
This relies for correctness on the
2772   // assumption that the retain is emitted as part of the return and
2773   // that thereafter everything is used "linearly".
2774   llvm::Type *resultType = result->getType();
2775   eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2776   assert(retainCall->use_empty());
2777   retainCall->eraseFromParent();
2778   eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2779
2780   return CGF.Builder.CreateBitCast(load, resultType);
2781 }
2782
2783 /// Emit an ARC autorelease of the result of a function.
2784 ///
2785 /// \return the value to actually return from the function
2786 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2787                                             llvm::Value *result) {
2788   // If we're returning 'self', kill the initial retain. This is a
2789   // heuristic attempt to "encourage correctness" in the really unfortunate
2790   // case where we have a return of self during a dealloc and we desperately
2791   // need to avoid the possible autorelease.
2792   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2793     return self;
2794
2795   // At -O0, try to emit a fused retain/autorelease.
2796   if (CGF.shouldUseFusedARCCalls())
2797     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2798       return fused;
2799
2800   return CGF.EmitARCAutoreleaseReturnValue(result);
2801 }
2802
2803 /// Heuristically search for a dominating store to the return-value slot.
2804 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2805   // Check if a User is a store whose pointer operand is the ReturnValue.
2806   // We are looking for stores to the ReturnValue, not for stores of the
2807   // ReturnValue to some other location.
2808   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2809     auto *SI = dyn_cast<llvm::StoreInst>(U);
2810     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2811       return nullptr;
2812     // These aren't actually possible for non-coerced returns, and we
2813     // only care about non-coerced returns on this code path.
2814     assert(!SI->isAtomic() && !SI->isVolatile());
2815     return SI;
2816   };
2817   // If there are multiple uses of the return-value slot, just check
2818   // for something immediately preceding the IP. Sometimes this can
2819   // happen with how we generate implicit-returns; it can also happen
2820   // with noreturn cleanups.
2821   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2822     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2823     if (IP->empty()) return nullptr;
2824     llvm::Instruction *I = &IP->back();
2825
2826     // Skip lifetime markers
2827     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2828                                             IE = IP->rend();
2829          II != IE; ++II) {
2830       if (llvm::IntrinsicInst *Intrinsic =
2831               dyn_cast<llvm::IntrinsicInst>(&*II)) {
2832         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2833           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2834           ++II;
2835           if (II == IE)
2836             break;
2837           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2838             continue;
2839         }
2840       }
2841       I = &*II;
2842       break;
2843     }
2844
2845     return GetStoreIfValid(I);
2846   }
2847
2848   llvm::StoreInst *store =
2849       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2850   if (!store) return nullptr;
2851
2852   // Now do a quick-and-dirty dominance check: just walk up the
2853   // single-predecessors chain from the current insertion point.
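  // E.g. if the blocks form a single chain (sketch)
  //   %store.block -> %cleanup.block -> %insert.block
  // where each block is the unique predecessor of the next, the store is
  // known to dominate the insertion point; any block with multiple
  // predecessors along the way makes us conservatively give up.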
2854   llvm::BasicBlock *StoreBB = store->getParent();
2855   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2856   while (IP != StoreBB) {
2857     if (!(IP = IP->getSinglePredecessor()))
2858       return nullptr;
2859   }
2860
2861   // Okay, the store's basic block dominates the insertion point; we
2862   // can do our thing.
2863   return store;
2864 }
2865
2866 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2867                                          bool EmitRetDbgLoc,
2868                                          SourceLocation EndLoc) {
2869   if (FI.isNoReturn()) {
2870     // Noreturn functions don't return.
2871     EmitUnreachable(EndLoc);
2872     return;
2873   }
2874
2875   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2876     // Naked functions don't have epilogues.
2877     Builder.CreateUnreachable();
2878     return;
2879   }
2880
2881   // Functions with no result always return void.
2882   if (!ReturnValue.isValid()) {
2883     Builder.CreateRetVoid();
2884     return;
2885   }
2886
2887   llvm::DebugLoc RetDbgLoc;
2888   llvm::Value *RV = nullptr;
2889   QualType RetTy = FI.getReturnType();
2890   const ABIArgInfo &RetAI = FI.getReturnInfo();
2891
2892   switch (RetAI.getKind()) {
2893   case ABIArgInfo::InAlloca:
2894     // Aggregates get evaluated directly into the destination. Sometimes we
2895     // need to return the sret value in a register, though.
2896     assert(hasAggregateEvaluationKind(RetTy));
2897     if (RetAI.getInAllocaSRet()) {
2898       llvm::Function::arg_iterator EI = CurFn->arg_end();
2899       --EI;
2900       llvm::Value *ArgStruct = &*EI;
2901       llvm::Value *SRet = Builder.CreateStructGEP(
2902           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2903       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2904     }
2905     break;
2906
2907   case ABIArgInfo::Indirect: {
2908     auto AI = CurFn->arg_begin();
2909     if (RetAI.isSRetAfterThis())
2910       ++AI;
2911     switch (getEvaluationKind(RetTy)) {
2912     case TEK_Complex: {
2913       ComplexPairTy RT =
2914           EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2915       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2916                          /*isInit*/ true);
2917       break;
2918     }
2919     case TEK_Aggregate:
2920       // Do nothing; aggregates get evaluated directly into the destination.
2921       break;
2922     case TEK_Scalar:
2923       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2924                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
2925                         /*isInit*/ true);
2926       break;
2927     }
2928     break;
2929   }
2930
2931   case ABIArgInfo::Extend:
2932   case ABIArgInfo::Direct:
2933     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2934         RetAI.getDirectOffset() == 0) {
2935       // The internal return value temp will always have
2936       // pointer-to-return-type type; just do a load.
2937
2938       // If there is a dominating store to ReturnValue, we can elide
2939       // the load, zap the store, and usually zap the alloca.
2940       if (llvm::StoreInst *SI =
2941               findDominatingStoreToReturnValue(*this)) {
2942         // Reuse the debug location from the store unless there is
2943         // cleanup code to be emitted between the store and return
2944         // instruction.
2945         if (EmitRetDbgLoc && !AutoreleaseResult)
2946           RetDbgLoc = SI->getDebugLoc();
2947         // Get the stored value and nuke the now-dead store.
2948         RV = SI->getValueOperand();
2949         SI->eraseFromParent();
2950
2951       // Otherwise, we have to do a simple load.
2952       } else {
2953         RV = Builder.CreateLoad(ReturnValue);
2954       }
2955     } else {
2956       // If the value is offset in memory, apply the offset now.
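      // E.g. a Direct return with getDirectOffset() == 4 (hypothetical ABI)
      // means the interesting bytes start four bytes into the return slot,
      // so emitAddressAtOffset applies the byte offset before the coerced
      // load.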
2957       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2958
2959       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2960     }
2961
2962     // In ARC, end functions that return a retainable type with a call
2963     // to objc_autoreleaseReturnValue.
2964     if (AutoreleaseResult) {
2965 #ifndef NDEBUG
2966       // Type::isObjCRetainableType has to be called on a QualType that hasn't
2967       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2968       // original return type of the FunctionDecl, ObjCMethodDecl, or
2969       // BlockDecl from CurCodeDecl or BlockInfo.
2970       QualType RT;
2971
2972       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2973         RT = FD->getReturnType();
2974       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2975         RT = MD->getReturnType();
2976       else if (isa<BlockDecl>(CurCodeDecl))
2977         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2978       else
2979         llvm_unreachable("Unexpected function/method type");
2980
2981       assert(getLangOpts().ObjCAutoRefCount &&
2982              !FI.isReturnsRetained() &&
2983              RT->isObjCRetainableType());
2984 #endif
2985       RV = emitAutoreleaseOfResult(*this, RV);
2986     }
2987
2988     break;
2989
2990   case ABIArgInfo::Ignore:
2991     break;
2992
2993   case ABIArgInfo::CoerceAndExpand: {
2994     auto coercionType = RetAI.getCoerceAndExpandType();
2995
2996     // Load all of the coerced elements out into results.
2997     llvm::SmallVector<llvm::Value*, 4> results;
2998     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2999     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3000       auto coercedEltType = coercionType->getElementType(i);
3001       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3002         continue;
3003
3004       auto eltAddr = Builder.CreateStructGEP(addr, i);
3005       auto elt = Builder.CreateLoad(eltAddr);
3006       results.push_back(elt);
3007     }
3008
3009     // If we have one result, it's the single direct result type.
3010     if (results.size() == 1) {
3011       RV = results[0];
3012
3013     // Otherwise, we need to make a first-class aggregate.
3014     } else {
3015       // Construct a return type that lacks padding elements.
3016       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3017
3018       RV = llvm::UndefValue::get(returnType);
3019       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3020         RV = Builder.CreateInsertValue(RV, results[i], i);
3021       }
3022     }
3023     break;
3024   }
3025
3026   case ABIArgInfo::Expand:
3027     llvm_unreachable("Invalid ABI kind for return argument");
3028   }
3029
3030   llvm::Instruction *Ret;
3031   if (RV) {
3032     EmitReturnValueCheck(RV);
3033     Ret = Builder.CreateRet(RV);
3034   } else {
3035     Ret = Builder.CreateRetVoid();
3036   }
3037
3038   if (RetDbgLoc)
3039     Ret->setDebugLoc(std::move(RetDbgLoc));
3040 }
3041
3042 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3043   // A current decl may not be available when emitting vtable thunks.
3044   if (!CurCodeDecl)
3045     return;
3046
3047   // If the return block isn't reachable, neither is this check, so don't emit
3048   // it.
3049   if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3050     return;
3051
3052   ReturnsNonNullAttr *RetNNAttr = nullptr;
3053   if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3054     RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3055
3056   if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3057     return;
3058
3059   // Prefer the returns_nonnull attribute if it's present.
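  // The two situations being distinguished (illustrative):
  //   __attribute__((returns_nonnull)) int *f();  // -> NonnullReturn handler
  //   int *_Nonnull g();                          // -> NullabilityReturn handler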
3060 SourceLocation AttrLoc; 3061 SanitizerMask CheckKind; 3062 SanitizerHandler Handler; 3063 if (RetNNAttr) { 3064 assert(!requiresReturnValueNullabilityCheck() && 3065 "Cannot check nullability and the nonnull attribute"); 3066 AttrLoc = RetNNAttr->getLocation(); 3067 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3068 Handler = SanitizerHandler::NonnullReturn; 3069 } else { 3070 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3071 if (auto *TSI = DD->getTypeSourceInfo()) 3072 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3073 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3074 CheckKind = SanitizerKind::NullabilityReturn; 3075 Handler = SanitizerHandler::NullabilityReturn; 3076 } 3077 3078 SanitizerScope SanScope(this); 3079 3080 // Make sure the "return" source location is valid. If we're checking a 3081 // nullability annotation, make sure the preconditions for the check are met. 3082 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3083 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3084 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3085 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3086 if (requiresReturnValueNullabilityCheck()) 3087 CanNullCheck = 3088 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3089 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3090 EmitBlock(Check); 3091 3092 // Now do the null check. 3093 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3094 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3095 llvm::Value *DynamicData[] = {SLocPtr}; 3096 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3097 3098 EmitBlock(NoCheck); 3099 3100 #ifndef NDEBUG 3101 // The return location should not be used after the check has been emitted. 3102 ReturnLocation = Address::invalid(); 3103 #endif 3104 } 3105 3106 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3107 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3108 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3109 } 3110 3111 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3112 QualType Ty) { 3113 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3114 // placeholders. 3115 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3116 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3117 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3118 3119 // FIXME: When we generate this IR in one pass, we shouldn't need 3120 // this win32-specific alignment hack. 3121 CharUnits Align = CharUnits::fromQuantity(4); 3122 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3123 3124 return AggValueSlot::forAddr(Address(Placeholder, Align), 3125 Ty.getQualifiers(), 3126 AggValueSlot::IsNotDestructed, 3127 AggValueSlot::DoesNotNeedGCBarriers, 3128 AggValueSlot::IsNotAliased, 3129 AggValueSlot::DoesNotOverlap); 3130 } 3131 3132 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3133 const VarDecl *param, 3134 SourceLocation loc) { 3135 // StartFunction converted the ABI-lowered parameter(s) into a 3136 // local alloca. We need to turn that into an r-value suitable 3137 // for EmitCall. 
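// Example (editorial, hedged) for the return-value check emitted above:
// given
//   __attribute__((returns_nonnull)) char *get_buffer(void);
// -fsanitize=returns-nonnull-attribute turns each return in the definition
// into, roughly:
//   %nonnull = icmp ne i8* %ret, null
//   br i1 %nonnull, label %cont, label %handler
// where %handler reports the violation with the attribute's source location.
// The nullability variant (_Nonnull return types) follows the same shape,
// gated on the precondition computed above. Names are illustrative.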
3138 Address local = GetAddrOfLocalVar(param); 3139 3140 QualType type = param->getType(); 3141 3142 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3143 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3144 } 3145 3146 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3147 // but the argument needs to be the original pointer. 3148 if (type->isReferenceType()) { 3149 args.add(RValue::get(Builder.CreateLoad(local)), type); 3150 3151 // In ARC, move out of consumed arguments so that the release cleanup 3152 // entered by StartFunction doesn't cause an over-release. This isn't 3153 // optimal -O0 code generation, but it should get cleaned up when 3154 // optimization is enabled. This also assumes that delegate calls are 3155 // performed exactly once for a set of arguments, but that should be safe. 3156 } else if (getLangOpts().ObjCAutoRefCount && 3157 param->hasAttr<NSConsumedAttr>() && 3158 type->isObjCRetainableType()) { 3159 llvm::Value *ptr = Builder.CreateLoad(local); 3160 auto null = 3161 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3162 Builder.CreateStore(null, local); 3163 args.add(RValue::get(ptr), type); 3164 3165 // For the most part, we just need to load the alloca, except that 3166 // aggregate r-values are actually pointers to temporaries. 3167 } else { 3168 args.add(convertTempToRValue(local, type, loc), type); 3169 } 3170 3171 // Deactivate the cleanup for the callee-destructed param that was pushed. 3172 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk && 3173 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3174 param->needsDestruction(getContext())) { 3175 EHScopeStack::stable_iterator cleanup = 3176 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3177 assert(cleanup.isValid() && 3178 "cleanup for callee-destructed param not recorded"); 3179 // This unreachable is a temporary marker which will be removed later. 3180 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3181 args.addArgCleanupDeactivation(cleanup, isActive); 3182 } 3183 } 3184 3185 static bool isProvablyNull(llvm::Value *addr) { 3186 return isa<llvm::ConstantPointerNull>(addr); 3187 } 3188 3189 /// Emit the actual writing-back of a writeback. 3190 static void emitWriteback(CodeGenFunction &CGF, 3191 const CallArgList::Writeback &writeback) { 3192 const LValue &srcLV = writeback.Source; 3193 Address srcAddr = srcLV.getAddress(CGF); 3194 assert(!isProvablyNull(srcAddr.getPointer()) && 3195 "shouldn't have writeback for provably null argument"); 3196 3197 llvm::BasicBlock *contBB = nullptr; 3198 3199 // If the argument wasn't provably non-null, we need to null check 3200 // before doing the store. 3201 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3202 CGF.CGM.getDataLayout()); 3203 if (!provablyNonNull) { 3204 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3205 contBB = CGF.createBasicBlock("icr.done"); 3206 3207 llvm::Value *isNull = 3208 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3209 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3210 CGF.EmitBlock(writebackBB); 3211 } 3212 3213 // Load the value to writeback. 3214 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3215 3216 // Cast it back, in case we're writing an id to a Foo* or something. 3217 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3218 "icr.writeback-cast"); 3219 3220 // Perform the writeback. 
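// Control-flow sketch (editorial, hedged) for the writeback above when the
// source address is not provably non-null:
//   %isnull = icmp eq i8** %src, null        ; "icr.isnull"
//   br i1 %isnull, label %icr.done, label %icr.writeback
// icr.writeback:                             ; copy the temporary back
//   ...
//   br label %icr.done
// icr.done:
// The block names match the createBasicBlock calls above; the rest of the
// IR shown is an assumption for illustration.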
3221 3222 // If we have a "to use" value, it's something we need to emit a use 3223 // of. This has to be carefully threaded in: if it's done after the 3224 // release it's potentially undefined behavior (and the optimizer 3225 // will ignore it), and if it happens before the retain then the 3226 // optimizer could move the release there. 3227 if (writeback.ToUse) { 3228 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3229 3230 // Retain the new value. No need to block-copy here: the block's 3231 // being passed up the stack. 3232 value = CGF.EmitARCRetainNonBlock(value); 3233 3234 // Emit the intrinsic use here. 3235 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3236 3237 // Load the old value (primitively). 3238 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3239 3240 // Put the new value in place (primitively). 3241 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3242 3243 // Release the old value. 3244 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3245 3246 // Otherwise, we can just do a normal lvalue store. 3247 } else { 3248 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3249 } 3250 3251 // Jump to the continuation block. 3252 if (!provablyNonNull) 3253 CGF.EmitBlock(contBB); 3254 } 3255 3256 static void emitWritebacks(CodeGenFunction &CGF, 3257 const CallArgList &args) { 3258 for (const auto &I : args.writebacks()) 3259 emitWriteback(CGF, I); 3260 } 3261 3262 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3263 const CallArgList &CallArgs) { 3264 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3265 CallArgs.getCleanupsToDeactivate(); 3266 // Iterate in reverse to increase the likelihood of popping the cleanup. 3267 for (const auto &I : llvm::reverse(Cleanups)) { 3268 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3269 I.IsActiveIP->eraseFromParent(); 3270 } 3271 } 3272 3273 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3274 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3275 if (uop->getOpcode() == UO_AddrOf) 3276 return uop->getSubExpr(); 3277 return nullptr; 3278 } 3279 3280 /// Emit an argument that's being passed call-by-writeback. That is, 3281 /// we are passing the address of an __autoreleased temporary; it 3282 /// might be copy-initialized with the current value of the given 3283 /// address, but it will definitely be copied out of after the call. 3284 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3285 const ObjCIndirectCopyRestoreExpr *CRE) { 3286 LValue srcLV; 3287 3288 // Make an optimistic effort to emit the address as an l-value. 3289 // This can fail if the argument expression is more complicated. 3290 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3291 srcLV = CGF.EmitLValue(lvExpr); 3292 3293 // Otherwise, just emit it as a scalar. 3294 } else { 3295 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3296 3297 QualType srcAddrType = 3298 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3299 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3300 } 3301 Address srcAddr = srcLV.getAddress(CGF); 3302 3303 // The dest and src types don't necessarily match in LLVM terms 3304 // because of the crazy ObjC compatibility rules. 3305 3306 llvm::PointerType *destType = 3307 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3308 3309 // If the address is a constant null, just pass the appropriate null. 
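// Worked example (editorial, hedged) of call-by-writeback, which
// emitWritebackArg (begun above, continued below) implements for ObjC
// pointer out-parameters:
//   void fill(NSError **outError);   // parameter is __autoreleasing
//   __strong NSError *e = nil;
//   fill(&e);
// The caller passes the address of a fresh temporary ("icr.temp"),
// optionally copy-initialized from e, and after the call stores the result
// back into e; the precise retains and releases are those spelled out in
// emitWriteback above. The function and variable names are illustrative.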
3310 if (isProvablyNull(srcAddr.getPointer())) { 3311 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3312 CRE->getType()); 3313 return; 3314 } 3315 3316 // Create the temporary. 3317 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3318 CGF.getPointerAlign(), 3319 "icr.temp"); 3320 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3321 // and that cleanup will be conditional if we can't prove that the l-value 3322 // isn't null, so we need to register a dominating point so that the cleanups 3323 // system will make valid IR. 3324 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3325 3326 // Zero-initialize it if we're not doing a copy-initialization. 3327 bool shouldCopy = CRE->shouldCopy(); 3328 if (!shouldCopy) { 3329 llvm::Value *null = 3330 llvm::ConstantPointerNull::get( 3331 cast<llvm::PointerType>(destType->getElementType())); 3332 CGF.Builder.CreateStore(null, temp); 3333 } 3334 3335 llvm::BasicBlock *contBB = nullptr; 3336 llvm::BasicBlock *originBB = nullptr; 3337 3338 // If the address is *not* known to be non-null, we need to switch. 3339 llvm::Value *finalArgument; 3340 3341 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3342 CGF.CGM.getDataLayout()); 3343 if (provablyNonNull) { 3344 finalArgument = temp.getPointer(); 3345 } else { 3346 llvm::Value *isNull = 3347 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3348 3349 finalArgument = CGF.Builder.CreateSelect(isNull, 3350 llvm::ConstantPointerNull::get(destType), 3351 temp.getPointer(), "icr.argument"); 3352 3353 // If we need to copy, then the load has to be conditional, which 3354 // means we need control flow. 3355 if (shouldCopy) { 3356 originBB = CGF.Builder.GetInsertBlock(); 3357 contBB = CGF.createBasicBlock("icr.cont"); 3358 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3359 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3360 CGF.EmitBlock(copyBB); 3361 condEval.begin(CGF); 3362 } 3363 } 3364 3365 llvm::Value *valueToUse = nullptr; 3366 3367 // Perform a copy if necessary. 3368 if (shouldCopy) { 3369 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3370 assert(srcRV.isScalar()); 3371 3372 llvm::Value *src = srcRV.getScalarVal(); 3373 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3374 "icr.cast"); 3375 3376 // Use an ordinary store, not a store-to-lvalue. 3377 CGF.Builder.CreateStore(src, temp); 3378 3379 // If optimization is enabled, and the value was held in a 3380 // __strong variable, we need to tell the optimizer that this 3381 // value has to stay alive until we're doing the store back. 3382 // This is because the temporary is effectively unretained, 3383 // and so otherwise we can violate the high-level semantics. 3384 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3385 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3386 valueToUse = src; 3387 } 3388 } 3389 3390 // Finish the control flow if we needed it. 3391 if (shouldCopy && !provablyNonNull) { 3392 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3393 CGF.EmitBlock(contBB); 3394 3395 // Make a phi for the value to intrinsically use. 
3396 if (valueToUse) { 3397 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3398 "icr.to-use"); 3399 phiToUse->addIncoming(valueToUse, copyBB); 3400 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3401 originBB); 3402 valueToUse = phiToUse; 3403 } 3404 3405 condEval.end(CGF); 3406 } 3407 3408 args.addWriteback(srcLV, temp, valueToUse); 3409 args.add(RValue::get(finalArgument), CRE->getType()); 3410 } 3411 3412 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3413 assert(!StackBase); 3414 3415 // Save the stack. 3416 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3417 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3418 } 3419 3420 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3421 if (StackBase) { 3422 // Restore the stack after the call. 3423 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3424 CGF.Builder.CreateCall(F, StackBase); 3425 } 3426 } 3427 3428 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3429 SourceLocation ArgLoc, 3430 AbstractCallee AC, 3431 unsigned ParmNum) { 3432 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3433 SanOpts.has(SanitizerKind::NullabilityArg))) 3434 return; 3435 3436 // The param decl may be missing in a variadic function. 3437 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3438 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3439 3440 // Prefer the nonnull attribute if it's present. 3441 const NonNullAttr *NNAttr = nullptr; 3442 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 3443 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 3444 3445 bool CanCheckNullability = false; 3446 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 3447 auto Nullability = PVD->getType()->getNullability(getContext()); 3448 CanCheckNullability = Nullability && 3449 *Nullability == NullabilityKind::NonNull && 3450 PVD->getTypeSourceInfo(); 3451 } 3452 3453 if (!NNAttr && !CanCheckNullability) 3454 return; 3455 3456 SourceLocation AttrLoc; 3457 SanitizerMask CheckKind; 3458 SanitizerHandler Handler; 3459 if (NNAttr) { 3460 AttrLoc = NNAttr->getLocation(); 3461 CheckKind = SanitizerKind::NonnullAttribute; 3462 Handler = SanitizerHandler::NonnullArg; 3463 } else { 3464 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 3465 CheckKind = SanitizerKind::NullabilityArg; 3466 Handler = SanitizerHandler::NullabilityArg; 3467 } 3468 3469 SanitizerScope SanScope(this); 3470 assert(RV.isScalar()); 3471 llvm::Value *V = RV.getScalarVal(); 3472 llvm::Value *Cond = 3473 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType())); 3474 llvm::Constant *StaticData[] = { 3475 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 3476 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3477 }; 3478 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 3479 } 3480 3481 void CodeGenFunction::EmitCallArgs( 3482 CallArgList &Args, ArrayRef<QualType> ArgTypes, 3483 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 3484 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 3485 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin())); 3486 3487 // We *have* to evaluate arguments from right to left in the MS C++ ABI, 3488 // because arguments are destroyed left to right in the callee.
As a special 3489 // case, there are certain language constructs that require left-to-right 3490 // evaluation, and in those cases we consider the evaluation order requirement 3491 // to trump the "destruction order is reverse construction order" guarantee. 3492 bool LeftToRight = 3493 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee() 3494 ? Order == EvaluationOrder::ForceLeftToRight 3495 : Order != EvaluationOrder::ForceRightToLeft; 3496 3497 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg, 3498 RValue EmittedArg) { 3499 if (!AC.hasFunctionDecl() || I >= AC.getNumParams()) 3500 return; 3501 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>(); 3502 if (PS == nullptr) 3503 return; 3504 3505 const auto &Context = getContext(); 3506 auto SizeTy = Context.getSizeType(); 3507 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy)); 3508 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?"); 3509 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T, 3510 EmittedArg.getScalarVal(), 3511 PS->isDynamic()); 3512 Args.add(RValue::get(V), SizeTy); 3513 // If we're emitting args in reverse, be sure to do so with 3514 // pass_object_size, as well. 3515 if (!LeftToRight) 3516 std::swap(Args.back(), *(&Args.back() - 1)); 3517 }; 3518 3519 // Insert a stack save if we're going to need any inalloca args. 3520 bool HasInAllocaArgs = false; 3521 if (CGM.getTarget().getCXXABI().isMicrosoft()) { 3522 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end(); 3523 I != E && !HasInAllocaArgs; ++I) 3524 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I); 3525 if (HasInAllocaArgs) { 3526 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 3527 Args.allocateArgumentMemory(*this); 3528 } 3529 } 3530 3531 // Evaluate each argument in the appropriate order. 3532 size_t CallArgsStart = Args.size(); 3533 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) { 3534 unsigned Idx = LeftToRight ? I : E - I - 1; 3535 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx; 3536 unsigned InitialArgSize = Args.size(); 3537 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of 3538 // the argument and parameter match or the objc method is parameterized. 3539 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) || 3540 getContext().hasSameUnqualifiedType((*Arg)->getType(), 3541 ArgTypes[Idx]) || 3542 (isa<ObjCMethodDecl>(AC.getDecl()) && 3543 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) && 3544 "Argument and parameter types don't match"); 3545 EmitCallArg(Args, *Arg, ArgTypes[Idx]); 3546 // In particular, we depend on it being the last arg in Args, and the 3547 // objectsize bits depend on there only being one arg if !LeftToRight. 3548 assert(InitialArgSize + 1 == Args.size() && 3549 "The code below depends on only adding one arg per EmitCallArg"); 3550 (void)InitialArgSize; 3551 // Since pointer arguments are never emitted as LValue, it is safe to emit 3552 // the non-null argument check for r-values only.
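// Example (editorial, hedged) of the ordering logic above: in the MS C++
// ABI, for
//   void g(A a, B b);   // A and B have non-trivial destructors
//   g(A(), B());
// the arguments are evaluated right to left, so B() is emitted first and
// the Args list is un-reversed afterwards to match the IR signature. When a
// parameter also carries pass_object_size, its implicit size argument is
// emitted immediately after it, and the std::swap above keeps the pair
// adjacent when emission runs right to left.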
3553 if (!Args.back().hasLValue()) { 3554 RValue RVArg = Args.back().getKnownRValue(); 3555 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC, 3556 ParamsToSkip + Idx); 3557 // @llvm.objectsize should never have side-effects and shouldn't need 3558 // destruction/cleanups, so we can safely "emit" it after its arg, 3559 // regardless of right-to-leftness 3560 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg); 3561 } 3562 } 3563 3564 if (!LeftToRight) { 3565 // Un-reverse the arguments we just evaluated so they match up with the LLVM 3566 // IR function. 3567 std::reverse(Args.begin() + CallArgsStart, Args.end()); 3568 } 3569 } 3570 3571 namespace { 3572 3573 struct DestroyUnpassedArg final : EHScopeStack::Cleanup { 3574 DestroyUnpassedArg(Address Addr, QualType Ty) 3575 : Addr(Addr), Ty(Ty) {} 3576 3577 Address Addr; 3578 QualType Ty; 3579 3580 void Emit(CodeGenFunction &CGF, Flags flags) override { 3581 QualType::DestructionKind DtorKind = Ty.isDestructedType(); 3582 if (DtorKind == QualType::DK_cxx_destructor) { 3583 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor(); 3584 assert(!Dtor->isTrivial()); 3585 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false, 3586 /*Delegating=*/false, Addr, Ty); 3587 } else { 3588 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty)); 3589 } 3590 } 3591 }; 3592 3593 struct DisableDebugLocationUpdates { 3594 CodeGenFunction &CGF; 3595 bool disabledDebugInfo; 3596 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) { 3597 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo())) 3598 CGF.disableDebugInfo(); 3599 } 3600 ~DisableDebugLocationUpdates() { 3601 if (disabledDebugInfo) 3602 CGF.enableDebugInfo(); 3603 } 3604 }; 3605 3606 } // end anonymous namespace 3607 3608 RValue CallArg::getRValue(CodeGenFunction &CGF) const { 3609 if (!HasLV) 3610 return RV; 3611 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty); 3612 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap, 3613 LV.isVolatile()); 3614 IsUsed = true; 3615 return RValue::getAggregate(Copy.getAddress(CGF)); 3616 } 3617 3618 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const { 3619 LValue Dst = CGF.MakeAddrLValue(Addr, Ty); 3620 if (!HasLV && RV.isScalar()) 3621 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true); 3622 else if (!HasLV && RV.isComplex()) 3623 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true); 3624 else { 3625 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress(); 3626 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty); 3627 // We assume that call args are never copied into subobjects. 3628 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 3629 HasLV ? 
LV.isVolatileQualified() 3630 : RV.isVolatileQualified()); 3631 } 3632 IsUsed = true; 3633 } 3634 3635 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 3636 QualType type) { 3637 DisableDebugLocationUpdates Dis(*this, E); 3638 if (const ObjCIndirectCopyRestoreExpr *CRE 3639 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 3640 assert(getLangOpts().ObjCAutoRefCount); 3641 return emitWritebackArg(*this, args, CRE); 3642 } 3643 3644 assert(type->isReferenceType() == E->isGLValue() && 3645 "reference binding to unmaterialized r-value!"); 3646 3647 if (E->isGLValue()) { 3648 assert(E->getObjectKind() == OK_Ordinary); 3649 return args.add(EmitReferenceBindingToExpr(E), type); 3650 } 3651 3652 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 3653 3654 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 3655 // However, we still have to push an EH-only cleanup in case we unwind before 3656 // we make it to the call. 3657 if (HasAggregateEvalKind && 3658 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 3659 // If we're using inalloca, use the argument memory. Otherwise, use a 3660 // temporary. 3661 AggValueSlot Slot; 3662 if (args.isUsingInAlloca()) 3663 Slot = createPlaceholderSlot(*this, type); 3664 else 3665 Slot = CreateAggTemp(type, "agg.tmp"); 3666 3667 bool DestroyedInCallee = true, NeedsEHCleanup = true; 3668 if (const auto *RD = type->getAsCXXRecordDecl()) 3669 DestroyedInCallee = RD->hasNonTrivialDestructor(); 3670 else 3671 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 3672 3673 if (DestroyedInCallee) 3674 Slot.setExternallyDestructed(); 3675 3676 EmitAggExpr(E, Slot); 3677 RValue RV = Slot.asRValue(); 3678 args.add(RV, type); 3679 3680 if (DestroyedInCallee && NeedsEHCleanup) { 3681 // Create a no-op GEP between the placeholder and the cleanup so we can 3682 // RAUW it successfully. It also serves as a marker of the first 3683 // instruction where the cleanup is active. 3684 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 3685 type); 3686 // This unreachable is a temporary marker which will be removed later. 3687 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 3688 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 3689 } 3690 return; 3691 } 3692 3693 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 3694 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 3695 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 3696 assert(L.isSimple()); 3697 args.addUncopiedAggregate(L, type); 3698 return; 3699 } 3700 3701 args.add(EmitAnyExprToTemp(E), type); 3702 } 3703 3704 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 3705 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 3706 // implicitly widens null pointer constants that are arguments to varargs 3707 // functions to pointer-sized ints. 3708 if (!getTarget().getTriple().isOSWindows()) 3709 return Arg->getType(); 3710 3711 if (Arg->getType()->isIntegerType() && 3712 getContext().getTypeSize(Arg->getType()) < 3713 getContext().getTargetInfo().getPointerWidth(0) && 3714 Arg->isNullPointerConstant(getContext(), 3715 Expr::NPC_ValueDependentIsNotNull)) { 3716 return getContext().getIntPtrType(); 3717 } 3718 3719 return Arg->getType(); 3720 } 3721 3722 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3723 // optimizer it can aggressively ignore unwind edges. 
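// Example (editorial, hedged) for getVarArgType above: on Win64, system
// headers define NULL as plain 0, so
//   printf("%p", NULL);
// would otherwise pass a 32-bit int through the varargs area. Because the
// literal is a null pointer constant narrower than a pointer, the argument
// type is promoted to intptr_t and a full pointer-sized zero is passed,
// matching MSVC's behavior described in the comment above.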
3724 void 3725 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 3726 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 3727 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 3728 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 3729 CGM.getNoObjCARCExceptionsMetadata()); 3730 } 3731 3732 /// Emits a call to the given no-arguments nounwind runtime function. 3733 llvm::CallInst * 3734 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 3735 const llvm::Twine &name) { 3736 return EmitNounwindRuntimeCall(callee, None, name); 3737 } 3738 3739 /// Emits a call to the given nounwind runtime function. 3740 llvm::CallInst * 3741 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 3742 ArrayRef<llvm::Value *> args, 3743 const llvm::Twine &name) { 3744 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 3745 call->setDoesNotThrow(); 3746 return call; 3747 } 3748 3749 /// Emits a simple call (never an invoke) to the given no-arguments 3750 /// runtime function. 3751 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 3752 const llvm::Twine &name) { 3753 return EmitRuntimeCall(callee, None, name); 3754 } 3755 3756 // Calls which may throw must have operand bundles indicating which funclet 3757 // they are nested within. 3758 SmallVector<llvm::OperandBundleDef, 1> 3759 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 3760 SmallVector<llvm::OperandBundleDef, 1> BundleList; 3761 // There is no need for a funclet operand bundle if we aren't inside a 3762 // funclet. 3763 if (!CurrentFuncletPad) 3764 return BundleList; 3765 3766 // Skip intrinsics which cannot throw. 3767 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 3768 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 3769 return BundleList; 3770 3771 BundleList.emplace_back("funclet", CurrentFuncletPad); 3772 return BundleList; 3773 } 3774 3775 /// Emits a simple call (never an invoke) to the given runtime function. 3776 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 3777 ArrayRef<llvm::Value *> args, 3778 const llvm::Twine &name) { 3779 llvm::CallInst *call = Builder.CreateCall( 3780 callee, args, getBundlesForFunclet(callee.getCallee()), name); 3781 call->setCallingConv(getRuntimeCC()); 3782 return call; 3783 } 3784 3785 /// Emits a call or invoke to the given noreturn runtime function. 3786 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 3787 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 3788 SmallVector<llvm::OperandBundleDef, 1> BundleList = 3789 getBundlesForFunclet(callee.getCallee()); 3790 3791 if (getInvokeDest()) { 3792 llvm::InvokeInst *invoke = 3793 Builder.CreateInvoke(callee, 3794 getUnreachableBlock(), 3795 getInvokeDest(), 3796 args, 3797 BundleList); 3798 invoke->setDoesNotReturn(); 3799 invoke->setCallingConv(getRuntimeCC()); 3800 } else { 3801 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 3802 call->setDoesNotReturn(); 3803 call->setCallingConv(getRuntimeCC()); 3804 Builder.CreateUnreachable(); 3805 } 3806 } 3807 3808 /// Emits a call or invoke instruction to the given nullary runtime function. 3809 llvm::CallBase * 3810 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 3811 const Twine &name) { 3812 return EmitRuntimeCallOrInvoke(callee, None, name); 3813 } 3814 3815 /// Emits a call or invoke instruction to the given runtime function. 
3816 llvm::CallBase * 3817 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 3818 ArrayRef<llvm::Value *> args, 3819 const Twine &name) { 3820 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 3821 call->setCallingConv(getRuntimeCC()); 3822 return call; 3823 } 3824 3825 /// Emits a call or invoke instruction to the given function, depending 3826 /// on the current state of the EH stack. 3827 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, 3828 ArrayRef<llvm::Value *> Args, 3829 const Twine &Name) { 3830 llvm::BasicBlock *InvokeDest = getInvokeDest(); 3831 SmallVector<llvm::OperandBundleDef, 1> BundleList = 3832 getBundlesForFunclet(Callee.getCallee()); 3833 3834 llvm::CallBase *Inst; 3835 if (!InvokeDest) 3836 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 3837 else { 3838 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 3839 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 3840 Name); 3841 EmitBlock(ContBB); 3842 } 3843 3844 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 3845 // optimizer it can aggressively ignore unwind edges. 3846 if (CGM.getLangOpts().ObjCAutoRefCount) 3847 AddObjCARCExceptionMetadata(Inst); 3848 3849 return Inst; 3850 } 3851 3852 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 3853 llvm::Value *New) { 3854 DeferredReplacements.push_back(std::make_pair(Old, New)); 3855 } 3856 3857 namespace { 3858 3859 /// Specify given \p NewAlign as the alignment of return value attribute. If 3860 /// such attribute already exists, re-set it to the maximal one of two options. 3861 LLVM_NODISCARD llvm::AttributeList 3862 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, 3863 const llvm::AttributeList &Attrs, 3864 llvm::Align NewAlign) { 3865 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); 3866 if (CurAlign >= NewAlign) 3867 return Attrs; 3868 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign); 3869 return Attrs 3870 .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex, 3871 llvm::Attribute::AttrKind::Alignment) 3872 .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr); 3873 } 3874 3875 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { 3876 protected: 3877 CodeGenFunction &CGF; 3878 3879 /// We do nothing if this is, or becomes, nullptr. 3880 const AlignedAttrTy *AA = nullptr; 3881 3882 llvm::Value *Alignment = nullptr; // May or may not be a constant. 3883 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. 3884 3885 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) 3886 : CGF(CGF_) { 3887 if (!FuncDecl) 3888 return; 3889 AA = FuncDecl->getAttr<AlignedAttrTy>(); 3890 } 3891 3892 public: 3893 /// If we can, materialize the alignment as an attribute on return value. 3894 LLVM_NODISCARD llvm::AttributeList 3895 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { 3896 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment)) 3897 return Attrs; 3898 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment); 3899 if (!AlignmentCI) 3900 return Attrs; 3901 // We may legitimately have non-power-of-2 alignment here. 3902 // If so, this is UB land, emit it via `@llvm.assume` instead. 
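// Example (editorial, hedged) for the alignment-attribute emitter defined
// above (its power-of-two check continues below): given
//   __attribute__((assume_aligned(64))) void *my_alloc(size_t n);
// a call site can carry the alignment as a return attribute,
//   %p = call align 64 i8* @my_alloc(i64 %n)
// provided the alignment is a constant power of two, there is no offset,
// and the alignment sanitizer is off; otherwise it falls back to an
// @llvm.assume-based alignment assumption. my_alloc is a made-up name.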
3903 if (!AlignmentCI->getValue().isPowerOf2()) 3904 return Attrs; 3905 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( 3906 CGF.getLLVMContext(), Attrs, 3907 llvm::Align( 3908 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))); 3909 AA = nullptr; // We're done. Disallow doing anything else. 3910 return NewAttrs; 3911 } 3912 3913 /// Emit alignment assumption. 3914 /// This is a general fallback that we take if either there is an offset, 3915 /// or the alignment is variable or we are sanitizing for alignment. 3916 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { 3917 if (!AA) 3918 return; 3919 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, 3920 AA->getLocation(), Alignment, OffsetCI); 3921 AA = nullptr; // We're done. Disallow doing anything else. 3922 } 3923 }; 3924 3925 /// Helper data structure to emit `AssumeAlignedAttr`. 3926 class AssumeAlignedAttrEmitter final 3927 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { 3928 public: 3929 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) 3930 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 3931 if (!AA) 3932 return; 3933 // It is guaranteed that the alignment/offset are constants. 3934 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); 3935 if (Expr *Offset = AA->getOffset()) { 3936 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); 3937 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. 3938 OffsetCI = nullptr; 3939 } 3940 } 3941 }; 3942 3943 /// Helper data structure to emit `AllocAlignAttr`. 3944 class AllocAlignAttrEmitter final 3945 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { 3946 public: 3947 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, 3948 const CallArgList &CallArgs) 3949 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 3950 if (!AA) 3951 return; 3952 // Alignment may or may not be a constant, and that is okay. 3953 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] 3954 .getRValue(CGF) 3955 .getScalarVal(); 3956 } 3957 }; 3958 3959 } // namespace 3960 3961 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 3962 const CGCallee &Callee, 3963 ReturnValueSlot ReturnValue, 3964 const CallArgList &CallArgs, 3965 llvm::CallBase **callOrInvoke, 3966 SourceLocation Loc) { 3967 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 3968 3969 assert(Callee.isOrdinary() || Callee.isVirtual()); 3970 3971 // Handle struct-return functions by passing a pointer to the 3972 // location that we would like to return into. 3973 QualType RetTy = CallInfo.getReturnType(); 3974 const ABIArgInfo &RetAI = CallInfo.getReturnInfo(); 3975 3976 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo); 3977 3978 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl(); 3979 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) 3980 // We can only guarantee that a function is called from the correct 3981 // context/function based on the appropriate target attributes, 3982 // so only check in the case where we have both always_inline and target 3983 // since otherwise we could be making a conditional call after a check for 3984 // the proper cpu features (and it won't cause code generation issues due to 3985 // function based code generation). 
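// Example (editorial, hedged) for AllocAlignAttrEmitter above: with
//   void *aligned_alloc2(size_t n, size_t align)
//       __attribute__((alloc_align(2)));
// the alignment is whatever value the second argument has at the call site,
// so it may be a non-constant llvm::Value; a constant power of two can
// become a call-site "align" return attribute, and anything else is emitted
// as an @llvm.assume alignment assumption after the call. The function name
// is illustrative.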
3986 if (TargetDecl->hasAttr<AlwaysInlineAttr>() && 3987 TargetDecl->hasAttr<TargetAttr>()) 3988 checkTargetFeatures(Loc, FD); 3989 3990 #ifndef NDEBUG 3991 if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) { 3992 // For an inalloca varargs function, we don't expect CallInfo to match the 3993 // function pointer's type, because the inalloca struct will have extra 3994 // fields in it for the varargs parameters. Code later in this function 3995 // bitcasts the function pointer to the type derived from CallInfo. 3996 // 3997 // In other cases, we assert that the types match up (until pointers stop 3998 // having pointee types). 3999 llvm::Type *TypeFromVal; 4000 if (Callee.isVirtual()) 4001 TypeFromVal = Callee.getVirtualFunctionType(); 4002 else 4003 TypeFromVal = 4004 Callee.getFunctionPointer()->getType()->getPointerElementType(); 4005 assert(IRFuncTy == TypeFromVal); 4006 } 4007 #endif 4008 4009 // 1. Set up the arguments. 4010 4011 // If we're using inalloca, insert the allocation after the stack save. 4012 // FIXME: Do this earlier rather than hacking it in here! 4013 Address ArgMemory = Address::invalid(); 4014 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) { 4015 const llvm::DataLayout &DL = CGM.getDataLayout(); 4016 llvm::Instruction *IP = CallArgs.getStackBase(); 4017 llvm::AllocaInst *AI; 4018 if (IP) { 4019 IP = IP->getNextNode(); 4020 AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(), 4021 "argmem", IP); 4022 } else { 4023 AI = CreateTempAlloca(ArgStruct, "argmem"); 4024 } 4025 auto Align = CallInfo.getArgStructAlignment(); 4026 AI->setAlignment(Align.getAsAlign()); 4027 AI->setUsedWithInAlloca(true); 4028 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca()); 4029 ArgMemory = Address(AI, Align); 4030 } 4031 4032 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo); 4033 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs()); 4034 4035 // If the call returns a temporary with struct return, create a temporary 4036 // alloca to hold the result, unless one is given to us. 4037 Address SRetPtr = Address::invalid(); 4038 Address SRetAlloca = Address::invalid(); 4039 llvm::Value *UnusedReturnSizePtr = nullptr; 4040 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { 4041 if (!ReturnValue.isNull()) { 4042 SRetPtr = ReturnValue.getValue(); 4043 } else { 4044 SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); 4045 if (HaveInsertPoint() && ReturnValue.isUnused()) { 4046 uint64_t size = 4047 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy)); 4048 UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer()); 4049 } 4050 } 4051 if (IRFunctionArgs.hasSRetArg()) { 4052 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer(); 4053 } else if (RetAI.isInAlloca()) { 4054 Address Addr = 4055 Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex()); 4056 Builder.CreateStore(SRetPtr.getPointer(), Addr); 4057 } 4058 } 4059 4060 Address swiftErrorTemp = Address::invalid(); 4061 Address swiftErrorArg = Address::invalid(); 4062 4063 // When passing arguments using temporary allocas, we need to add the 4064 // appropriate lifetime markers. This vector keeps track of all the lifetime 4065 // markers that need to be ended right after the call. 4066 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall; 4067 4068 // Translate all of the arguments as necessary to match the IR lowering.
4069 assert(CallInfo.arg_size() == CallArgs.size() && 4070 "Mismatch between function signature & arguments."); 4071 unsigned ArgNo = 0; 4072 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin(); 4073 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end(); 4074 I != E; ++I, ++info_it, ++ArgNo) { 4075 const ABIArgInfo &ArgInfo = info_it->info; 4076 4077 // Insert a padding argument to ensure proper alignment. 4078 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 4079 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 4080 llvm::UndefValue::get(ArgInfo.getPaddingType()); 4081 4082 unsigned FirstIRArg, NumIRArgs; 4083 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 4084 4085 switch (ArgInfo.getKind()) { 4086 case ABIArgInfo::InAlloca: { 4087 assert(NumIRArgs == 0); 4088 assert(getTarget().getTriple().getArch() == llvm::Triple::x86); 4089 if (I->isAggregate()) { 4090 Address Addr = I->hasLValue() 4091 ? I->getKnownLValue().getAddress(*this) 4092 : I->getKnownRValue().getAggregateAddress(); 4093 llvm::Instruction *Placeholder = 4094 cast<llvm::Instruction>(Addr.getPointer()); 4095 4096 if (!ArgInfo.getInAllocaIndirect()) { 4097 // Replace the placeholder with the appropriate argument slot GEP. 4098 CGBuilderTy::InsertPoint IP = Builder.saveIP(); 4099 Builder.SetInsertPoint(Placeholder); 4100 Addr = Builder.CreateStructGEP(ArgMemory, 4101 ArgInfo.getInAllocaFieldIndex()); 4102 Builder.restoreIP(IP); 4103 } else { 4104 // For indirect things such as overaligned structs, replace the 4105 // placeholder with a regular aggregate temporary alloca. Store the 4106 // address of this alloca into the struct. 4107 Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp"); 4108 Address ArgSlot = Builder.CreateStructGEP( 4109 ArgMemory, ArgInfo.getInAllocaFieldIndex()); 4110 Builder.CreateStore(Addr.getPointer(), ArgSlot); 4111 } 4112 deferPlaceholderReplacement(Placeholder, Addr.getPointer()); 4113 } else if (ArgInfo.getInAllocaIndirect()) { 4114 // Make a temporary alloca and store the address of it into the argument 4115 // struct. 4116 Address Addr = CreateMemTempWithoutCast( 4117 I->Ty, getContext().getTypeAlignInChars(I->Ty), 4118 "indirect-arg-temp"); 4119 I->copyInto(*this, Addr); 4120 Address ArgSlot = 4121 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); 4122 Builder.CreateStore(Addr.getPointer(), ArgSlot); 4123 } else { 4124 // Store the RValue into the argument struct. 4125 Address Addr = 4126 Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex()); 4127 unsigned AS = Addr.getType()->getPointerAddressSpace(); 4128 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS); 4129 // There are some cases where a trivial bitcast is not avoidable. The 4130 // definition of a type later in a translation unit may change its type 4131 // from {}* to (%struct.foo*)*. 4132 if (Addr.getType() != MemType) 4133 Addr = Builder.CreateBitCast(Addr, MemType); 4134 I->copyInto(*this, Addr); 4135 } 4136 break; 4137 } 4138 4139 case ABIArgInfo::Indirect: { 4140 assert(NumIRArgs == 1); 4141 if (!I->isAggregate()) { 4142 // Make a temporary alloca to pass the argument. 4143 Address Addr = CreateMemTempWithoutCast( 4144 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp"); 4145 IRCallArgs[FirstIRArg] = Addr.getPointer(); 4146 4147 I->copyInto(*this, Addr); 4148 } else { 4149 // We want to avoid creating an unnecessary temporary+copy here; 4150 // however, we need one in three cases: 4151 // 1.
If the argument is not byval, and we are required to copy the 4152 // source. (This case doesn't occur on any common architecture.) 4153 // 2. If the argument is byval, RV is not sufficiently aligned, and 4154 // we cannot force it to be sufficiently aligned. 4155 // 3. If the argument is byval, but RV is not located in default 4156 // or alloca address space. 4157 Address Addr = I->hasLValue() 4158 ? I->getKnownLValue().getAddress(*this) 4159 : I->getKnownRValue().getAggregateAddress(); 4160 llvm::Value *V = Addr.getPointer(); 4161 CharUnits Align = ArgInfo.getIndirectAlign(); 4162 const llvm::DataLayout *TD = &CGM.getDataLayout(); 4163 4164 assert((FirstIRArg >= IRFuncTy->getNumParams() || 4165 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() == 4166 TD->getAllocaAddrSpace()) && 4167 "indirect argument must be in alloca address space"); 4168 4169 bool NeedCopy = false; 4170 4171 if (Addr.getAlignment() < Align && 4172 llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) < 4173 Align.getQuantity()) { 4174 NeedCopy = true; 4175 } else if (I->hasLValue()) { 4176 auto LV = I->getKnownLValue(); 4177 auto AS = LV.getAddressSpace(); 4178 4179 if (!ArgInfo.getIndirectByVal() || 4180 (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) { 4181 NeedCopy = true; 4182 } 4183 if (!getLangOpts().OpenCL) { 4184 if ((ArgInfo.getIndirectByVal() && 4185 (AS != LangAS::Default && 4186 AS != CGM.getASTAllocaAddressSpace()))) { 4187 NeedCopy = true; 4188 } 4189 } 4190 // For OpenCL even if RV is located in default or alloca address space 4191 // we don't want to perform address space cast for it. 4192 else if ((ArgInfo.getIndirectByVal() && 4193 Addr.getType()->getAddressSpace() != IRFuncTy-> 4194 getParamType(FirstIRArg)->getPointerAddressSpace())) { 4195 NeedCopy = true; 4196 } 4197 } 4198 4199 if (NeedCopy) { 4200 // Create an aligned temporary, and copy to it. 4201 Address AI = CreateMemTempWithoutCast( 4202 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp"); 4203 IRCallArgs[FirstIRArg] = AI.getPointer(); 4204 4205 // Emit lifetime markers for the temporary alloca. 4206 uint64_t ByvalTempElementSize = 4207 CGM.getDataLayout().getTypeAllocSize(AI.getElementType()); 4208 llvm::Value *LifetimeSize = 4209 EmitLifetimeStart(ByvalTempElementSize, AI.getPointer()); 4210 4211 // Add cleanup code to emit the end lifetime marker after the call. 4212 if (LifetimeSize) // In case we disabled lifetime markers. 4213 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize); 4214 4215 // Generate the copy. 4216 I->copyInto(*this, AI); 4217 } else { 4218 // Skip the extra memcpy call. 4219 auto *T = V->getType()->getPointerElementType()->getPointerTo( 4220 CGM.getDataLayout().getAllocaAddrSpace()); 4221 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast( 4222 *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T, 4223 true); 4224 } 4225 } 4226 break; 4227 } 4228 4229 case ABIArgInfo::Ignore: 4230 assert(NumIRArgs == 0); 4231 break; 4232 4233 case ABIArgInfo::Extend: 4234 case ABIArgInfo::Direct: { 4235 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) && 4236 ArgInfo.getCoerceToType() == ConvertType(info_it->type) && 4237 ArgInfo.getDirectOffset() == 0) { 4238 assert(NumIRArgs == 1); 4239 llvm::Value *V; 4240 if (!I->isAggregate()) 4241 V = I->getKnownRValue().getScalarVal(); 4242 else 4243 V = Builder.CreateLoad( 4244 I->hasLValue() ? 
I->getKnownLValue().getAddress(*this) 4245 : I->getKnownRValue().getAggregateAddress()); 4246 4247 // Implement swifterror by copying into a new swifterror argument. 4248 // We'll write back in the normal path out of the call. 4249 if (CallInfo.getExtParameterInfo(ArgNo).getABI() 4250 == ParameterABI::SwiftErrorResult) { 4251 assert(!swiftErrorTemp.isValid() && "multiple swifterror args"); 4252 4253 QualType pointeeTy = I->Ty->getPointeeType(); 4254 swiftErrorArg = 4255 Address(V, getContext().getTypeAlignInChars(pointeeTy)); 4256 4257 swiftErrorTemp = 4258 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 4259 V = swiftErrorTemp.getPointer(); 4260 cast<llvm::AllocaInst>(V)->setSwiftError(true); 4261 4262 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg); 4263 Builder.CreateStore(errorValue, swiftErrorTemp); 4264 } 4265 4266 // We might have to widen integers, but we should never truncate. 4267 if (ArgInfo.getCoerceToType() != V->getType() && 4268 V->getType()->isIntegerTy()) 4269 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType()); 4270 4271 // If the argument doesn't match, perform a bitcast to coerce it. This 4272 // can happen due to trivial type mismatches. 4273 if (FirstIRArg < IRFuncTy->getNumParams() && 4274 V->getType() != IRFuncTy->getParamType(FirstIRArg)) 4275 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg)); 4276 4277 IRCallArgs[FirstIRArg] = V; 4278 break; 4279 } 4280 4281 // FIXME: Avoid the conversion through memory if possible. 4282 Address Src = Address::invalid(); 4283 if (!I->isAggregate()) { 4284 Src = CreateMemTemp(I->Ty, "coerce"); 4285 I->copyInto(*this, Src); 4286 } else { 4287 Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this) 4288 : I->getKnownRValue().getAggregateAddress(); 4289 } 4290 4291 // If the value is offset in memory, apply the offset now. 4292 Src = emitAddressAtOffset(*this, Src, ArgInfo); 4293 4294 // Fast-isel and the optimizer generally like scalar values better than 4295 // FCAs, so we flatten them if this is safe to do for this argument. 4296 llvm::StructType *STy = 4297 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType()); 4298 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 4299 llvm::Type *SrcTy = Src.getType()->getElementType(); 4300 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy); 4301 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy); 4302 4303 // If the source type is smaller than the destination type of the 4304 // coerce-to logic, copy the source value into a temp alloca the size 4305 // of the destination type to allow loading all of it. The bits past 4306 // the source value are left undef. 4307 if (SrcSize < DstSize) { 4308 Address TempAlloca 4309 = CreateTempAlloca(STy, Src.getAlignment(), 4310 Src.getName() + ".coerce"); 4311 Builder.CreateMemCpy(TempAlloca, Src, SrcSize); 4312 Src = TempAlloca; 4313 } else { 4314 Src = Builder.CreateBitCast(Src, 4315 STy->getPointerTo(Src.getAddressSpace())); 4316 } 4317 4318 assert(NumIRArgs == STy->getNumElements()); 4319 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 4320 Address EltPtr = Builder.CreateStructGEP(Src, i); 4321 llvm::Value *LI = Builder.CreateLoad(EltPtr); 4322 IRCallArgs[FirstIRArg + i] = LI; 4323 } 4324 } else { 4325 // In the simple case, just pass the coerced loaded value. 
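// Sketch (editorial, hedged) of the flattening path above: a struct
// argument coerced to { i64, i64 } on a typical x86-64 target is not passed
// as one first-class aggregate; each element is loaded and passed
// separately:
//   %p0 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %src, i32 0, i32 0
//   %v0 = load i64, i64* %p0
//   %p1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %src, i32 0, i32 1
//   %v1 = load i64, i64* %p1
//   call void @use(i64 %v0, i64 %v1)
// which keeps fast-isel and the optimizer out of FCA territory, as the
// comment above notes. The coercion type and names are illustrative.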
4326 assert(NumIRArgs == 1); 4327 IRCallArgs[FirstIRArg] = 4328 CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this); 4329 } 4330 4331 break; 4332 } 4333 4334 case ABIArgInfo::CoerceAndExpand: { 4335 auto coercionType = ArgInfo.getCoerceAndExpandType(); 4336 auto layout = CGM.getDataLayout().getStructLayout(coercionType); 4337 4338 llvm::Value *tempSize = nullptr; 4339 Address addr = Address::invalid(); 4340 Address AllocaAddr = Address::invalid(); 4341 if (I->isAggregate()) { 4342 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this) 4343 : I->getKnownRValue().getAggregateAddress(); 4344 4345 } else { 4346 RValue RV = I->getKnownRValue(); 4347 assert(RV.isScalar()); // complex should always just be direct 4348 4349 llvm::Type *scalarType = RV.getScalarVal()->getType(); 4350 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType); 4351 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType); 4352 4353 // Materialize to a temporary. 4354 addr = CreateTempAlloca( 4355 RV.getScalarVal()->getType(), 4356 CharUnits::fromQuantity(std::max( 4357 (unsigned)layout->getAlignment().value(), scalarAlign)), 4358 "tmp", 4359 /*ArraySize=*/nullptr, &AllocaAddr); 4360 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer()); 4361 4362 Builder.CreateStore(RV.getScalarVal(), addr); 4363 } 4364 4365 addr = Builder.CreateElementBitCast(addr, coercionType); 4366 4367 unsigned IRArgPos = FirstIRArg; 4368 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 4369 llvm::Type *eltType = coercionType->getElementType(i); 4370 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; 4371 Address eltAddr = Builder.CreateStructGEP(addr, i); 4372 llvm::Value *elt = Builder.CreateLoad(eltAddr); 4373 IRCallArgs[IRArgPos++] = elt; 4374 } 4375 assert(IRArgPos == FirstIRArg + NumIRArgs); 4376 4377 if (tempSize) { 4378 EmitLifetimeEnd(tempSize, AllocaAddr.getPointer()); 4379 } 4380 4381 break; 4382 } 4383 4384 case ABIArgInfo::Expand: 4385 unsigned IRArgPos = FirstIRArg; 4386 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos); 4387 assert(IRArgPos == FirstIRArg + NumIRArgs); 4388 break; 4389 } 4390 } 4391 4392 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this); 4393 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer(); 4394 4395 // If we're using inalloca, set up that argument. 4396 if (ArgMemory.isValid()) { 4397 llvm::Value *Arg = ArgMemory.getPointer(); 4398 if (CallInfo.isVariadic()) { 4399 // When passing non-POD arguments by value to variadic functions, we will 4400 // end up with a variadic prototype and an inalloca call site. In such 4401 // cases, we can't do any parameter mismatch checks. Give up and bitcast 4402 // the callee. 4403 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace(); 4404 CalleePtr = 4405 Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS)); 4406 } else { 4407 llvm::Type *LastParamTy = 4408 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1); 4409 if (Arg->getType() != LastParamTy) { 4410 #ifndef NDEBUG 4411 // Assert that these structs have equivalent element types. 
4412 llvm::StructType *FullTy = CallInfo.getArgStruct(); 4413 llvm::StructType *DeclaredTy = cast<llvm::StructType>( 4414 cast<llvm::PointerType>(LastParamTy)->getElementType()); 4415 assert(DeclaredTy->getNumElements() == FullTy->getNumElements()); 4416 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(), 4417 DE = DeclaredTy->element_end(), 4418 FI = FullTy->element_begin(); 4419 DI != DE; ++DI, ++FI) 4420 assert(*DI == *FI); 4421 #endif 4422 Arg = Builder.CreateBitCast(Arg, LastParamTy); 4423 } 4424 } 4425 assert(IRFunctionArgs.hasInallocaArg()); 4426 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg; 4427 } 4428 4429 // 2. Prepare the function pointer. 4430 4431 // If the callee is a bitcast of a non-variadic function to have a 4432 // variadic function pointer type, check to see if we can remove the 4433 // bitcast. This comes up with unprototyped functions. 4434 // 4435 // This makes the IR nicer, but more importantly it ensures that we 4436 // can inline the function at -O0 if it is marked always_inline. 4437 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT, 4438 llvm::Value *Ptr) -> llvm::Function * { 4439 if (!CalleeFT->isVarArg()) 4440 return nullptr; 4441 4442 // Get underlying value if it's a bitcast 4443 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) { 4444 if (CE->getOpcode() == llvm::Instruction::BitCast) 4445 Ptr = CE->getOperand(0); 4446 } 4447 4448 llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr); 4449 if (!OrigFn) 4450 return nullptr; 4451 4452 llvm::FunctionType *OrigFT = OrigFn->getFunctionType(); 4453 4454 // If the original type is variadic, or if any of the component types 4455 // disagree, we cannot remove the cast. 4456 if (OrigFT->isVarArg() || 4457 OrigFT->getNumParams() != CalleeFT->getNumParams() || 4458 OrigFT->getReturnType() != CalleeFT->getReturnType()) 4459 return nullptr; 4460 4461 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i) 4462 if (OrigFT->getParamType(i) != CalleeFT->getParamType(i)) 4463 return nullptr; 4464 4465 return OrigFn; 4466 }; 4467 4468 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) { 4469 CalleePtr = OrigFn; 4470 IRFuncTy = OrigFn->getFunctionType(); 4471 } 4472 4473 // 3. Perform the actual call. 4474 4475 // Deactivate any cleanups that we're supposed to do immediately before 4476 // the call. 4477 if (!CallArgs.getCleanupsToDeactivate().empty()) 4478 deactivateArgCleanupsBeforeCall(*this, CallArgs); 4479 4480 // Assert that the arguments we computed match up. The IR verifier 4481 // will catch this, but this is a common enough source of problems 4482 // during IRGen changes that it's way better for debugging to catch 4483 // it ourselves here. 4484 #ifndef NDEBUG 4485 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg()); 4486 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 4487 // Inalloca argument can have different type. 4488 if (IRFunctionArgs.hasInallocaArg() && 4489 i == IRFunctionArgs.getInallocaArgNo()) 4490 continue; 4491 if (i < IRFuncTy->getNumParams()) 4492 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i)); 4493 } 4494 #endif 4495 4496 // Update the largest vector width if any arguments have vector types. 
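// Example (editorial, hedged) for simplifyVariadicCallee above: calling an
// unprototyped C function,
//   void f();      // no prototype
//   f(1, 2);
// goes through a bitcast of @f to a variadic function pointer type. When
// the underlying function is not actually variadic and all present
// parameter and return types line up, the cast is stripped and @f is called
// directly, which also lets an always_inline @f be inlined at -O0.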
4497 for (unsigned i = 0; i < IRCallArgs.size(); ++i) { 4498 if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType())) 4499 LargestVectorWidth = 4500 std::max((uint64_t)LargestVectorWidth, 4501 VT->getPrimitiveSizeInBits().getKnownMinSize()); 4502 } 4503 4504 // Compute the calling convention and attributes. 4505 unsigned CallingConv; 4506 llvm::AttributeList Attrs; 4507 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo, 4508 Callee.getAbstractInfo(), Attrs, CallingConv, 4509 /*AttrOnCallSite=*/true); 4510 4511 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) 4512 if (FD->usesFPIntrin()) 4513 // All calls within a strictfp function are marked strictfp 4514 Attrs = 4515 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4516 llvm::Attribute::StrictFP); 4517 4518 // Apply some call-site-specific attributes. 4519 // TODO: work this into building the attribute set. 4520 4521 // Apply always_inline to all calls within flatten functions. 4522 // FIXME: should this really take priority over __try, below? 4523 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() && 4524 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) { 4525 Attrs = 4526 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4527 llvm::Attribute::AlwaysInline); 4528 } 4529 4530 // Disable inlining inside SEH __try blocks. 4531 if (isSEHTryScope()) { 4532 Attrs = 4533 Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex, 4534 llvm::Attribute::NoInline); 4535 } 4536 4537 // Decide whether to use a call or an invoke. 4538 bool CannotThrow; 4539 if (currentFunctionUsesSEHTry()) { 4540 // SEH cares about asynchronous exceptions, so everything can "throw." 4541 CannotThrow = false; 4542 } else if (isCleanupPadScope() && 4543 EHPersonality::get(*this).isMSVCXXPersonality()) { 4544 // The MSVC++ personality will implicitly terminate the program if an 4545 // exception is thrown during a cleanup outside of a try/catch. 4546 // We don't need to model anything in IR to get this behavior. 4547 CannotThrow = true; 4548 } else { 4549 // Otherwise, nounwind call sites will never throw. 4550 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex, 4551 llvm::Attribute::NoUnwind); 4552 } 4553 4554 // If we made a temporary, be sure to clean up after ourselves. Note that we 4555 // can't depend on being inside of an ExprWithCleanups, so we need to manually 4556 // pop this cleanup later on. Being eager about this is OK, since this 4557 // temporary is 'invisible' outside of the callee. 4558 if (UnusedReturnSizePtr) 4559 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca, 4560 UnusedReturnSizePtr); 4561 4562 llvm::BasicBlock *InvokeDest = CannotThrow ? 
  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to manually
  // pop this cleanup later on. Being eager about this is OK, since this
  // temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate that
  // Control Flow Guard checks should not be added, even if the call is inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
          !CI->getCalledFunction())
        Attrs = Attrs.addAttribute(
            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Suppress tail calls if requested.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }
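
  // For example (illustration only), a callee declared with
  // __attribute__((not_tail_called)) ends up as
  //
  //   %call = notail call i32 @f()
  //
  // which forbids turning this particular call site into a tail call.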
  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove it from the called function since CallBase::hasFnAttr
      // additionally checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);
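
  // How the return value is recovered below depends on the return ABI:
  // indirect, inalloca, and coerce-and-expand returns are read back out of
  // the sret slot, while direct returns come straight off the call
  // instruction, possibly via a coercing store through a temporary.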
  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
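
// A sketch of what EmitVAArg lowers to (illustration only): on x86-64 SysV,
// 'va_arg(ap, int)' checks the va_list's gp_offset and loads from the
// register save area, falling back to the overflow area once the register
// slots are exhausted, while the Microsoft ABI path treats the va_list as a
// simple pointer that is bumped past each argument.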