//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
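///
/// Illustrative example (editor's note, not in the original source): for
///   struct S { void f() const; };
/// the derived 'this' type is 'S *'; the 'const' method qualifier is
/// dropped, while an address-space qualifier from getMethodQualifiers()
/// would be preserved on the pointee.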
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto,
    unsigned prefixArgs,
    unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
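  //
  // Illustrative example (editor's note, not in the original source): a
  // parameter declared as
  //   void f(void *p __attribute__((pass_object_size(0))));
  // is arranged as two parameters, the pointer plus a size_t carrying the
  // object size, which is why the reservation below can be exceeded.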
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
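  // (Editor's note: if RD is null there is no meaningful 'this' type, and
  // DeriveThisType falls back to a generic 'void *'-style pointer, as the
  // doc comment above describes.)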
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
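///
/// (Editor's note: instance methods are routed to
/// arrangeCXXMethodDeclaration below, and a C declaration without a
/// prototype, such as 'int f();', is arranged with a non-variadic type.)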
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, bool instanceMethod, bool chainCall,
    ArrayRef<CanQualType> argTypes, FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
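      //
      // Illustrative example (editor's note): for
      //   union { int i; char c; }
      // only 'int i' survives as the single expanded field on typical
      // targets, since it has the larger size.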
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
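    // (Editor's note: e.g. an 'i8*' scalar passed where the IR signature
    // expects an 'i32*' parameter is bitcast here so the call type-checks.)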
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedSize(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.experimental.vector.insert intrinsic to
  // perform the conversion.
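  //
  // Illustrative example (editor's note): loading a '<4 x i32>' argument
  // destined for a '<vscale x 4 x i32>' parameter loads the fixed-width
  // value and inserts it into an undef scalable vector at index 0, as the
  // code below does.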
  if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDst);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
                                              "castScalableSve");
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
                                             SrcSize.getFixedSize(), CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
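  // (For instance, storing an i64 coerced value into an i32 destination
  // truncates; CoerceIntOrPtrToIntOrPtr above keeps the memory-equivalent
  // bits for the target's endianness. Editor's note.)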
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (isa<llvm::ScalableVectorType>(SrcTy) ||
      isa<llvm::ScalableVectorType>(DstTy) ||
      SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(
        Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
        Tmp.getAlignment().getAsAlign(),
        llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                                  CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
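    // (Editor's note: the sret parameter comes second when the return is
    // indirect with isSRetAfterThis(), i.e. the ABI passes 'this' first and
    // the sret pointer second; see the SwapThisWithSRet handling above.)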
1522 if (IRArgNo == 1 && SwapThisWithSRet) 1523 IRArgNo++; 1524 } 1525 assert(ArgNo == ArgInfo.size()); 1526 1527 if (FI.usesInAlloca()) 1528 InallocaArgNo = IRArgNo++; 1529 1530 TotalIRArgs = IRArgNo; 1531 } 1532 } // namespace 1533 1534 /***/ 1535 1536 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) { 1537 const auto &RI = FI.getReturnInfo(); 1538 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet()); 1539 } 1540 1541 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) { 1542 return ReturnTypeUsesSRet(FI) && 1543 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs(); 1544 } 1545 1546 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) { 1547 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) { 1548 switch (BT->getKind()) { 1549 default: 1550 return false; 1551 case BuiltinType::Float: 1552 return getTarget().useObjCFPRetForRealType(TargetInfo::Float); 1553 case BuiltinType::Double: 1554 return getTarget().useObjCFPRetForRealType(TargetInfo::Double); 1555 case BuiltinType::LongDouble: 1556 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble); 1557 } 1558 } 1559 1560 return false; 1561 } 1562 1563 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) { 1564 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) { 1565 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) { 1566 if (BT->getKind() == BuiltinType::LongDouble) 1567 return getTarget().useObjCFP2RetForComplexLongDouble(); 1568 } 1569 } 1570 1571 return false; 1572 } 1573 1574 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) { 1575 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD); 1576 return GetFunctionType(FI); 1577 } 1578 1579 llvm::FunctionType * 1580 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) { 1581 1582 bool Inserted = FunctionsBeingProcessed.insert(&FI).second; 1583 (void)Inserted; 1584 assert(Inserted && "Recursively being processed?"); 1585 1586 llvm::Type *resultType = nullptr; 1587 const ABIArgInfo &retAI = FI.getReturnInfo(); 1588 switch (retAI.getKind()) { 1589 case ABIArgInfo::Expand: 1590 case ABIArgInfo::IndirectAliased: 1591 llvm_unreachable("Invalid ABI kind for return argument"); 1592 1593 case ABIArgInfo::Extend: 1594 case ABIArgInfo::Direct: 1595 resultType = retAI.getCoerceToType(); 1596 break; 1597 1598 case ABIArgInfo::InAlloca: 1599 if (retAI.getInAllocaSRet()) { 1600 // sret things on win32 aren't void, they return the sret pointer. 1601 QualType ret = FI.getReturnType(); 1602 llvm::Type *ty = ConvertType(ret); 1603 unsigned addressSpace = Context.getTargetAddressSpace(ret); 1604 resultType = llvm::PointerType::get(ty, addressSpace); 1605 } else { 1606 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1607 } 1608 break; 1609 1610 case ABIArgInfo::Indirect: 1611 case ABIArgInfo::Ignore: 1612 resultType = llvm::Type::getVoidTy(getLLVMContext()); 1613 break; 1614 1615 case ABIArgInfo::CoerceAndExpand: 1616 resultType = retAI.getUnpaddedCoerceAndExpandType(); 1617 break; 1618 } 1619 1620 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true); 1621 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs()); 1622 1623 // Add type for sret argument. 
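  // For example (illustrative, target-dependent): given
  //   struct Big { int a[10]; };
  //   struct Big f(int x);
  // an indirect return lowers the signature to something like
  //   define void @f(%struct.Big* %agg.result, i32 %x)
  // with the sret attribute attached later in ConstructAttributeList.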
1624 if (IRFunctionArgs.hasSRetArg()) { 1625 QualType Ret = FI.getReturnType(); 1626 llvm::Type *Ty = ConvertType(Ret); 1627 unsigned AddressSpace = Context.getTargetAddressSpace(Ret); 1628 ArgTypes[IRFunctionArgs.getSRetArgNo()] = 1629 llvm::PointerType::get(Ty, AddressSpace); 1630 } 1631 1632 // Add type for inalloca argument. 1633 if (IRFunctionArgs.hasInallocaArg()) { 1634 auto ArgStruct = FI.getArgStruct(); 1635 assert(ArgStruct); 1636 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo(); 1637 } 1638 1639 // Add in all of the required arguments. 1640 unsigned ArgNo = 0; 1641 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), 1642 ie = it + FI.getNumRequiredArgs(); 1643 for (; it != ie; ++it, ++ArgNo) { 1644 const ABIArgInfo &ArgInfo = it->info; 1645 1646 // Insert a padding type to ensure proper alignment. 1647 if (IRFunctionArgs.hasPaddingArg(ArgNo)) 1648 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 1649 ArgInfo.getPaddingType(); 1650 1651 unsigned FirstIRArg, NumIRArgs; 1652 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 1653 1654 switch (ArgInfo.getKind()) { 1655 case ABIArgInfo::Ignore: 1656 case ABIArgInfo::InAlloca: 1657 assert(NumIRArgs == 0); 1658 break; 1659 1660 case ABIArgInfo::Indirect: { 1661 assert(NumIRArgs == 1); 1662 // indirect arguments are always on the stack, which is alloca addr space. 1663 llvm::Type *LTy = ConvertTypeForMem(it->type); 1664 ArgTypes[FirstIRArg] = LTy->getPointerTo( 1665 CGM.getDataLayout().getAllocaAddrSpace()); 1666 break; 1667 } 1668 case ABIArgInfo::IndirectAliased: { 1669 assert(NumIRArgs == 1); 1670 llvm::Type *LTy = ConvertTypeForMem(it->type); 1671 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace()); 1672 break; 1673 } 1674 case ABIArgInfo::Extend: 1675 case ABIArgInfo::Direct: { 1676 // Fast-isel and the optimizer generally like scalar values better than 1677 // FCAs, so we flatten them if this is safe to do for this argument. 
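      // For example (illustrative, x86-64 SysV): a parameter of type
      //   struct S { long a; double b; };
      // may be coerced to { i64, double } and then flattened here into two
      // scalar IR arguments, i64 and double.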
1678 llvm::Type *argType = ArgInfo.getCoerceToType(); 1679 llvm::StructType *st = dyn_cast<llvm::StructType>(argType); 1680 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) { 1681 assert(NumIRArgs == st->getNumElements()); 1682 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i) 1683 ArgTypes[FirstIRArg + i] = st->getElementType(i); 1684 } else { 1685 assert(NumIRArgs == 1); 1686 ArgTypes[FirstIRArg] = argType; 1687 } 1688 break; 1689 } 1690 1691 case ABIArgInfo::CoerceAndExpand: { 1692 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1693 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) { 1694 *ArgTypesIter++ = EltTy; 1695 } 1696 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1697 break; 1698 } 1699 1700 case ABIArgInfo::Expand: 1701 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg; 1702 getExpandedTypes(it->type, ArgTypesIter); 1703 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs); 1704 break; 1705 } 1706 } 1707 1708 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased; 1709 assert(Erased && "Not in set?"); 1710 1711 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic()); 1712 } 1713 1714 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) { 1715 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl()); 1716 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); 1717 1718 if (!isFuncTypeConvertible(FPT)) 1719 return llvm::StructType::get(getLLVMContext()); 1720 1721 return GetFunctionType(GD); 1722 } 1723 1724 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx, 1725 llvm::AttrBuilder &FuncAttrs, 1726 const FunctionProtoType *FPT) { 1727 if (!FPT) 1728 return; 1729 1730 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) && 1731 FPT->isNothrow()) 1732 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind); 1733 } 1734 1735 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context, 1736 QualType ReturnType) { 1737 // We can't just discard the return value for a record type with a 1738 // complex destructor or a non-trivially copyable type. 1739 if (const RecordType *RT = 1740 ReturnType.getCanonicalType()->getAs<RecordType>()) { 1741 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) 1742 return ClassDecl->hasTrivialDestructor(); 1743 } 1744 return ReturnType.isTriviallyCopyableType(Context); 1745 } 1746 1747 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name, 1748 bool HasOptnone, 1749 bool AttrOnCallSite, 1750 llvm::AttrBuilder &FuncAttrs) { 1751 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed. 1752 if (!HasOptnone) { 1753 if (CodeGenOpts.OptimizeSize) 1754 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize); 1755 if (CodeGenOpts.OptimizeSize == 2) 1756 FuncAttrs.addAttribute(llvm::Attribute::MinSize); 1757 } 1758 1759 if (CodeGenOpts.DisableRedZone) 1760 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone); 1761 if (CodeGenOpts.IndirectTlsSegRefs) 1762 FuncAttrs.addAttribute("indirect-tls-seg-refs"); 1763 if (CodeGenOpts.NoImplicitFloat) 1764 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat); 1765 1766 if (AttrOnCallSite) { 1767 // Attributes that should go on the call site only. 
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    StringRef FpKind;
    switch (CodeGenOpts.getFramePointer()) {
    case CodeGenOptions::FramePointerKind::None:
      FpKind = "none";
      break;
    case CodeGenOptions::FramePointerKind::NonLeaf:
      FpKind = "non-leaf";
      break;
    case CodeGenOptions::FramePointerKind::All:
      FpKind = "all";
      break;
    }
    FuncAttrs.addAttribute("frame-pointer", FpKind);

    if (CodeGenOpts.LessPreciseFPMAD)
      FuncAttrs.addAttribute("less-precise-fpmad", "true");

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

    if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
      FuncAttrs.addAttribute("denormal-fp-math",
                             CodeGenOpts.FPDenormalMode.str());
    if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
      FuncAttrs.addAttribute(
          "denormal-fp-math-f32",
          CodeGenOpts.FP32DenormalMode.str());
    }

    if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
      FuncAttrs.addAttribute("no-trapping-math", "true");

    // Strict (compliant) code is the default, so only add this attribute to
    // indicate that we are trying to work around a problem case.
    if (!CodeGenOpts.StrictFloatCastOverflow)
      FuncAttrs.addAttribute("strict-float-cast-overflow", "false");

    // TODO: Are these all needed?
    // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
    if (LangOpts.NoHonorInfs)
      FuncAttrs.addAttribute("no-infs-fp-math", "true");
    if (LangOpts.NoHonorNaNs)
      FuncAttrs.addAttribute("no-nans-fp-math", "true");
    if (LangOpts.UnsafeFPMath)
      FuncAttrs.addAttribute("unsafe-fp-math", "true");
    if (CodeGenOpts.SoftFloat)
      FuncAttrs.addAttribute("use-soft-float", "true");
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    if (LangOpts.NoSignedZero)
      FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");

    // TODO: Reciprocal estimate codegen options should apply to instructions?
    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

    if (!CodeGenOpts.PreferVectorWidth.empty() &&
        CodeGenOpts.PreferVectorWidth != "none")
      FuncAttrs.addAttribute("prefer-vector-width",
                             CodeGenOpts.PreferVectorWidth);

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
  }

  if (getLangOpts().assumeFunctionsAreConvergent()) {
    // Conservatively, mark all functions and calls in CUDA and OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as __syncthreads() / barrier(), and so can't have certain optimizations
    // applied around them). LLVM will remove this attribute where it safely
    // can.
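    // For example (illustrative, CUDA):
    //   __global__ void k(int *p) { ...; __syncthreads(); ... }
    // Any function that might transitively reach __syncthreads has to stay
    // convergent so control-flow transformations don't break the barrier.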
    FuncAttrs.addAttribute(llvm::Attribute::Convergent);
  }

  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
    // Exceptions aren't supported in CUDA device code.
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  }

  for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
    StringRef Var, Value;
    std::tie(Var, Value) = Attr.split('=');
    FuncAttrs.addAttribute(Var, Value);
  }
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
  llvm::AttrBuilder FuncAttrs;
  getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
                               /* AttrOnCallSite = */ false, FuncAttrs);
  // TODO: call GetCPUAndFeaturesAttributes?
  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(
    llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
                               /*for call*/ false, attrs);
  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
}

static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
                                   const LangOptions &LangOpts,
                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
    SmallString<32> AttributeName;
    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);
  };

  // First, handle the language options passed through -fno-builtin.
  if (LangOpts.NoBuiltin) {
    // -fno-builtin disables them all.
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // Then, add attributes for builtins specified through -fno-builtin-<name>.
  llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);

  // Now, let's check the __attribute__((no_builtin("..."))) attribute added
  // to the source.
  if (!NBA)
    return;

  // If there is a wildcard in the builtin names specified through the
  // attribute, disable them all.
  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // And last, add the rest of the builtin names.
  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
}

static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
                             const llvm::DataLayout &DL, const ABIArgInfo &AI,
                             bool CheckCoerce = true) {
  llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
  if (AI.getKind() == ABIArgInfo::Indirect)
    return true;
  if (AI.getKind() == ABIArgInfo::Extend)
    return true;
  if (!DL.typeSizeEqualsStoreSize(Ty))
    // TODO: This will result in a modest amount of values not marked noundef
    // when they could be. We care about values that *invisibly* contain undef
    // bits from the perspective of LLVM IR.
    return false;
  if (CheckCoerce && AI.canHaveCoerceToType()) {
    llvm::Type *CoerceTy = AI.getCoerceToType();
    if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
                                  DL.getTypeSizeInBits(Ty)))
      // If we're coercing to a type with a greater size than the canonical
      // one, we're introducing new undef bits.
      // Coercing to a type of smaller or equal size is ok, as we know that
      // there's no internal padding (typeSizeEqualsStoreSize).
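      // For instance (illustrative): coercing a packed 3-byte struct to i32
      // would leave the top 8 bits undef, so such a value must not be marked
      // noundef.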
      return false;
  }
  if (QTy->isExtIntType())
    return true;
  if (QTy->isReferenceType())
    return true;
  if (QTy->isNullPtrType())
    return false;
  if (QTy->isMemberPointerType())
    // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
    // now, never mark them.
    return false;
  if (QTy->isScalarType()) {
    if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
      return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
    return true;
  }
  if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
    return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
  if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
    return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
  if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
    return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);

  // TODO: Some structs may be `noundef`, in specific situations.
  return false;
}

/// Construct the IR attribute list of a function or call.
///
/// When adding an attribute, please consider where it should be handled:
///
/// - getDefaultFunctionAttributes is for attributes that are essentially
///   part of the global target configuration (but perhaps can be
///   overridden on a per-function basis). Adding attributes there
///   will cause them to also be set in frontends that build on Clang's
///   target-configuration logic, as well as for code defined in library
///   modules such as CUDA's libdevice.
///
/// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
///   and adds declaration-specific, convention-specific, and
///   frontend-specific logic. The last is of particular importance:
///   attributes that restrict how the frontend generates code must be
///   added here rather than getDefaultFunctionAttributes.
///
void CodeGenModule::ConstructAttributeList(
    StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
    llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  // Collect function IR attributes from the CC lowering.
  // We'll collect the parameter and result attributes later.
  CallingConv = FI.getEffectiveCallingConvention();
  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
  if (FI.isCmseNSCall())
    FuncAttrs.addAttribute("cmse_nonsecure_call");

  // Collect function IR attributes from the callee prototype if we have one.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();

  bool HasOptnone = false;
  // The NoBuiltinAttr attached to the target FunctionDecl.
  const NoBuiltinAttr *NBA = nullptr;

  // Collect function IR attributes based on declaration-specific
  // information.
  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<HotAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Hot);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
        // A sane operator new returns a non-aliasing pointer.
        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
        if (getCodeGenOpts().AssumeSaneOperatorNew &&
            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);
      }
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      const bool IsVirtualCall = MD && MD->isVirtual();
      // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
      // virtual function. These attributes are not inherited by overriders.
      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();
      }
      // Only place nomerge attribute on call sites, never functions. This
      // allows it to work on indirect virtual function calls.
      if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
        FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // gcc specifies that 'const' functions have greater restrictions than
      // 'pure' functions, so they also cannot have infinite loops.
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // gcc specifies that 'pure' functions cannot have infinite loops.
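      // For example (illustrative):
      //   __attribute__((pure)) int f(const int *p);
      // lowers to a declaration carrying readonly, nounwind and willreturn,
      // which lets LLVM CSE repeated calls with the same arguments.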
      FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
    if (TargetDecl->hasAttr<LeafAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCallback);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      Optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
                                 NumElemsParam);
    }

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
      if (getLangOpts().OpenCLVersion <= 120) {
        // OpenCL v1.2 work groups are always uniform.
        FuncAttrs.addAttribute("uniform-work-group-size", "true");
      } else {
        // In OpenCL v2.0, work groups may or may not be uniform. The
        // '-cl-uniform-work-group-size' compile option hints to the compiler
        // that the global work-size is a multiple of the work-group size
        // specified to clEnqueueNDRangeKernel (i.e. work groups are uniform).
        FuncAttrs.addAttribute("uniform-work-group-size",
                               llvm::toStringRef(CodeGenOpts.UniformWGSize));
      }
    }

    std::string AssumptionValueStr;
    for (AssumptionAttr *AssumptionA :
         TargetDecl->specific_attrs<AssumptionAttr>()) {
      std::string AS = AssumptionA->getAssumption().str();
      if (!AS.empty() && !AssumptionValueStr.empty())
        AssumptionValueStr += ",";
      AssumptionValueStr += AS;
    }

    if (!AssumptionValueStr.empty())
      FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
  }

  // Attach "no-builtins" attributes to:
  // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
  // * definitions: "no-builtins" or "no-builtin-<name>" only.
  // The attributes can come from:
  // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
  // * FunctionDecl attributes: __attribute__((no_builtin(...)))
  addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);

  // Collect function IR attributes based on global settings.
  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

  // Override some default IR attributes based on declaration-specific
  // information.
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");

    // Add NonLazyBind attribute to function declarations when -fno-plt
    // is used.
2142 // FIXME: what if we just haven't processed the function definition 2143 // yet, or if it's an external definition like C99 inline? 2144 if (CodeGenOpts.NoPLT) { 2145 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 2146 if (!Fn->isDefined() && !AttrOnCallSite) { 2147 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind); 2148 } 2149 } 2150 } 2151 } 2152 2153 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage 2154 // functions with -funique-internal-linkage-names. 2155 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) { 2156 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) { 2157 if (this->getFunctionLinkage(Fn) == llvm::GlobalValue::InternalLinkage) 2158 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy", 2159 "selected"); 2160 } 2161 } 2162 2163 // Collect non-call-site function IR attributes from declaration-specific 2164 // information. 2165 if (!AttrOnCallSite) { 2166 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>()) 2167 FuncAttrs.addAttribute("cmse_nonsecure_entry"); 2168 2169 // Whether tail calls are enabled. 2170 auto shouldDisableTailCalls = [&] { 2171 // Should this be honored in getDefaultFunctionAttributes? 2172 if (CodeGenOpts.DisableTailCalls) 2173 return true; 2174 2175 if (!TargetDecl) 2176 return false; 2177 2178 if (TargetDecl->hasAttr<DisableTailCallsAttr>() || 2179 TargetDecl->hasAttr<AnyX86InterruptAttr>()) 2180 return true; 2181 2182 if (CodeGenOpts.NoEscapingBlockTailCalls) { 2183 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl)) 2184 if (!BD->doesNotEscape()) 2185 return true; 2186 } 2187 2188 return false; 2189 }; 2190 if (shouldDisableTailCalls()) 2191 FuncAttrs.addAttribute("disable-tail-calls", "true"); 2192 2193 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes 2194 // handles these separately to set them based on the global defaults. 2195 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs); 2196 } 2197 2198 // Collect attributes from arguments and return values. 2199 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI); 2200 2201 QualType RetTy = FI.getReturnType(); 2202 const ABIArgInfo &RetAI = FI.getReturnInfo(); 2203 const llvm::DataLayout &DL = getDataLayout(); 2204 2205 // C++ explicitly makes returning undefined values UB. C's rule only applies 2206 // to used values, so we never mark them noundef for now. 2207 bool HasStrictReturn = getLangOpts().CPlusPlus; 2208 if (TargetDecl) { 2209 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) 2210 HasStrictReturn &= !FDecl->isExternC(); 2211 else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) 2212 // Function pointer 2213 HasStrictReturn &= !VDecl->isExternC(); 2214 } 2215 2216 // We don't want to be too aggressive with the return checking, unless 2217 // it's explicit in the code opts or we're using an appropriate sanitizer. 2218 // Try to respect what the programmer intended. 
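  // For example: in C++, 'int f(bool b) { if (b) return 1; }' has immediate
  // UB when it flows off the end, so the return value may be marked noundef;
  // the same code in C is UB only if the caller actually uses the value, so
  // we stay conservative there unless StrictReturn applies.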
  HasStrictReturn &= getCodeGenOpts().StrictReturn ||
                     !MayDropFunctionReturn(getContext(), RetTy) ||
                     getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
                     getLangOpts().Sanitize.has(SanitizerKind::Return);

  // Determine if the return type could be partially undef.
  if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
    if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
        DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
      RetAttrs.addAttribute(llvm::Attribute::NoUndef);
  }

  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetAI.isSignExt())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    LLVM_FALLTHROUGH;
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::CoerceAndExpand:
    break;

  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(
          getMinimumObjectSize(PTy).getQuantity());
    if (getContext().getTargetAddressSpace(PTy) == 0 &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (PTy->isObjectType()) {
      llvm::Align Alignment =
          getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
      RetAttrs.addAlignmentAttr(Alignment);
    }
  }

  bool hasUsedSRet = false;
  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());

  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
    hasUsedSRet = true;
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), Attrs);
  }

  // Apply `nonnull` and `dereferenceable(N)` to the `this` argument.
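  // For example (illustrative): for
  //   struct S { int x; void m(); };
  // the member function S::m may lower to
  //   define void @_ZN1S1mEv(%struct.S* nonnull dereferenceable(4) %this)
  // when null pointers are assumed invalid.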
2300 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() && 2301 !FI.arg_begin()->type->isVoidPointerType()) { 2302 auto IRArgs = IRFunctionArgs.getIRArgs(0); 2303 2304 assert(IRArgs.second == 1 && "Expected only a single `this` pointer."); 2305 2306 llvm::AttrBuilder Attrs; 2307 2308 if (!CodeGenOpts.NullPointerIsValid && 2309 getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) { 2310 Attrs.addAttribute(llvm::Attribute::NonNull); 2311 Attrs.addDereferenceableAttr( 2312 getMinimumObjectSize( 2313 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) 2314 .getQuantity()); 2315 } else { 2316 // FIXME dereferenceable should be correct here, regardless of 2317 // NullPointerIsValid. However, dereferenceable currently does not always 2318 // respect NullPointerIsValid and may imply nonnull and break the program. 2319 // See https://reviews.llvm.org/D66618 for discussions. 2320 Attrs.addDereferenceableOrNullAttr( 2321 getMinimumObjectSize( 2322 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType()) 2323 .getQuantity()); 2324 } 2325 2326 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs); 2327 } 2328 2329 unsigned ArgNo = 0; 2330 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(), 2331 E = FI.arg_end(); 2332 I != E; ++I, ++ArgNo) { 2333 QualType ParamType = I->type; 2334 const ABIArgInfo &AI = I->info; 2335 llvm::AttrBuilder Attrs; 2336 2337 // Add attribute for padding argument, if necessary. 2338 if (IRFunctionArgs.hasPaddingArg(ArgNo)) { 2339 if (AI.getPaddingInReg()) { 2340 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] = 2341 llvm::AttributeSet::get( 2342 getLLVMContext(), 2343 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg)); 2344 } 2345 } 2346 2347 // Decide whether the argument we're handling could be partially undef 2348 bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI); 2349 if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef) 2350 Attrs.addAttribute(llvm::Attribute::NoUndef); 2351 2352 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we 2353 // have the corresponding parameter variable. It doesn't make 2354 // sense to do it here because parameters are so messed up. 2355 switch (AI.getKind()) { 2356 case ABIArgInfo::Extend: 2357 if (AI.isSignExt()) 2358 Attrs.addAttribute(llvm::Attribute::SExt); 2359 else 2360 Attrs.addAttribute(llvm::Attribute::ZExt); 2361 LLVM_FALLTHROUGH; 2362 case ABIArgInfo::Direct: 2363 if (ArgNo == 0 && FI.isChainCall()) 2364 Attrs.addAttribute(llvm::Attribute::Nest); 2365 else if (AI.getInReg()) 2366 Attrs.addAttribute(llvm::Attribute::InReg); 2367 break; 2368 2369 case ABIArgInfo::Indirect: { 2370 if (AI.getInReg()) 2371 Attrs.addAttribute(llvm::Attribute::InReg); 2372 2373 if (AI.getIndirectByVal()) 2374 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType)); 2375 2376 auto *Decl = ParamType->getAsRecordDecl(); 2377 if (CodeGenOpts.PassByValueIsNoAlias && Decl && 2378 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs) 2379 // When calling the function, the pointer passed in will be the only 2380 // reference to the underlying object. Mark it accordingly. 2381 Attrs.addAttribute(llvm::Attribute::NoAlias); 2382 2383 // TODO: We could add the byref attribute if not byval, but it would 2384 // require updating many testcases. 
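      // For example (illustrative, x86-64 SysV): passing
      //   struct Big { char buf[64]; };
      // by value lowers to a hidden pointer parameter such as
      //   %struct.Big* byval(%struct.Big) align 8 %b
      // which points at a caller-made copy the callee is free to modify.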
2385 2386 CharUnits Align = AI.getIndirectAlign(); 2387 2388 // In a byval argument, it is important that the required 2389 // alignment of the type is honored, as LLVM might be creating a 2390 // *new* stack object, and needs to know what alignment to give 2391 // it. (Sometimes it can deduce a sensible alignment on its own, 2392 // but not if clang decides it must emit a packed struct, or the 2393 // user specifies increased alignment requirements.) 2394 // 2395 // This is different from indirect *not* byval, where the object 2396 // exists already, and the align attribute is purely 2397 // informative. 2398 assert(!Align.isZero()); 2399 2400 // For now, only add this when we have a byval argument. 2401 // TODO: be less lazy about updating test cases. 2402 if (AI.getIndirectByVal()) 2403 Attrs.addAlignmentAttr(Align.getQuantity()); 2404 2405 // byval disables readnone and readonly. 2406 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2407 .removeAttribute(llvm::Attribute::ReadNone); 2408 2409 break; 2410 } 2411 case ABIArgInfo::IndirectAliased: { 2412 CharUnits Align = AI.getIndirectAlign(); 2413 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType)); 2414 Attrs.addAlignmentAttr(Align.getQuantity()); 2415 break; 2416 } 2417 case ABIArgInfo::Ignore: 2418 case ABIArgInfo::Expand: 2419 case ABIArgInfo::CoerceAndExpand: 2420 break; 2421 2422 case ABIArgInfo::InAlloca: 2423 // inalloca disables readnone and readonly. 2424 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly) 2425 .removeAttribute(llvm::Attribute::ReadNone); 2426 continue; 2427 } 2428 2429 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) { 2430 QualType PTy = RefTy->getPointeeType(); 2431 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) 2432 Attrs.addDereferenceableAttr( 2433 getMinimumObjectSize(PTy).getQuantity()); 2434 if (getContext().getTargetAddressSpace(PTy) == 0 && 2435 !CodeGenOpts.NullPointerIsValid) 2436 Attrs.addAttribute(llvm::Attribute::NonNull); 2437 if (PTy->isObjectType()) { 2438 llvm::Align Alignment = 2439 getNaturalPointeeTypeAlignment(ParamType).getAsAlign(); 2440 Attrs.addAlignmentAttr(Alignment); 2441 } 2442 } 2443 2444 switch (FI.getExtParameterInfo(ArgNo).getABI()) { 2445 case ParameterABI::Ordinary: 2446 break; 2447 2448 case ParameterABI::SwiftIndirectResult: { 2449 // Add 'sret' if we haven't already used it for something, but 2450 // only if the result is void. 2451 if (!hasUsedSRet && RetTy->isVoidType()) { 2452 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType)); 2453 hasUsedSRet = true; 2454 } 2455 2456 // Add 'noalias' in either case. 2457 Attrs.addAttribute(llvm::Attribute::NoAlias); 2458 2459 // Add 'dereferenceable' and 'alignment'. 
2460 auto PTy = ParamType->getPointeeType(); 2461 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) { 2462 auto info = getContext().getTypeInfoInChars(PTy); 2463 Attrs.addDereferenceableAttr(info.Width.getQuantity()); 2464 Attrs.addAlignmentAttr(info.Align.getAsAlign()); 2465 } 2466 break; 2467 } 2468 2469 case ParameterABI::SwiftErrorResult: 2470 Attrs.addAttribute(llvm::Attribute::SwiftError); 2471 break; 2472 2473 case ParameterABI::SwiftContext: 2474 Attrs.addAttribute(llvm::Attribute::SwiftSelf); 2475 break; 2476 } 2477 2478 if (FI.getExtParameterInfo(ArgNo).isNoEscape()) 2479 Attrs.addAttribute(llvm::Attribute::NoCapture); 2480 2481 if (Attrs.hasAttributes()) { 2482 unsigned FirstIRArg, NumIRArgs; 2483 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo); 2484 for (unsigned i = 0; i < NumIRArgs; i++) 2485 ArgAttrs[FirstIRArg + i] = 2486 llvm::AttributeSet::get(getLLVMContext(), Attrs); 2487 } 2488 } 2489 assert(ArgNo == FI.arg_size()); 2490 2491 AttrList = llvm::AttributeList::get( 2492 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs), 2493 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs); 2494 } 2495 2496 /// An argument came in as a promoted argument; demote it back to its 2497 /// declared type. 2498 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF, 2499 const VarDecl *var, 2500 llvm::Value *value) { 2501 llvm::Type *varType = CGF.ConvertType(var->getType()); 2502 2503 // This can happen with promotions that actually don't change the 2504 // underlying type, like the enum promotions. 2505 if (value->getType() == varType) return value; 2506 2507 assert((varType->isIntegerTy() || varType->isFloatingPointTy()) 2508 && "unexpected promotion type"); 2509 2510 if (isa<llvm::IntegerType>(varType)) 2511 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote"); 2512 2513 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote"); 2514 } 2515 2516 /// Returns the attribute (either parameter attribute, or function 2517 /// attribute), which declares argument ArgNo to be non-null. 2518 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD, 2519 QualType ArgType, unsigned ArgNo) { 2520 // FIXME: __attribute__((nonnull)) can also be applied to: 2521 // - references to pointers, where the pointee is known to be 2522 // nonnull (apparently a Clang extension) 2523 // - transparent unions containing pointers 2524 // In the former case, LLVM IR cannot represent the constraint. In 2525 // the latter case, we have no guarantee that the transparent union 2526 // is in fact passed as a pointer. 2527 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType()) 2528 return nullptr; 2529 // First, check attribute on parameter itself. 2530 if (PVD) { 2531 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>()) 2532 return ParmNNAttr; 2533 } 2534 // Check function attributes. 
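  // For example (illustrative):
  //   __attribute__((nonnull(1))) void f(int *p, int *q);
  // marks only 'p'. The attribute's indices are 1-based in source; isNonNull
  // is queried with the 0-based argument number and handles the mapping.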
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

namespace {
struct CopyBackSwiftError final : EHScopeStack::Cleanup {
  Address Temp;
  Address Arg;
  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
    CGF.Builder.CreateStore(errorValue, Arg);
  }
};
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  Address ArgStruct = Address::invalid();
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
                        FI.getArgStructAlignment());

    assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
  }

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  SmallVector<ParamValue, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
    // We are converting from ABIArgInfo type to VarDecl type directly, unless
    // the parameter is promoted. In this case we convert to
    // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
    QualType Ty = isPromoted ? info_it->type : Arg->getType();
    assert(hasScalarEvaluationKind(Ty) ==
           hasScalarEvaluationKind(Arg->getType()));

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      Address V =
          Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
      if (ArgI.getInAllocaIndirect())
        V = Address(Builder.CreateLoad(V),
                    getContext().getTypeAlignInChars(Ty));
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      Address ParamAddr =
          Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested. Also, if the address
        // may be aliased, copy it to ensure that the parameter variable is
        // mutable and has a unique address, as C requires.
        Address V = ParamAddr;
        if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
          Address AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          Builder.CreateMemCpy(
              AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
              ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
              llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
          V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
            EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      auto AI = Fn->getArg(FirstIRArg);
      llvm::Type *LTy = ConvertType(Arg->getType());

      // Prepare parameter attributes. So far, only attributes for pointer
      // parameters are prepared. See
      // http://llvm.org/docs/LangRef.html#paramattrs.
      if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
          ArgI.getCoerceToType()->isPointerTy()) {
        assert(NumIRArgs == 1);

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          // Set `nonnull` attribute if any.
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()) &&
              !CGM.getCodeGenOpts().NullPointerIsValid)
            AI->addAttr(llvm::Attribute::NonNull);

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
                  getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
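            // For example (illustrative):
            //   void f(int a[static 4]);
            // gives the 'a' parameter align 4 and dereferenceable(16),
            // assuming a 4-byte int.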
2709 if (ArrTy->getSizeModifier() == ArrayType::Static) { 2710 QualType ETy = ArrTy->getElementType(); 2711 llvm::Align Alignment = 2712 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2713 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2714 uint64_t ArrSize = ArrTy->getSize().getZExtValue(); 2715 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() && 2716 ArrSize) { 2717 llvm::AttrBuilder Attrs; 2718 Attrs.addDereferenceableAttr( 2719 getContext().getTypeSizeInChars(ETy).getQuantity() * 2720 ArrSize); 2721 AI->addAttrs(Attrs); 2722 } else if (getContext().getTargetInfo().getNullPointerValue( 2723 ETy.getAddressSpace()) == 0 && 2724 !CGM.getCodeGenOpts().NullPointerIsValid) { 2725 AI->addAttr(llvm::Attribute::NonNull); 2726 } 2727 } 2728 } else if (const auto *ArrTy = 2729 getContext().getAsVariableArrayType(OTy)) { 2730 // For C99 VLAs with the static keyword, we don't know the size so 2731 // we can't use the dereferenceable attribute, but in addrspace(0) 2732 // we know that it must be nonnull. 2733 if (ArrTy->getSizeModifier() == VariableArrayType::Static) { 2734 QualType ETy = ArrTy->getElementType(); 2735 llvm::Align Alignment = 2736 CGM.getNaturalTypeAlignment(ETy).getAsAlign(); 2737 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment)); 2738 if (!getContext().getTargetAddressSpace(ETy) && 2739 !CGM.getCodeGenOpts().NullPointerIsValid) 2740 AI->addAttr(llvm::Attribute::NonNull); 2741 } 2742 } 2743 2744 // Set `align` attribute if any. 2745 const auto *AVAttr = PVD->getAttr<AlignValueAttr>(); 2746 if (!AVAttr) 2747 if (const auto *TOTy = dyn_cast<TypedefType>(OTy)) 2748 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>(); 2749 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) { 2750 // If alignment-assumption sanitizer is enabled, we do *not* add 2751 // alignment attribute here, but emit normal alignment assumption, 2752 // so the UBSAN check could function. 2753 llvm::ConstantInt *AlignmentCI = 2754 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment())); 2755 unsigned AlignmentInt = 2756 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment); 2757 if (AI->getParamAlign().valueOrOne() < AlignmentInt) { 2758 AI->removeAttr(llvm::Attribute::AttrKind::Alignment); 2759 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr( 2760 llvm::Align(AlignmentInt))); 2761 } 2762 } 2763 } 2764 2765 // Set 'noalias' if an argument type has the `restrict` qualifier. 2766 if (Arg->getType().isRestrictQualified()) 2767 AI->addAttr(llvm::Attribute::NoAlias); 2768 } 2769 2770 // Prepare the argument value. If we have the trivial case, handle it 2771 // with no muss and fuss. 2772 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) && 2773 ArgI.getCoerceToType() == ConvertType(Ty) && 2774 ArgI.getDirectOffset() == 0) { 2775 assert(NumIRArgs == 1); 2776 2777 // LLVM expects swifterror parameters to be used in very restricted 2778 // ways. Copy the value into a less-restricted temporary. 
2779 llvm::Value *V = AI; 2780 if (FI.getExtParameterInfo(ArgNo).getABI() 2781 == ParameterABI::SwiftErrorResult) { 2782 QualType pointeeTy = Ty->getPointeeType(); 2783 assert(pointeeTy->isPointerType()); 2784 Address temp = 2785 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp"); 2786 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy)); 2787 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg); 2788 Builder.CreateStore(incomingErrorValue, temp); 2789 V = temp.getPointer(); 2790 2791 // Push a cleanup to copy the value back at the end of the function. 2792 // The convention does not guarantee that the value will be written 2793 // back if the function exits with an unwind exception. 2794 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg); 2795 } 2796 2797 // Ensure the argument is the correct type. 2798 if (V->getType() != ArgI.getCoerceToType()) 2799 V = Builder.CreateBitCast(V, ArgI.getCoerceToType()); 2800 2801 if (isPromoted) 2802 V = emitArgumentDemotion(*this, Arg, V); 2803 2804 // Because of merging of function types from multiple decls it is 2805 // possible for the type of an argument to not match the corresponding 2806 // type in the function type. Since we are codegening the callee 2807 // in here, add a cast to the argument type. 2808 llvm::Type *LTy = ConvertType(Arg->getType()); 2809 if (V->getType() != LTy) 2810 V = Builder.CreateBitCast(V, LTy); 2811 2812 ArgVals.push_back(ParamValue::forDirect(V)); 2813 break; 2814 } 2815 2816 // VLST arguments are coerced to VLATs at the function boundary for 2817 // ABI consistency. If this is a VLST that was coerced to 2818 // a VLAT at the function boundary and the types match up, use 2819 // llvm.experimental.vector.extract to convert back to the original 2820 // VLST. 2821 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) { 2822 auto *Coerced = Fn->getArg(FirstIRArg); 2823 if (auto *VecTyFrom = 2824 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) { 2825 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) { 2826 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); 2827 2828 assert(NumIRArgs == 1); 2829 Coerced->setName(Arg->getName() + ".coerce"); 2830 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector( 2831 VecTyTo, Coerced, Zero, "castFixedSve"))); 2832 break; 2833 } 2834 } 2835 } 2836 2837 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg), 2838 Arg->getName()); 2839 2840 // Pointer to store into. 2841 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI); 2842 2843 // Fast-isel and the optimizer generally like scalar values better than 2844 // FCAs, so we flatten them if this is safe to do for this argument. 
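    // For example (illustrative): a parameter 's' coerced to { i64, double }
    // arrives as two IR arguments named s.coerce0 and s.coerce1, which are
    // stored element-by-element into the parameter's alloca below.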
2845 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType()); 2846 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy && 2847 STy->getNumElements() > 1) { 2848 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy); 2849 llvm::Type *DstTy = Ptr.getElementType(); 2850 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy); 2851 2852 Address AddrToStoreInto = Address::invalid(); 2853 if (SrcSize <= DstSize) { 2854 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy); 2855 } else { 2856 AddrToStoreInto = 2857 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce"); 2858 } 2859 2860 assert(STy->getNumElements() == NumIRArgs); 2861 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { 2862 auto AI = Fn->getArg(FirstIRArg + i); 2863 AI->setName(Arg->getName() + ".coerce" + Twine(i)); 2864 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i); 2865 Builder.CreateStore(AI, EltPtr); 2866 } 2867 2868 if (SrcSize > DstSize) { 2869 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize); 2870 } 2871 2872 } else { 2873 // Simple case, just do a coerced store of the argument into the alloca. 2874 assert(NumIRArgs == 1); 2875 auto AI = Fn->getArg(FirstIRArg); 2876 AI->setName(Arg->getName() + ".coerce"); 2877 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this); 2878 } 2879 2880 // Match to what EmitParmDecl is expecting for this type. 2881 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) { 2882 llvm::Value *V = 2883 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc()); 2884 if (isPromoted) 2885 V = emitArgumentDemotion(*this, Arg, V); 2886 ArgVals.push_back(ParamValue::forDirect(V)); 2887 } else { 2888 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2889 } 2890 break; 2891 } 2892 2893 case ABIArgInfo::CoerceAndExpand: { 2894 // Reconstruct into a temporary. 2895 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2896 ArgVals.push_back(ParamValue::forIndirect(alloca)); 2897 2898 auto coercionType = ArgI.getCoerceAndExpandType(); 2899 alloca = Builder.CreateElementBitCast(alloca, coercionType); 2900 2901 unsigned argIndex = FirstIRArg; 2902 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { 2903 llvm::Type *eltType = coercionType->getElementType(i); 2904 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) 2905 continue; 2906 2907 auto eltAddr = Builder.CreateStructGEP(alloca, i); 2908 auto elt = Fn->getArg(argIndex++); 2909 Builder.CreateStore(elt, eltAddr); 2910 } 2911 assert(argIndex == FirstIRArg + NumIRArgs); 2912 break; 2913 } 2914 2915 case ABIArgInfo::Expand: { 2916 // If this structure was expanded into multiple arguments then 2917 // we need to create a temporary and reconstruct it from the 2918 // arguments. 2919 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg)); 2920 LValue LV = MakeAddrLValue(Alloca, Ty); 2921 ArgVals.push_back(ParamValue::forIndirect(Alloca)); 2922 2923 auto FnArgIter = Fn->arg_begin() + FirstIRArg; 2924 ExpandTypeFromArgs(Ty, LV, FnArgIter); 2925 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs); 2926 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) { 2927 auto AI = Fn->getArg(FirstIRArg + i); 2928 AI->setName(Arg->getName() + "." + Twine(i)); 2929 } 2930 break; 2931 } 2932 2933 case ABIArgInfo::Ignore: 2934 assert(NumIRArgs == 0); 2935 // Initialize the local variable appropriately. 
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ParamValue::forDirect(U));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately following the cast: the result has to be the last
  // instruction in the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction *, 4> InstsToKill;

  // Look for:
  // %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    InstsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==
             CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call. If we can't find it, we can't do this
    // optimization. But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
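    // A typical sequence here looks like (illustrative):
    //   %call = call i8* @foo()
    //   call void asm sideeffect "...", ""()   ; the ARC marker
    //   %v = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)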
3015 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) { 3016 llvm::Instruction *prev = call->getPrevNode(); 3017 assert(prev); 3018 if (isa<llvm::BitCastInst>(prev)) { 3019 prev = prev->getPrevNode(); 3020 assert(prev); 3021 } 3022 assert(isa<llvm::CallInst>(prev)); 3023 assert(cast<llvm::CallInst>(prev)->getCalledOperand() == 3024 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker); 3025 InstsToKill.push_back(prev); 3026 } 3027 } else { 3028 return nullptr; 3029 } 3030 3031 result = call->getArgOperand(0); 3032 InstsToKill.push_back(call); 3033 3034 // Keep killing bitcasts, for sanity. Note that we no longer care 3035 // about precise ordering as long as there's exactly one use. 3036 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) { 3037 if (!bitcast->hasOneUse()) break; 3038 InstsToKill.push_back(bitcast); 3039 result = bitcast->getOperand(0); 3040 } 3041 3042 // Delete all the unnecessary instructions, from latest to earliest. 3043 for (auto *I : InstsToKill) 3044 I->eraseFromParent(); 3045 3046 // Do the fused retain/autorelease if we were asked to. 3047 if (doRetainAutorelease) 3048 result = CGF.EmitARCRetainAutoreleaseReturnValue(result); 3049 3050 // Cast back to the result type. 3051 return CGF.Builder.CreateBitCast(result, resultType); 3052 } 3053 3054 /// If this is a +1 of the value of an immutable 'self', remove it. 3055 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF, 3056 llvm::Value *result) { 3057 // This is only applicable to a method with an immutable 'self'. 3058 const ObjCMethodDecl *method = 3059 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl); 3060 if (!method) return nullptr; 3061 const VarDecl *self = method->getSelfDecl(); 3062 if (!self->getType().isConstQualified()) return nullptr; 3063 3064 // Look for a retain call. 3065 llvm::CallInst *retainCall = 3066 dyn_cast<llvm::CallInst>(result->stripPointerCasts()); 3067 if (!retainCall || retainCall->getCalledOperand() != 3068 CGF.CGM.getObjCEntrypoints().objc_retain) 3069 return nullptr; 3070 3071 // Look for an ordinary load of 'self'. 3072 llvm::Value *retainedValue = retainCall->getArgOperand(0); 3073 llvm::LoadInst *load = 3074 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts()); 3075 if (!load || load->isAtomic() || load->isVolatile() || 3076 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer()) 3077 return nullptr; 3078 3079 // Okay! Burn it all down. This relies for correctness on the 3080 // assumption that the retain is emitted as part of the return and 3081 // that thereafter everything is used "linearly". 3082 llvm::Type *resultType = result->getType(); 3083 eraseUnusedBitCasts(cast<llvm::Instruction>(result)); 3084 assert(retainCall->use_empty()); 3085 retainCall->eraseFromParent(); 3086 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue)); 3087 3088 return CGF.Builder.CreateBitCast(load, resultType); 3089 } 3090 3091 /// Emit an ARC autorelease of the result of a function. 3092 /// 3093 /// \return the value to actually return from the function 3094 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF, 3095 llvm::Value *result) { 3096 // If we're returning 'self', kill the initial retain. This is a 3097 // heuristic attempt to "encourage correctness" in the really unfortunate 3098 // case where we have a return of self during a dealloc and we desperately 3099 // need to avoid the possible autorelease. 
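  // (Roughly: if 'return self;' is reached while the object is deallocating,
  // an autorelease here would later deliver a release to an already-destroyed
  // object through the autorelease pool.)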
3100   if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3101     return self;
3102 
3103   // At -O0, try to emit a fused retain/autorelease.
3104   if (CGF.shouldUseFusedARCCalls())
3105     if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3106       return fused;
3107 
3108   return CGF.EmitARCAutoreleaseReturnValue(result);
3109 }
3110 
3111 /// Heuristically search for a dominating store to the return-value slot.
3112 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3113   // Check whether a User is a store whose pointer operand is the ReturnValue.
3114   // We are looking for stores to the ReturnValue, not for stores of the
3115   // ReturnValue to some other location.
3116   auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3117     auto *SI = dyn_cast<llvm::StoreInst>(U);
3118     if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3119       return nullptr;
3120     // These aren't actually possible for non-coerced returns, and we
3121     // only care about non-coerced returns on this code path.
3122     assert(!SI->isAtomic() && !SI->isVolatile());
3123     return SI;
3124   };
3125   // If there are multiple uses of the return-value slot, just check
3126   // for something immediately preceding the IP.  Sometimes this can
3127   // happen with how we generate implicit-returns; it can also happen
3128   // with noreturn cleanups.
3129   if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3130     llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3131     if (IP->empty()) return nullptr;
3132     llvm::Instruction *I = &IP->back();
3133 
3134     // Skip lifetime markers
3135     for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3136                                             IE = IP->rend();
3137          II != IE; ++II) {
3138       if (llvm::IntrinsicInst *Intrinsic =
3139               dyn_cast<llvm::IntrinsicInst>(&*II)) {
3140         if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3141           const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3142           ++II;
3143           if (II == IE)
3144             break;
3145           if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3146             continue;
3147         }
3148       }
3149       I = &*II;
3150       break;
3151     }
3152 
3153     return GetStoreIfValid(I);
3154   }
3155 
3156   llvm::StoreInst *store =
3157       GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3158   if (!store) return nullptr;
3159 
3160   // Now do a quick-and-dirty dominance check: just walk up the
3161   // single-predecessors chain from the current insertion point.
3162   llvm::BasicBlock *StoreBB = store->getParent();
3163   llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3164   while (IP != StoreBB) {
3165     if (!(IP = IP->getSinglePredecessor()))
3166       return nullptr;
3167   }
3168 
3169   // Okay, the store's basic block dominates the insertion point; we
3170   // can do our thing.
3171   return store;
3172 }
3173 
3174 // Helper functions for EmitCMSEClearRecord
3175 
3176 // Set the bits corresponding to a field having width `BitWidth` and located at
3177 // offset `BitOffset` (from the least significant bit) within a storage unit of
3178 // `Bits.size()` bytes.  Each element of `Bits` corresponds to one target byte.
3179 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
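// A worked example, assuming CharWidth == 8: setBitRange(Bits, /*BitOffset=*/4,
// /*BitWidth=*/10, /*CharWidth=*/8) marks bits 4..13, i.e. it ORs 0xF0 into
// Bits[0] and 0x3F into Bits[1].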
3180 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset, 3181 int BitWidth, int CharWidth) { 3182 assert(CharWidth <= 64); 3183 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth); 3184 3185 int Pos = 0; 3186 if (BitOffset >= CharWidth) { 3187 Pos += BitOffset / CharWidth; 3188 BitOffset = BitOffset % CharWidth; 3189 } 3190 3191 const uint64_t Used = (uint64_t(1) << CharWidth) - 1; 3192 if (BitOffset + BitWidth >= CharWidth) { 3193 Bits[Pos++] |= (Used << BitOffset) & Used; 3194 BitWidth -= CharWidth - BitOffset; 3195 BitOffset = 0; 3196 } 3197 3198 while (BitWidth >= CharWidth) { 3199 Bits[Pos++] = Used; 3200 BitWidth -= CharWidth; 3201 } 3202 3203 if (BitWidth > 0) 3204 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset; 3205 } 3206 3207 // Set the bits corresponding to a field having width `BitWidth` and located at 3208 // offset `BitOffset` (from the least significant bit) within a storage unit of 3209 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of 3210 // `Bits` corresponds to one target byte. Use target endian layout. 3211 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset, 3212 int StorageSize, int BitOffset, int BitWidth, 3213 int CharWidth, bool BigEndian) { 3214 3215 SmallVector<uint64_t, 8> TmpBits(StorageSize); 3216 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth); 3217 3218 if (BigEndian) 3219 std::reverse(TmpBits.begin(), TmpBits.end()); 3220 3221 for (uint64_t V : TmpBits) 3222 Bits[StorageOffset++] |= V; 3223 } 3224 3225 static void setUsedBits(CodeGenModule &, QualType, int, 3226 SmallVectorImpl<uint64_t> &); 3227 3228 // Set the bits in `Bits`, which correspond to the value representations of 3229 // the actual members of the record type `RTy`. Note that this function does 3230 // not handle base classes, virtual tables, etc, since they cannot happen in 3231 // CMSE function arguments or return. The bit mask corresponds to the target 3232 // memory layout, i.e. it's endian dependent. 3233 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset, 3234 SmallVectorImpl<uint64_t> &Bits) { 3235 ASTContext &Context = CGM.getContext(); 3236 int CharWidth = Context.getCharWidth(); 3237 const RecordDecl *RD = RTy->getDecl()->getDefinition(); 3238 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD); 3239 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD); 3240 3241 int Idx = 0; 3242 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) { 3243 const FieldDecl *F = *I; 3244 3245 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) || 3246 F->getType()->isIncompleteArrayType()) 3247 continue; 3248 3249 if (F->isBitField()) { 3250 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F); 3251 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(), 3252 BFI.StorageSize / CharWidth, BFI.Offset, 3253 BFI.Size, CharWidth, 3254 CGM.getDataLayout().isBigEndian()); 3255 continue; 3256 } 3257 3258 setUsedBits(CGM, F->getType(), 3259 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits); 3260 } 3261 } 3262 3263 // Set the bits in `Bits`, which correspond to the value representations of 3264 // the elements of an array type `ATy`. 
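// (For instance, given 'struct { char c; short s; } A[2]', the mask for one
// element is computed once into TmpBits and then OR-ed into Bits at each
// element's byte offset, so the padding byte between 'c' and 's' on a typical
// target stays clear in every element.)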
3265 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy, 3266 int Offset, SmallVectorImpl<uint64_t> &Bits) { 3267 const ASTContext &Context = CGM.getContext(); 3268 3269 QualType ETy = Context.getBaseElementType(ATy); 3270 int Size = Context.getTypeSizeInChars(ETy).getQuantity(); 3271 SmallVector<uint64_t, 4> TmpBits(Size); 3272 setUsedBits(CGM, ETy, 0, TmpBits); 3273 3274 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) { 3275 auto Src = TmpBits.begin(); 3276 auto Dst = Bits.begin() + Offset + I * Size; 3277 for (int J = 0; J < Size; ++J) 3278 *Dst++ |= *Src++; 3279 } 3280 } 3281 3282 // Set the bits in `Bits`, which correspond to the value representations of 3283 // the type `QTy`. 3284 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset, 3285 SmallVectorImpl<uint64_t> &Bits) { 3286 if (const auto *RTy = QTy->getAs<RecordType>()) 3287 return setUsedBits(CGM, RTy, Offset, Bits); 3288 3289 ASTContext &Context = CGM.getContext(); 3290 if (const auto *ATy = Context.getAsConstantArrayType(QTy)) 3291 return setUsedBits(CGM, ATy, Offset, Bits); 3292 3293 int Size = Context.getTypeSizeInChars(QTy).getQuantity(); 3294 if (Size <= 0) 3295 return; 3296 3297 std::fill_n(Bits.begin() + Offset, Size, 3298 (uint64_t(1) << Context.getCharWidth()) - 1); 3299 } 3300 3301 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits, 3302 int Pos, int Size, int CharWidth, 3303 bool BigEndian) { 3304 assert(Size > 0); 3305 uint64_t Mask = 0; 3306 if (BigEndian) { 3307 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E; 3308 ++P) 3309 Mask = (Mask << CharWidth) | *P; 3310 } else { 3311 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos; 3312 do 3313 Mask = (Mask << CharWidth) | *--P; 3314 while (P != End); 3315 } 3316 return Mask; 3317 } 3318 3319 // Emit code to clear the bits in a record, which aren't a part of any user 3320 // declared member, when the record is a function return. 3321 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, 3322 llvm::IntegerType *ITy, 3323 QualType QTy) { 3324 assert(Src->getType() == ITy); 3325 assert(ITy->getScalarSizeInBits() <= 64); 3326 3327 const llvm::DataLayout &DataLayout = CGM.getDataLayout(); 3328 int Size = DataLayout.getTypeStoreSize(ITy); 3329 SmallVector<uint64_t, 4> Bits(Size); 3330 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); 3331 3332 int CharWidth = CGM.getContext().getCharWidth(); 3333 uint64_t Mask = 3334 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian()); 3335 3336 return Builder.CreateAnd(Src, Mask, "cmse.clear"); 3337 } 3338 3339 // Emit code to clear the bits in a record, which aren't a part of any user 3340 // declared member, when the record is a function argument. 3341 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src, 3342 llvm::ArrayType *ATy, 3343 QualType QTy) { 3344 const llvm::DataLayout &DataLayout = CGM.getDataLayout(); 3345 int Size = DataLayout.getTypeStoreSize(ATy); 3346 SmallVector<uint64_t, 16> Bits(Size); 3347 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits); 3348 3349 // Clear each element of the LLVM array. 
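  // For example, a record lowered to [2 x i32] is cleared by extracting each
  // i32 element, AND-ing it with the corresponding 4-byte mask built from
  // `Bits`, and inserting the result into a fresh aggregate.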
3350   int CharWidth = CGM.getContext().getCharWidth();
3351   int CharsPerElt =
3352       ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3353   int MaskIndex = 0;
3354   llvm::Value *R = llvm::UndefValue::get(ATy);
3355   for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3356     uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3357                                        DataLayout.isBigEndian());
3358     MaskIndex += CharsPerElt;
3359     llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3360     llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3361     R = Builder.CreateInsertValue(R, T1, I);
3362   }
3363 
3364   return R;
3365 }
3366 
3367 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3368                                          bool EmitRetDbgLoc,
3369                                          SourceLocation EndLoc) {
3370   if (FI.isNoReturn()) {
3371     // Noreturn functions don't return.
3372     EmitUnreachable(EndLoc);
3373     return;
3374   }
3375 
3376   if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3377     // Naked functions don't have epilogues.
3378     Builder.CreateUnreachable();
3379     return;
3380   }
3381 
3382   // Functions with no result always return void.
3383   if (!ReturnValue.isValid()) {
3384     Builder.CreateRetVoid();
3385     return;
3386   }
3387 
3388   llvm::DebugLoc RetDbgLoc;
3389   llvm::Value *RV = nullptr;
3390   QualType RetTy = FI.getReturnType();
3391   const ABIArgInfo &RetAI = FI.getReturnInfo();
3392 
3393   switch (RetAI.getKind()) {
3394   case ABIArgInfo::InAlloca:
3395     // Aggregates get evaluated directly into the destination.  Sometimes we
3396     // need to return the sret value in a register, though.
3397     assert(hasAggregateEvaluationKind(RetTy));
3398     if (RetAI.getInAllocaSRet()) {
3399       llvm::Function::arg_iterator EI = CurFn->arg_end();
3400       --EI;
3401       llvm::Value *ArgStruct = &*EI;
3402       llvm::Value *SRet = Builder.CreateStructGEP(
3403           nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3404       RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
3405     }
3406     break;
3407 
3408   case ABIArgInfo::Indirect: {
3409     auto AI = CurFn->arg_begin();
3410     if (RetAI.isSRetAfterThis())
3411       ++AI;
3412     switch (getEvaluationKind(RetTy)) {
3413     case TEK_Complex: {
3414       ComplexPairTy RT =
3415           EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3416       EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3417                          /*isInit*/ true);
3418       break;
3419     }
3420     case TEK_Aggregate:
3421       // Do nothing; aggregates get evaluated directly into the destination.
3422       break;
3423     case TEK_Scalar:
3424       EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3425                         MakeNaturalAlignAddrLValue(&*AI, RetTy),
3426                         /*isInit*/ true);
3427       break;
3428     }
3429     break;
3430   }
3431 
3432   case ABIArgInfo::Extend:
3433   case ABIArgInfo::Direct:
3434     if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3435         RetAI.getDirectOffset() == 0) {
3436       // The internal return value temp will always have pointer-to-return-type
3437       // type, so just do a load.
3438 
3439       // If there is a dominating store to ReturnValue, we can elide
3440       // the load, zap the store, and usually zap the alloca.
3441       if (llvm::StoreInst *SI =
3442               findDominatingStoreToReturnValue(*this)) {
3443         // Reuse the debug location from the store unless there is
3444         // cleanup code to be emitted between the store and return
3445         // instruction.
3446         if (EmitRetDbgLoc && !AutoreleaseResult)
3447           RetDbgLoc = SI->getDebugLoc();
3448         // Get the stored value and nuke the now-dead store.
3449         RV = SI->getValueOperand();
3450         SI->eraseFromParent();
3451 
3452       // Otherwise, we have to do a simple load.
3453       } else {
3454         RV = Builder.CreateLoad(ReturnValue);
3455       }
3456     } else {
3457       // If the value is offset in memory, apply the offset now.
3458       Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3459 
3460       RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3461     }
3462 
3463     // In ARC, end functions that return a retainable type with a call
3464     // to objc_autoreleaseReturnValue.
3465     if (AutoreleaseResult) {
3466 #ifndef NDEBUG
3467       // Type::isObjCRetainableType has to be called on a QualType that hasn't
3468       // been stripped of the typedefs, so we cannot use RetTy here.  Get the
3469       // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3470       // from CurCodeDecl or BlockInfo.
3471       QualType RT;
3472 
3473       if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3474         RT = FD->getReturnType();
3475       else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3476         RT = MD->getReturnType();
3477       else if (isa<BlockDecl>(CurCodeDecl))
3478         RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3479       else
3480         llvm_unreachable("Unexpected function/method type");
3481 
3482       assert(getLangOpts().ObjCAutoRefCount &&
3483              !FI.isReturnsRetained() &&
3484              RT->isObjCRetainableType());
3485 #endif
3486       RV = emitAutoreleaseOfResult(*this, RV);
3487     }
3488 
3489     break;
3490 
3491   case ABIArgInfo::Ignore:
3492     break;
3493 
3494   case ABIArgInfo::CoerceAndExpand: {
3495     auto coercionType = RetAI.getCoerceAndExpandType();
3496 
3497     // Load all of the coerced elements out into results.
3498     llvm::SmallVector<llvm::Value*, 4> results;
3499     Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3500     for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3501       auto coercedEltType = coercionType->getElementType(i);
3502       if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3503         continue;
3504 
3505       auto eltAddr = Builder.CreateStructGEP(addr, i);
3506       auto elt = Builder.CreateLoad(eltAddr);
3507       results.push_back(elt);
3508     }
3509 
3510     // If we have one result, it's the single direct result type.
3511     if (results.size() == 1) {
3512       RV = results[0];
3513 
3514     // Otherwise, we need to make a first-class aggregate.
3515     } else {
3516       // Construct a return type that lacks padding elements.
3517       llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3518 
3519       RV = llvm::UndefValue::get(returnType);
3520       for (unsigned i = 0, e = results.size(); i != e; ++i) {
3521         RV = Builder.CreateInsertValue(RV, results[i], i);
3522       }
3523     }
3524     break;
3525   }
3526   case ABIArgInfo::Expand:
3527   case ABIArgInfo::IndirectAliased:
3528     llvm_unreachable("Invalid ABI kind for return argument");
3529   }
3530 
3531   llvm::Instruction *Ret;
3532   if (RV) {
3533     if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3534       // For certain return types, clear padding bits, as they may reveal
3535       // sensitive information.
3536       // Small struct/union types are passed as integers.
3537       auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3538       if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3539         RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3540     }
3541     EmitReturnValueCheck(RV);
3542     Ret = Builder.CreateRet(RV);
3543   } else {
3544     Ret = Builder.CreateRetVoid();
3545   }
3546 
3547   if (RetDbgLoc)
3548     Ret->setDebugLoc(std::move(RetDbgLoc));
3549 }
3550 
3551 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3552   // A current decl may not be available when emitting vtable thunks.
3553 if (!CurCodeDecl) 3554 return; 3555 3556 // If the return block isn't reachable, neither is this check, so don't emit 3557 // it. 3558 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) 3559 return; 3560 3561 ReturnsNonNullAttr *RetNNAttr = nullptr; 3562 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) 3563 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>(); 3564 3565 if (!RetNNAttr && !requiresReturnValueNullabilityCheck()) 3566 return; 3567 3568 // Prefer the returns_nonnull attribute if it's present. 3569 SourceLocation AttrLoc; 3570 SanitizerMask CheckKind; 3571 SanitizerHandler Handler; 3572 if (RetNNAttr) { 3573 assert(!requiresReturnValueNullabilityCheck() && 3574 "Cannot check nullability and the nonnull attribute"); 3575 AttrLoc = RetNNAttr->getLocation(); 3576 CheckKind = SanitizerKind::ReturnsNonnullAttribute; 3577 Handler = SanitizerHandler::NonnullReturn; 3578 } else { 3579 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl)) 3580 if (auto *TSI = DD->getTypeSourceInfo()) 3581 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>()) 3582 AttrLoc = FTL.getReturnLoc().findNullabilityLoc(); 3583 CheckKind = SanitizerKind::NullabilityReturn; 3584 Handler = SanitizerHandler::NullabilityReturn; 3585 } 3586 3587 SanitizerScope SanScope(this); 3588 3589 // Make sure the "return" source location is valid. If we're checking a 3590 // nullability annotation, make sure the preconditions for the check are met. 3591 llvm::BasicBlock *Check = createBasicBlock("nullcheck"); 3592 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck"); 3593 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load"); 3594 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr); 3595 if (requiresReturnValueNullabilityCheck()) 3596 CanNullCheck = 3597 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition); 3598 Builder.CreateCondBr(CanNullCheck, Check, NoCheck); 3599 EmitBlock(Check); 3600 3601 // Now do the null check. 3602 llvm::Value *Cond = Builder.CreateIsNotNull(RV); 3603 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)}; 3604 llvm::Value *DynamicData[] = {SLocPtr}; 3605 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData); 3606 3607 EmitBlock(NoCheck); 3608 3609 #ifndef NDEBUG 3610 // The return location should not be used after the check has been emitted. 3611 ReturnLocation = Address::invalid(); 3612 #endif 3613 } 3614 3615 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) { 3616 const CXXRecordDecl *RD = type->getAsCXXRecordDecl(); 3617 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory; 3618 } 3619 3620 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, 3621 QualType Ty) { 3622 // FIXME: Generate IR in one pass, rather than going back and fixing up these 3623 // placeholders. 3624 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty); 3625 llvm::Type *IRPtrTy = IRTy->getPointerTo(); 3626 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo()); 3627 3628 // FIXME: When we generate this IR in one pass, we shouldn't need 3629 // this win32-specific alignment hack. 
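  // (The hard-coded 4 below matches the 4-byte stack alignment of 32-bit x86
  // Windows, currently the only environment where inalloca is used.)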
3630 CharUnits Align = CharUnits::fromQuantity(4); 3631 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align); 3632 3633 return AggValueSlot::forAddr(Address(Placeholder, Align), 3634 Ty.getQualifiers(), 3635 AggValueSlot::IsNotDestructed, 3636 AggValueSlot::DoesNotNeedGCBarriers, 3637 AggValueSlot::IsNotAliased, 3638 AggValueSlot::DoesNotOverlap); 3639 } 3640 3641 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args, 3642 const VarDecl *param, 3643 SourceLocation loc) { 3644 // StartFunction converted the ABI-lowered parameter(s) into a 3645 // local alloca. We need to turn that into an r-value suitable 3646 // for EmitCall. 3647 Address local = GetAddrOfLocalVar(param); 3648 3649 QualType type = param->getType(); 3650 3651 if (isInAllocaArgument(CGM.getCXXABI(), type)) { 3652 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter"); 3653 } 3654 3655 // GetAddrOfLocalVar returns a pointer-to-pointer for references, 3656 // but the argument needs to be the original pointer. 3657 if (type->isReferenceType()) { 3658 args.add(RValue::get(Builder.CreateLoad(local)), type); 3659 3660 // In ARC, move out of consumed arguments so that the release cleanup 3661 // entered by StartFunction doesn't cause an over-release. This isn't 3662 // optimal -O0 code generation, but it should get cleaned up when 3663 // optimization is enabled. This also assumes that delegate calls are 3664 // performed exactly once for a set of arguments, but that should be safe. 3665 } else if (getLangOpts().ObjCAutoRefCount && 3666 param->hasAttr<NSConsumedAttr>() && 3667 type->isObjCRetainableType()) { 3668 llvm::Value *ptr = Builder.CreateLoad(local); 3669 auto null = 3670 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType())); 3671 Builder.CreateStore(null, local); 3672 args.add(RValue::get(ptr), type); 3673 3674 // For the most part, we just need to load the alloca, except that 3675 // aggregate r-values are actually pointers to temporaries. 3676 } else { 3677 args.add(convertTempToRValue(local, type, loc), type); 3678 } 3679 3680 // Deactivate the cleanup for the callee-destructed param that was pushed. 3681 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk && 3682 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() && 3683 param->needsDestruction(getContext())) { 3684 EHScopeStack::stable_iterator cleanup = 3685 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param)); 3686 assert(cleanup.isValid() && 3687 "cleanup for callee-destructed param not recorded"); 3688 // This unreachable is a temporary marker which will be removed later. 3689 llvm::Instruction *isActive = Builder.CreateUnreachable(); 3690 args.addArgCleanupDeactivation(cleanup, isActive); 3691 } 3692 } 3693 3694 static bool isProvablyNull(llvm::Value *addr) { 3695 return isa<llvm::ConstantPointerNull>(addr); 3696 } 3697 3698 /// Emit the actual writing-back of a writeback. 3699 static void emitWriteback(CodeGenFunction &CGF, 3700 const CallArgList::Writeback &writeback) { 3701 const LValue &srcLV = writeback.Source; 3702 Address srcAddr = srcLV.getAddress(CGF); 3703 assert(!isProvablyNull(srcAddr.getPointer()) && 3704 "shouldn't have writeback for provably null argument"); 3705 3706 llvm::BasicBlock *contBB = nullptr; 3707 3708 // If the argument wasn't provably non-null, we need to null check 3709 // before doing the store. 
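  // The control flow emitted below is roughly:
  //   %isnull = icmp eq i8** %src, null
  //   br i1 %isnull, label %icr.done, label %icr.writeback
  // with the writeback store landing in %icr.writeback (types illustrative).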
3710 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3711 CGF.CGM.getDataLayout()); 3712 if (!provablyNonNull) { 3713 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback"); 3714 contBB = CGF.createBasicBlock("icr.done"); 3715 3716 llvm::Value *isNull = 3717 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3718 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB); 3719 CGF.EmitBlock(writebackBB); 3720 } 3721 3722 // Load the value to writeback. 3723 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary); 3724 3725 // Cast it back, in case we're writing an id to a Foo* or something. 3726 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(), 3727 "icr.writeback-cast"); 3728 3729 // Perform the writeback. 3730 3731 // If we have a "to use" value, it's something we need to emit a use 3732 // of. This has to be carefully threaded in: if it's done after the 3733 // release it's potentially undefined behavior (and the optimizer 3734 // will ignore it), and if it happens before the retain then the 3735 // optimizer could move the release there. 3736 if (writeback.ToUse) { 3737 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong); 3738 3739 // Retain the new value. No need to block-copy here: the block's 3740 // being passed up the stack. 3741 value = CGF.EmitARCRetainNonBlock(value); 3742 3743 // Emit the intrinsic use here. 3744 CGF.EmitARCIntrinsicUse(writeback.ToUse); 3745 3746 // Load the old value (primitively). 3747 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation()); 3748 3749 // Put the new value in place (primitively). 3750 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false); 3751 3752 // Release the old value. 3753 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime()); 3754 3755 // Otherwise, we can just do a normal lvalue store. 3756 } else { 3757 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV); 3758 } 3759 3760 // Jump to the continuation block. 3761 if (!provablyNonNull) 3762 CGF.EmitBlock(contBB); 3763 } 3764 3765 static void emitWritebacks(CodeGenFunction &CGF, 3766 const CallArgList &args) { 3767 for (const auto &I : args.writebacks()) 3768 emitWriteback(CGF, I); 3769 } 3770 3771 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF, 3772 const CallArgList &CallArgs) { 3773 ArrayRef<CallArgList::CallArgCleanup> Cleanups = 3774 CallArgs.getCleanupsToDeactivate(); 3775 // Iterate in reverse to increase the likelihood of popping the cleanup. 3776 for (const auto &I : llvm::reverse(Cleanups)) { 3777 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP); 3778 I.IsActiveIP->eraseFromParent(); 3779 } 3780 } 3781 3782 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) { 3783 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens())) 3784 if (uop->getOpcode() == UO_AddrOf) 3785 return uop->getSubExpr(); 3786 return nullptr; 3787 } 3788 3789 /// Emit an argument that's being passed call-by-writeback. That is, 3790 /// we are passing the address of an __autoreleased temporary; it 3791 /// might be copy-initialized with the current value of the given 3792 /// address, but it will definitely be copied out of after the call. 3793 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args, 3794 const ObjCIndirectCopyRestoreExpr *CRE) { 3795 LValue srcLV; 3796 3797 // Make an optimistic effort to emit the address as an l-value. 3798 // This can fail if the argument expression is more complicated. 
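  // e.g. '&x' lets us form the l-value for 'x' directly, whereas an
  // already-computed pointer (say, one returned from a function call) takes
  // the generic scalar path below.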
3799 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) { 3800 srcLV = CGF.EmitLValue(lvExpr); 3801 3802 // Otherwise, just emit it as a scalar. 3803 } else { 3804 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr()); 3805 3806 QualType srcAddrType = 3807 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType(); 3808 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType); 3809 } 3810 Address srcAddr = srcLV.getAddress(CGF); 3811 3812 // The dest and src types don't necessarily match in LLVM terms 3813 // because of the crazy ObjC compatibility rules. 3814 3815 llvm::PointerType *destType = 3816 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType())); 3817 3818 // If the address is a constant null, just pass the appropriate null. 3819 if (isProvablyNull(srcAddr.getPointer())) { 3820 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)), 3821 CRE->getType()); 3822 return; 3823 } 3824 3825 // Create the temporary. 3826 Address temp = CGF.CreateTempAlloca(destType->getElementType(), 3827 CGF.getPointerAlign(), 3828 "icr.temp"); 3829 // Loading an l-value can introduce a cleanup if the l-value is __weak, 3830 // and that cleanup will be conditional if we can't prove that the l-value 3831 // isn't null, so we need to register a dominating point so that the cleanups 3832 // system will make valid IR. 3833 CodeGenFunction::ConditionalEvaluation condEval(CGF); 3834 3835 // Zero-initialize it if we're not doing a copy-initialization. 3836 bool shouldCopy = CRE->shouldCopy(); 3837 if (!shouldCopy) { 3838 llvm::Value *null = 3839 llvm::ConstantPointerNull::get( 3840 cast<llvm::PointerType>(destType->getElementType())); 3841 CGF.Builder.CreateStore(null, temp); 3842 } 3843 3844 llvm::BasicBlock *contBB = nullptr; 3845 llvm::BasicBlock *originBB = nullptr; 3846 3847 // If the address is *not* known to be non-null, we need to switch. 3848 llvm::Value *finalArgument; 3849 3850 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(), 3851 CGF.CGM.getDataLayout()); 3852 if (provablyNonNull) { 3853 finalArgument = temp.getPointer(); 3854 } else { 3855 llvm::Value *isNull = 3856 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull"); 3857 3858 finalArgument = CGF.Builder.CreateSelect(isNull, 3859 llvm::ConstantPointerNull::get(destType), 3860 temp.getPointer(), "icr.argument"); 3861 3862 // If we need to copy, then the load has to be conditional, which 3863 // means we need control flow. 3864 if (shouldCopy) { 3865 originBB = CGF.Builder.GetInsertBlock(); 3866 contBB = CGF.createBasicBlock("icr.cont"); 3867 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy"); 3868 CGF.Builder.CreateCondBr(isNull, contBB, copyBB); 3869 CGF.EmitBlock(copyBB); 3870 condEval.begin(CGF); 3871 } 3872 } 3873 3874 llvm::Value *valueToUse = nullptr; 3875 3876 // Perform a copy if necessary. 3877 if (shouldCopy) { 3878 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation()); 3879 assert(srcRV.isScalar()); 3880 3881 llvm::Value *src = srcRV.getScalarVal(); 3882 src = CGF.Builder.CreateBitCast(src, destType->getElementType(), 3883 "icr.cast"); 3884 3885 // Use an ordinary store, not a store-to-lvalue. 3886 CGF.Builder.CreateStore(src, temp); 3887 3888 // If optimization is enabled, and the value was held in a 3889 // __strong variable, we need to tell the optimizer that this 3890 // value has to stay alive until we're doing the store back. 
3891 // This is because the temporary is effectively unretained, 3892 // and so otherwise we can violate the high-level semantics. 3893 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 && 3894 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) { 3895 valueToUse = src; 3896 } 3897 } 3898 3899 // Finish the control flow if we needed it. 3900 if (shouldCopy && !provablyNonNull) { 3901 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock(); 3902 CGF.EmitBlock(contBB); 3903 3904 // Make a phi for the value to intrinsically use. 3905 if (valueToUse) { 3906 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2, 3907 "icr.to-use"); 3908 phiToUse->addIncoming(valueToUse, copyBB); 3909 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()), 3910 originBB); 3911 valueToUse = phiToUse; 3912 } 3913 3914 condEval.end(CGF); 3915 } 3916 3917 args.addWriteback(srcLV, temp, valueToUse); 3918 args.add(RValue::get(finalArgument), CRE->getType()); 3919 } 3920 3921 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) { 3922 assert(!StackBase); 3923 3924 // Save the stack. 3925 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave); 3926 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save"); 3927 } 3928 3929 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const { 3930 if (StackBase) { 3931 // Restore the stack after the call. 3932 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore); 3933 CGF.Builder.CreateCall(F, StackBase); 3934 } 3935 } 3936 3937 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType, 3938 SourceLocation ArgLoc, 3939 AbstractCallee AC, 3940 unsigned ParmNum) { 3941 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) || 3942 SanOpts.has(SanitizerKind::NullabilityArg))) 3943 return; 3944 3945 // The param decl may be missing in a variadic function. 3946 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr; 3947 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum; 3948 3949 // Prefer the nonnull attribute if it's present. 3950 const NonNullAttr *NNAttr = nullptr; 3951 if (SanOpts.has(SanitizerKind::NonnullAttribute)) 3952 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo); 3953 3954 bool CanCheckNullability = false; 3955 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) { 3956 auto Nullability = PVD->getType()->getNullability(getContext()); 3957 CanCheckNullability = Nullability && 3958 *Nullability == NullabilityKind::NonNull && 3959 PVD->getTypeSourceInfo(); 3960 } 3961 3962 if (!NNAttr && !CanCheckNullability) 3963 return; 3964 3965 SourceLocation AttrLoc; 3966 SanitizerMask CheckKind; 3967 SanitizerHandler Handler; 3968 if (NNAttr) { 3969 AttrLoc = NNAttr->getLocation(); 3970 CheckKind = SanitizerKind::NonnullAttribute; 3971 Handler = SanitizerHandler::NonnullArg; 3972 } else { 3973 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc(); 3974 CheckKind = SanitizerKind::NullabilityArg; 3975 Handler = SanitizerHandler::NullabilityArg; 3976 } 3977 3978 SanitizerScope SanScope(this); 3979 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType); 3980 llvm::Constant *StaticData[] = { 3981 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc), 3982 llvm::ConstantInt::get(Int32Ty, ArgNo + 1), 3983 }; 3984 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None); 3985 } 3986 3987 // Check if the call is going to use the inalloca convention. 
This needs to 3988 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged 3989 // later, so we can't check it directly. 3990 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC, 3991 ArrayRef<QualType> ArgTypes) { 3992 // The Swift calling convention doesn't go through the target-specific 3993 // argument classification, so it never uses inalloca. 3994 // TODO: Consider limiting inalloca use to only calling conventions supported 3995 // by MSVC. 3996 if (ExplicitCC == CC_Swift) 3997 return false; 3998 if (!CGM.getTarget().getCXXABI().isMicrosoft()) 3999 return false; 4000 return llvm::any_of(ArgTypes, [&](QualType Ty) { 4001 return isInAllocaArgument(CGM.getCXXABI(), Ty); 4002 }); 4003 } 4004 4005 #ifndef NDEBUG 4006 // Determine whether the given argument is an Objective-C method 4007 // that may have type parameters in its signature. 4008 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) { 4009 const DeclContext *dc = method->getDeclContext(); 4010 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) { 4011 return classDecl->getTypeParamListAsWritten(); 4012 } 4013 4014 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) { 4015 return catDecl->getTypeParamList(); 4016 } 4017 4018 return false; 4019 } 4020 #endif 4021 4022 /// EmitCallArgs - Emit call arguments for a function. 4023 void CodeGenFunction::EmitCallArgs( 4024 CallArgList &Args, PrototypeWrapper Prototype, 4025 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange, 4026 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) { 4027 SmallVector<QualType, 16> ArgTypes; 4028 4029 assert((ParamsToSkip == 0 || Prototype.P) && 4030 "Can't skip parameters if type info is not provided"); 4031 4032 // This variable only captures *explicitly* written conventions, not those 4033 // applied by default via command line flags or target defaults, such as 4034 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would 4035 // require knowing if this is a C++ instance method or being able to see 4036 // unprototyped FunctionTypes. 4037 CallingConv ExplicitCC = CC_C; 4038 4039 // First, if a prototype was provided, use those argument types. 4040 bool IsVariadic = false; 4041 if (Prototype.P) { 4042 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>(); 4043 if (MD) { 4044 IsVariadic = MD->isVariadic(); 4045 ExplicitCC = getCallingConventionForDecl( 4046 MD, CGM.getTarget().getTriple().isOSWindows()); 4047 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip, 4048 MD->param_type_end()); 4049 } else { 4050 const auto *FPT = Prototype.P.get<const FunctionProtoType *>(); 4051 IsVariadic = FPT->isVariadic(); 4052 ExplicitCC = FPT->getExtInfo().getCC(); 4053 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip, 4054 FPT->param_type_end()); 4055 } 4056 4057 #ifndef NDEBUG 4058 // Check that the prototyped types match the argument expression types. 
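  // (Generic ObjC methods get a pass below because their declared parameter
  // types may be written in terms of type parameters, so they need not match
  // the argument expressions' types exactly.)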
4059   bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4060   CallExpr::const_arg_iterator Arg = ArgRange.begin();
4061   for (QualType Ty : ArgTypes) {
4062     assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4063     assert(
4064         (isGenericMethod || Ty->isVariablyModifiedType() ||
4065          Ty.getNonReferenceType()->isObjCRetainableType() ||
4066          getContext()
4067                  .getCanonicalType(Ty.getNonReferenceType())
4068                  .getTypePtr() ==
4069              getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4070         "type mismatch in call argument!");
4071     ++Arg;
4072   }
4073 
4074   // Either we've emitted all the call args, or we have a call to a variadic
4075   // function.
4076   assert((Arg == ArgRange.end() || IsVariadic) &&
4077          "Extra arguments in non-variadic function!");
4078 #endif
4079   }
4080 
4081   // If we still have any arguments, emit them using the type of the argument.
4082   for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
4083                                   ArgRange.end()))
4084     ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4085   assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4086 
4087   // We must evaluate arguments from right to left in the MS C++ ABI,
4088   // because arguments are destroyed left to right in the callee.  As a special
4089   // case, there are certain language constructs that require left-to-right
4090   // evaluation, and in those cases we consider the evaluation order requirement
4091   // to trump the "destruction order is reverse construction order" guarantee.
4092   bool LeftToRight =
4093       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4094           ? Order == EvaluationOrder::ForceLeftToRight
4095           : Order != EvaluationOrder::ForceRightToLeft;
4096 
4097   auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
4098                                          RValue EmittedArg) {
4099     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
4100       return;
4101     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
4102     if (PS == nullptr)
4103       return;
4104 
4105     const auto &Context = getContext();
4106     auto SizeTy = Context.getSizeType();
4107     auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
4108     assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
4109     llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
4110                                                      EmittedArg.getScalarVal(),
4111                                                      PS->isDynamic());
4112     Args.add(RValue::get(V), SizeTy);
4113     // If we're emitting args in reverse, be sure to do so with
4114     // pass_object_size, as well.
4115     if (!LeftToRight)
4116       std::swap(Args.back(), *(&Args.back() - 1));
4117   };
4118 
4119   // Insert a stack save if we're going to need any inalloca args.
4120   if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
4121     assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
4122            "inalloca only supported on x86");
4123     Args.allocateArgumentMemory(*this);
4124   }
4125 
4126   // Evaluate each argument in the appropriate order.
4127   size_t CallArgsStart = Args.size();
4128   for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
4129     unsigned Idx = LeftToRight ? I : E - I - 1;
4130     CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
4131     unsigned InitialArgSize = Args.size();
4132     // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
4133     // the argument and parameter match or the objc method is parameterized.
4134     assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
4135             getContext().hasSameUnqualifiedType((*Arg)->getType(),
4136                                                 ArgTypes[Idx]) ||
4137             (isa<ObjCMethodDecl>(AC.getDecl()) &&
4138              isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
4139            "Argument and parameter types don't match");
4140     EmitCallArg(Args, *Arg, ArgTypes[Idx]);
4141     // In particular, we depend on it being the last arg in Args, and the
4142     // objectsize bits depend on there only being one arg if !LeftToRight.
4143     assert(InitialArgSize + 1 == Args.size() &&
4144            "The code below depends on only adding one arg per EmitCallArg");
4145     (void)InitialArgSize;
4146     // Since pointer arguments are never emitted as LValues, it is safe to emit
4147     // the non-null argument check for r-values only.
4148     if (!Args.back().hasLValue()) {
4149       RValue RVArg = Args.back().getKnownRValue();
4150       EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
4151                           ParamsToSkip + Idx);
4152       // @llvm.objectsize should never have side-effects and shouldn't need
4153       // destruction/cleanups, so we can safely "emit" it after its arg,
4154       // regardless of right-to-leftness.
4155       MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
4156     }
4157   }
4158 
4159   if (!LeftToRight) {
4160     // Un-reverse the arguments we just evaluated so they match up with the LLVM
4161     // IR function.
4162     std::reverse(Args.begin() + CallArgsStart, Args.end());
4163   }
4164 }
4165 
4166 namespace {
4167 
4168 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
4169   DestroyUnpassedArg(Address Addr, QualType Ty)
4170       : Addr(Addr), Ty(Ty) {}
4171 
4172   Address Addr;
4173   QualType Ty;
4174 
4175   void Emit(CodeGenFunction &CGF, Flags flags) override {
4176     QualType::DestructionKind DtorKind = Ty.isDestructedType();
4177     if (DtorKind == QualType::DK_cxx_destructor) {
4178       const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
4179       assert(!Dtor->isTrivial());
4180       CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
4181                                 /*Delegating=*/false, Addr, Ty);
4182     } else {
4183       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
4184     }
4185   }
4186 };
4187 
4188 struct DisableDebugLocationUpdates {
4189   CodeGenFunction &CGF;
4190   bool disabledDebugInfo;
4191   DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
4192     if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
4193       CGF.disableDebugInfo();
4194   }
4195   ~DisableDebugLocationUpdates() {
4196     if (disabledDebugInfo)
4197       CGF.enableDebugInfo();
4198   }
4199 };
4200 
4201 } // end anonymous namespace
4202 
4203 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
4204   if (!HasLV)
4205     return RV;
4206   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
4207   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
4208                         LV.isVolatile());
4209   IsUsed = true;
4210   return RValue::getAggregate(Copy.getAddress(CGF));
4211 }
4212 
4213 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
4214   LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
4215   if (!HasLV && RV.isScalar())
4216     CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
4217   else if (!HasLV && RV.isComplex())
4218     CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
4219   else {
4220     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
4221     LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
4222     // We assume that call args are never copied into subobjects.
4223 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap, 4224 HasLV ? LV.isVolatileQualified() 4225 : RV.isVolatileQualified()); 4226 } 4227 IsUsed = true; 4228 } 4229 4230 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E, 4231 QualType type) { 4232 DisableDebugLocationUpdates Dis(*this, E); 4233 if (const ObjCIndirectCopyRestoreExpr *CRE 4234 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) { 4235 assert(getLangOpts().ObjCAutoRefCount); 4236 return emitWritebackArg(*this, args, CRE); 4237 } 4238 4239 assert(type->isReferenceType() == E->isGLValue() && 4240 "reference binding to unmaterialized r-value!"); 4241 4242 if (E->isGLValue()) { 4243 assert(E->getObjectKind() == OK_Ordinary); 4244 return args.add(EmitReferenceBindingToExpr(E), type); 4245 } 4246 4247 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type); 4248 4249 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee. 4250 // However, we still have to push an EH-only cleanup in case we unwind before 4251 // we make it to the call. 4252 if (HasAggregateEvalKind && 4253 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) { 4254 // If we're using inalloca, use the argument memory. Otherwise, use a 4255 // temporary. 4256 AggValueSlot Slot; 4257 if (args.isUsingInAlloca()) 4258 Slot = createPlaceholderSlot(*this, type); 4259 else 4260 Slot = CreateAggTemp(type, "agg.tmp"); 4261 4262 bool DestroyedInCallee = true, NeedsEHCleanup = true; 4263 if (const auto *RD = type->getAsCXXRecordDecl()) 4264 DestroyedInCallee = RD->hasNonTrivialDestructor(); 4265 else 4266 NeedsEHCleanup = needsEHCleanup(type.isDestructedType()); 4267 4268 if (DestroyedInCallee) 4269 Slot.setExternallyDestructed(); 4270 4271 EmitAggExpr(E, Slot); 4272 RValue RV = Slot.asRValue(); 4273 args.add(RV, type); 4274 4275 if (DestroyedInCallee && NeedsEHCleanup) { 4276 // Create a no-op GEP between the placeholder and the cleanup so we can 4277 // RAUW it successfully. It also serves as a marker of the first 4278 // instruction where the cleanup is active. 4279 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(), 4280 type); 4281 // This unreachable is a temporary marker which will be removed later. 4282 llvm::Instruction *IsActive = Builder.CreateUnreachable(); 4283 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive); 4284 } 4285 return; 4286 } 4287 4288 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) && 4289 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) { 4290 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr()); 4291 assert(L.isSimple()); 4292 args.addUncopiedAggregate(L, type); 4293 return; 4294 } 4295 4296 args.add(EmitAnyExprToTemp(E), type); 4297 } 4298 4299 QualType CodeGenFunction::getVarArgType(const Expr *Arg) { 4300 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC 4301 // implicitly widens null pointer constants that are arguments to varargs 4302 // functions to pointer-sized ints. 
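  // e.g. 'printf("%p", NULL)' on Win64: NULL expands to a 32-bit literal 0,
  // but the callee reads a pointer-sized slot, so we report intptr_t below.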
4303 if (!getTarget().getTriple().isOSWindows()) 4304 return Arg->getType(); 4305 4306 if (Arg->getType()->isIntegerType() && 4307 getContext().getTypeSize(Arg->getType()) < 4308 getContext().getTargetInfo().getPointerWidth(0) && 4309 Arg->isNullPointerConstant(getContext(), 4310 Expr::NPC_ValueDependentIsNotNull)) { 4311 return getContext().getIntPtrType(); 4312 } 4313 4314 return Arg->getType(); 4315 } 4316 4317 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4318 // optimizer it can aggressively ignore unwind edges. 4319 void 4320 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) { 4321 if (CGM.getCodeGenOpts().OptimizationLevel != 0 && 4322 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions) 4323 Inst->setMetadata("clang.arc.no_objc_arc_exceptions", 4324 CGM.getNoObjCARCExceptionsMetadata()); 4325 } 4326 4327 /// Emits a call to the given no-arguments nounwind runtime function. 4328 llvm::CallInst * 4329 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4330 const llvm::Twine &name) { 4331 return EmitNounwindRuntimeCall(callee, None, name); 4332 } 4333 4334 /// Emits a call to the given nounwind runtime function. 4335 llvm::CallInst * 4336 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee, 4337 ArrayRef<llvm::Value *> args, 4338 const llvm::Twine &name) { 4339 llvm::CallInst *call = EmitRuntimeCall(callee, args, name); 4340 call->setDoesNotThrow(); 4341 return call; 4342 } 4343 4344 /// Emits a simple call (never an invoke) to the given no-arguments 4345 /// runtime function. 4346 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4347 const llvm::Twine &name) { 4348 return EmitRuntimeCall(callee, None, name); 4349 } 4350 4351 // Calls which may throw must have operand bundles indicating which funclet 4352 // they are nested within. 4353 SmallVector<llvm::OperandBundleDef, 1> 4354 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) { 4355 SmallVector<llvm::OperandBundleDef, 1> BundleList; 4356 // There is no need for a funclet operand bundle if we aren't inside a 4357 // funclet. 4358 if (!CurrentFuncletPad) 4359 return BundleList; 4360 4361 // Skip intrinsics which cannot throw. 4362 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts()); 4363 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) 4364 return BundleList; 4365 4366 BundleList.emplace_back("funclet", CurrentFuncletPad); 4367 return BundleList; 4368 } 4369 4370 /// Emits a simple call (never an invoke) to the given runtime function. 4371 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee, 4372 ArrayRef<llvm::Value *> args, 4373 const llvm::Twine &name) { 4374 llvm::CallInst *call = Builder.CreateCall( 4375 callee, args, getBundlesForFunclet(callee.getCallee()), name); 4376 call->setCallingConv(getRuntimeCC()); 4377 return call; 4378 } 4379 4380 /// Emits a call or invoke to the given noreturn runtime function. 
4381 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke( 4382 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) { 4383 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4384 getBundlesForFunclet(callee.getCallee()); 4385 4386 if (getInvokeDest()) { 4387 llvm::InvokeInst *invoke = 4388 Builder.CreateInvoke(callee, 4389 getUnreachableBlock(), 4390 getInvokeDest(), 4391 args, 4392 BundleList); 4393 invoke->setDoesNotReturn(); 4394 invoke->setCallingConv(getRuntimeCC()); 4395 } else { 4396 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList); 4397 call->setDoesNotReturn(); 4398 call->setCallingConv(getRuntimeCC()); 4399 Builder.CreateUnreachable(); 4400 } 4401 } 4402 4403 /// Emits a call or invoke instruction to the given nullary runtime function. 4404 llvm::CallBase * 4405 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4406 const Twine &name) { 4407 return EmitRuntimeCallOrInvoke(callee, None, name); 4408 } 4409 4410 /// Emits a call or invoke instruction to the given runtime function. 4411 llvm::CallBase * 4412 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, 4413 ArrayRef<llvm::Value *> args, 4414 const Twine &name) { 4415 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name); 4416 call->setCallingConv(getRuntimeCC()); 4417 return call; 4418 } 4419 4420 /// Emits a call or invoke instruction to the given function, depending 4421 /// on the current state of the EH stack. 4422 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee, 4423 ArrayRef<llvm::Value *> Args, 4424 const Twine &Name) { 4425 llvm::BasicBlock *InvokeDest = getInvokeDest(); 4426 SmallVector<llvm::OperandBundleDef, 1> BundleList = 4427 getBundlesForFunclet(Callee.getCallee()); 4428 4429 llvm::CallBase *Inst; 4430 if (!InvokeDest) 4431 Inst = Builder.CreateCall(Callee, Args, BundleList, Name); 4432 else { 4433 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont"); 4434 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList, 4435 Name); 4436 EmitBlock(ContBB); 4437 } 4438 4439 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC 4440 // optimizer it can aggressively ignore unwind edges. 4441 if (CGM.getLangOpts().ObjCAutoRefCount) 4442 AddObjCARCExceptionMetadata(Inst); 4443 4444 return Inst; 4445 } 4446 4447 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old, 4448 llvm::Value *New) { 4449 DeferredReplacements.push_back( 4450 std::make_pair(llvm::WeakTrackingVH(Old), New)); 4451 } 4452 4453 namespace { 4454 4455 /// Specify given \p NewAlign as the alignment of return value attribute. If 4456 /// such attribute already exists, re-set it to the maximal one of two options. 4457 LLVM_NODISCARD llvm::AttributeList 4458 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx, 4459 const llvm::AttributeList &Attrs, 4460 llvm::Align NewAlign) { 4461 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne(); 4462 if (CurAlign >= NewAlign) 4463 return Attrs; 4464 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign); 4465 return Attrs 4466 .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex, 4467 llvm::Attribute::AttrKind::Alignment) 4468 .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr); 4469 } 4470 4471 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter { 4472 protected: 4473 CodeGenFunction &CGF; 4474 4475 /// We do nothing if this is, or becomes, nullptr. 
4476 const AlignedAttrTy *AA = nullptr; 4477 4478 llvm::Value *Alignment = nullptr; // May or may not be a constant. 4479 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero. 4480 4481 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) 4482 : CGF(CGF_) { 4483 if (!FuncDecl) 4484 return; 4485 AA = FuncDecl->getAttr<AlignedAttrTy>(); 4486 } 4487 4488 public: 4489 /// If we can, materialize the alignment as an attribute on return value. 4490 LLVM_NODISCARD llvm::AttributeList 4491 TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) { 4492 if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment)) 4493 return Attrs; 4494 const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment); 4495 if (!AlignmentCI) 4496 return Attrs; 4497 // We may legitimately have non-power-of-2 alignment here. 4498 // If so, this is UB land, emit it via `@llvm.assume` instead. 4499 if (!AlignmentCI->getValue().isPowerOf2()) 4500 return Attrs; 4501 llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute( 4502 CGF.getLLVMContext(), Attrs, 4503 llvm::Align( 4504 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment))); 4505 AA = nullptr; // We're done. Disallow doing anything else. 4506 return NewAttrs; 4507 } 4508 4509 /// Emit alignment assumption. 4510 /// This is a general fallback that we take if either there is an offset, 4511 /// or the alignment is variable or we are sanitizing for alignment. 4512 void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) { 4513 if (!AA) 4514 return; 4515 CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, 4516 AA->getLocation(), Alignment, OffsetCI); 4517 AA = nullptr; // We're done. Disallow doing anything else. 4518 } 4519 }; 4520 4521 /// Helper data structure to emit `AssumeAlignedAttr`. 4522 class AssumeAlignedAttrEmitter final 4523 : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> { 4524 public: 4525 AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl) 4526 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 4527 if (!AA) 4528 return; 4529 // It is guaranteed that the alignment/offset are constants. 4530 Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment())); 4531 if (Expr *Offset = AA->getOffset()) { 4532 OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset)); 4533 if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset. 4534 OffsetCI = nullptr; 4535 } 4536 } 4537 }; 4538 4539 /// Helper data structure to emit `AllocAlignAttr`. 4540 class AllocAlignAttrEmitter final 4541 : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> { 4542 public: 4543 AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl, 4544 const CallArgList &CallArgs) 4545 : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) { 4546 if (!AA) 4547 return; 4548 // Alignment may or may not be a constant, and that is okay. 4549 Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()] 4550 .getRValue(CGF) 4551 .getScalarVal(); 4552 } 4553 }; 4554 4555 } // namespace 4556 4557 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, 4558 const CGCallee &Callee, 4559 ReturnValueSlot ReturnValue, 4560 const CallArgList &CallArgs, 4561 llvm::CallBase **callOrInvoke, 4562 SourceLocation Loc) { 4563 // FIXME: We no longer need the types from CallArgs; lift up and simplify. 
  assert(Callee.isOrdinary() || Callee.isVirtual());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes,
    // so only check in the case where we have both always_inline and target
    // since otherwise we could be making a conditional call after a check for
    // the proper cpu features (and it won't cause code generation issues due
    // to function-based code generation).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(Loc, FD);

    // Some architectures (such as x86-64) have the ABI changed based on
    // attribute-target/features. Give them a chance to diagnose.
    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
        CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
  }

#ifndef NDEBUG
  if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
    // For an inalloca varargs function, we don't expect CallInfo to match the
    // function pointer's type, because the inalloca struct will have extra
    // fields in it for the varargs parameters. Code later in this function
    // bitcasts the function pointer to the type derived from CallInfo.
    //
    // In other cases, we assert that the types match up (until pointers stop
    // having pointee types).
    llvm::Type *TypeFromVal;
    if (Callee.isVirtual())
      TypeFromVal = Callee.getVirtualFunctionType();
    else
      TypeFromVal =
          Callee.getFunctionPointer()->getType()->getPointerElementType();
    assert(IRFuncTy == TypeFromVal);
  }
#endif

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
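  // For example, for a C function 'struct S f(void)' whose result is returned
  // indirectly, the call is lowered to roughly
  //   call void @f(%struct.S* sret(%struct.S) align 4 %tmp)
  // (a sketch; the exact convention and attributes are target-ABI dependent).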
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the
        // argument struct.
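        // A sketch of the resulting IR (32-bit x86; names and field index
        // hypothetical):
        //   %tmp = alloca %struct.Overaligned, align 32
        //   %slot = getelementptr inbounds <{ ... }>, <{ ... }>* %argmem, i32 0, i32 N
        //   store %struct.Overaligned* %tmp, %struct.Overaligned** %slot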
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL, even if RV is located in the default or alloca address
          // space, we don't want to perform an address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
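          // (e.g. a source object that is only 4-byte aligned being passed
          // byval to a parameter the ABI requires to be 16-byte aligned,
          // when we cannot simply raise the source's known alignment.)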
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          IRCallArgs[FirstIRArg] = AI.getPointer();

          // Emit lifetime markers for the temporary alloca.
          uint64_t ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = V->getType()->getPointerElementType()->getPointerTo(
              CGM.getDataLayout().getAllocaAddrSpace());
          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI() ==
            ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
              Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
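      // (A nonzero direct offset means the coerced value starts partway into
      // the original object; it is zero in the common case.)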
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca =
              CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src,
                                      STy->getPointerTo(Src.getAddressSpace()));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may
          // reveal sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        // Materialize to a temporary.
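        // (A scalar can reach CoerceAndExpand on targets whose calling
        // convention splits it into pieces, e.g. under the Swift CC; spill it
        // here so the per-element loads below can pick it apart.)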
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(
                (unsigned)layout->getAlignment().value(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                DE = DeclaredTy->element_end(),
                                                FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
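  // For example, given the unprototyped C declaration
  //   void f();
  // a call 'f(42)' goes through the type 'void (...)', so the callee may be
  //   bitcast (void ()* @f to void (...)*)
  // and we can call @f directly instead. (A sketch; the exact constant form
  // depends on the IR pointer representation.)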
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // An inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  }

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs =
          Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                             llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if it exists.
  if (InNoMergeAttributedStmt)
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoMerge);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
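  // For example (a sketch):
  //   __attribute__((flatten)) void caller(void) { callee(); }
  // marks the call to callee() always_inline, unless callee() itself is
  // declared noinline.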
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);

    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
        CannotThrow = true;
  }

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to
  // manually pop this cleanup later on. Being eager about this is OK, since
  // this temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs =
          Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                             llvm::Attribute::StrictFP);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate
  // that Control Flow Guard checks should not be added, even if the call is
  // inlined.
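  // For example (a sketch, using MSVC-style syntax):
  //   __declspec(guard(nocf)) void f(void (*fp)(void)) { fp(); }
  // the indirect call through 'fp' gets "guard_nocf", so no Control Flow
  // Guard dispatch check is emitted for it.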
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
          !CI->getCalledFunction())
        Attrs = Attrs.addAttribute(
            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Suppress tail calls if requested.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove it from the function, since CallBase::hasFnAttr
      // additionally checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
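  // (We passed the callee a fresh swifterror temporary above; copy whatever
  // error value it wrote there back into the caller-visible error slot.)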
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring a result that the call actually produced, still
      // construct an appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          EmitAggregateStore(CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the return value doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
    case ABIArgInfo::IndirectAliased:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}